-rw-r--r--  .mailmap | 4
-rw-r--r--  CREDITS | 5
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-deferred_probe | 12
-rw-r--r--  Documentation/DocBook/Makefile | 2
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 13
-rw-r--r--  Documentation/block/queue-sysfs.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/mtd/tango-nand.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/net/mediatek-net.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/phy.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/net/ti,dp83867.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/power/supply/tps65217_charger.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/spi/sh-msiof.txt | 19
-rw-r--r--  Documentation/driver-api/infrastructure.rst | 15
-rw-r--r--  Documentation/filesystems/proc.txt | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-func-close.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-func-ioctl.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-func-open.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-func-poll.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-intro.rst | 17
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-dqevent.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-g-mode.rst | 5
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-receive.rst | 5
-rw-r--r--  Documentation/media/uapi/v4l/pixfmt-007.rst | 23
-rw-r--r--  Documentation/networking/mpls-sysctl.txt | 4
-rw-r--r--  Documentation/power/states.txt | 4
-rw-r--r--  Documentation/unaligned-memory-access.txt | 2
-rw-r--r--  Documentation/vfio-mediated-device.txt | 27
-rw-r--r--  Documentation/vm/page_frags | 42
-rw-r--r--  MAINTAINERS | 130
-rw-r--r--  Makefile | 6
-rw-r--r--  arch/arc/Kconfig | 2
-rw-r--r--  arch/arc/include/asm/cache.h | 9
-rw-r--r--  arch/arc/include/asm/delay.h | 4
-rw-r--r--  arch/arc/include/asm/entry-arcv2.h | 2
-rw-r--r--  arch/arc/include/asm/module.h | 4
-rw-r--r--  arch/arc/include/asm/ptrace.h | 2
-rw-r--r--  arch/arc/include/asm/setup.h | 1
-rw-r--r--  arch/arc/kernel/head.S | 14
-rw-r--r--  arch/arc/kernel/intc-arcv2.c | 6
-rw-r--r--  arch/arc/kernel/intc-compact.c | 4
-rw-r--r--  arch/arc/kernel/mcip.c | 59
-rw-r--r--  arch/arc/kernel/module.c | 4
-rw-r--r--  arch/arc/kernel/smp.c | 25
-rw-r--r--  arch/arc/kernel/unaligned.c | 3
-rw-r--r--  arch/arc/mm/cache.c | 155
-rw-r--r--  arch/arc/mm/init.c | 5
-rw-r--r--  arch/arm/Kconfig | 3
-rw-r--r--  arch/arm/boot/dts/Makefile | 4
-rw-r--r--  arch/arm/boot/dts/am335x-bone-common.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/am335x-icev2.dts | 1
-rw-r--r--  arch/arm/boot/dts/am33xx.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/am4372.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/am571x-idk.dts | 10
-rw-r--r--  arch/arm/boot/dts/am572x-idk.dts | 14
-rw-r--r--  arch/arm/boot/dts/am57xx-idk-common.dtsi | 23
-rw-r--r--  arch/arm/boot/dts/bcm-nsp.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/da850-evm.dts | 1
-rw-r--r--  arch/arm/boot/dts/dm814x.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/dm816x.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/dra7.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/dra72-evm-tps65917.dtsi | 16
-rw-r--r--  arch/arm/boot/dts/imx1.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx23.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx25.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx27.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx28.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx31.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/imx35.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx50.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx51.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx53.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx6dl.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/imx6qdl.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/imx6sl.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/imx6sx.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/imx6ul.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/imx7s.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts | 11
-rw-r--r--  arch/arm/boot/dts/omap2.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap3-n900.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap4.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap5.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/orion5x-linkstation-lschl.dts (renamed from arch/arm/boot/dts/orion5x-lschl.dts) | 4
-rw-r--r--  arch/arm/boot/dts/qcom-apq8064.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom-mdm9615.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/stih407-family.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/sun6i-a31-hummingbird.dts | 4
-rw-r--r--  arch/arm/boot/dts/sun6i-a31.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts | 2
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts | 2
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts | 2
-rw-r--r--  arch/arm/boot/dts/vf610-zii-dev-rev-b.dts | 3
-rw-r--r--  arch/arm/configs/ezx_defconfig | 4
-rw-r--r--  arch/arm/configs/imote2_defconfig | 4
-rw-r--r--  arch/arm/configs/multi_v7_defconfig | 5
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 1
-rw-r--r--  arch/arm/configs/s3c2410_defconfig | 6
-rw-r--r--  arch/arm/include/asm/cputype.h | 3
-rw-r--r--  arch/arm/include/asm/ftrace.h | 18
-rw-r--r--  arch/arm/include/asm/uaccess.h | 44
-rw-r--r--  arch/arm/include/asm/virt.h | 5
-rw-r--r--  arch/arm/include/uapi/asm/types.h (renamed from arch/arm/include/asm/types.h) | 6
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 16
-rw-r--r--  arch/arm/kernel/ptrace.c | 2
-rw-r--r--  arch/arm/kernel/smp_tlb.c | 7
-rw-r--r--  arch/arm/kvm/arm.c | 3
-rw-r--r--  arch/arm/lib/getuser.S | 2
-rw-r--r--  arch/arm/mach-davinci/clock.c | 12
-rw-r--r--  arch/arm/mach-davinci/clock.h | 2
-rw-r--r--  arch/arm/mach-davinci/da850.c | 32
-rw-r--r--  arch/arm/mach-davinci/usb-da8xx.c | 34
-rw-r--r--  arch/arm/mach-exynos/platsmp.c | 31
-rw-r--r--  arch/arm/mach-imx/mach-imx1.c | 1
-rw-r--r--  arch/arm/mach-imx/mmdc.c | 2
-rw-r--r--  arch/arm/mach-omap1/dma.c | 16
-rw-r--r--  arch/arm/mach-omap2/Makefile | 2
-rw-r--r--  arch/arm/mach-omap2/board-generic.c | 2
-rw-r--r--  arch/arm/mach-omap2/gpio.c | 160
-rw-r--r--  arch/arm/mach-omap2/omap-headsmp.S | 3
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 8
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 20
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_common_data.h | 4
-rw-r--r--  arch/arm/mach-omap2/pdata-quirks.c | 2
-rw-r--r--  arch/arm/mach-omap2/prm_common.c | 4
-rw-r--r--  arch/arm/mach-omap2/timer.c | 9
-rw-r--r--  arch/arm/mach-s3c24xx/common.c | 76
-rw-r--r--  arch/arm/mach-ux500/pm.c | 4
-rw-r--r--  arch/arm/mm/fault.c | 4
-rw-r--r--  arch/arm/mm/fault.h | 4
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gx.dtsi | 34
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts | 16
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts | 16
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi | 16
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts | 16
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts | 16
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxm.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts | 2
-rw-r--r--  arch/arm64/boot/dts/exynos/exynos5433.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8996.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts | 2
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts | 2
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp.dtsi | 6
-rw-r--r--  arch/arm64/configs/defconfig | 1
-rw-r--r--  arch/arm64/crypto/aes-modes.S | 88
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h | 65
-rw-r--r--  arch/arm64/include/asm/assembler.h | 36
-rw-r--r--  arch/arm64/include/asm/current.h | 10
-rw-r--r--  arch/arm64/include/asm/memory.h | 2
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 64
-rw-r--r--  arch/arm64/include/asm/virt.h | 9
-rw-r--r--  arch/arm64/include/uapi/asm/ptrace.h | 1
-rw-r--r--  arch/arm64/kernel/entry.S | 4
-rw-r--r--  arch/arm64/kernel/ptrace.c | 16
-rw-r--r--  arch/arm64/kernel/topology.c | 8
-rw-r--r--  arch/arm64/kernel/traps.c | 28
-rw-r--r--  arch/arm64/lib/clear_user.S | 2
-rw-r--r--  arch/arm64/lib/copy_from_user.S | 2
-rw-r--r--  arch/arm64/lib/copy_in_user.S | 2
-rw-r--r--  arch/arm64/lib/copy_to_user.S | 2
-rw-r--r--  arch/arm64/mm/cache.S | 2
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 3
-rw-r--r--  arch/arm64/mm/fault.c | 8
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 2
-rw-r--r--  arch/arm64/mm/init.c | 5
-rw-r--r--  arch/arm64/xen/hypercall.S | 2
-rw-r--r--  arch/frv/include/asm/atomic.h | 35
-rw-r--r--  arch/mips/kvm/entry.c | 5
-rw-r--r--  arch/mips/kvm/mips.c | 4
-rw-r--r--  arch/mn10300/include/asm/switch_to.h | 2
-rw-r--r--  arch/openrisc/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/parisc/include/asm/bitops.h | 8
-rw-r--r--  arch/parisc/include/asm/thread_info.h | 1
-rw-r--r--  arch/parisc/include/uapi/asm/bitsperlong.h | 2
-rw-r--r--  arch/parisc/include/uapi/asm/swab.h | 5
-rw-r--r--  arch/parisc/kernel/time.c | 23
-rw-r--r--  arch/parisc/mm/fault.c | 2
-rw-r--r--  arch/powerpc/Kconfig | 2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-4k.h | 5
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h | 4
-rw-r--r--  arch/powerpc/include/asm/cpu_has_feature.h | 2
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h | 14
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 2
-rw-r--r--  arch/powerpc/include/asm/module.h | 4
-rw-r--r--  arch/powerpc/include/asm/nohash/pgtable.h | 5
-rw-r--r--  arch/powerpc/include/asm/page.h | 3
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h | 1
-rw-r--r--  arch/powerpc/include/asm/pgtable-be-types.h | 8
-rw-r--r--  arch/powerpc/include/asm/pgtable-types.h | 7
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 10
-rw-r--r--  arch/powerpc/include/asm/reg.h | 3
-rw-r--r--  arch/powerpc/include/asm/stackprotector.h | 40
-rw-r--r--  arch/powerpc/include/asm/xics.h | 1
-rw-r--r--  arch/powerpc/kernel/Makefile | 4
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 3
-rw-r--r--  arch/powerpc/kernel/eeh.c | 10
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 2
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 6
-rw-r--r--  arch/powerpc/kernel/module_64.c | 8
-rw-r--r--  arch/powerpc/kernel/process.c | 6
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 3
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 14
-rw-r--r--  arch/powerpc/mm/fault.c | 21
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 4
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c | 5
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 31
-rw-r--r--  arch/powerpc/mm/init-common.c | 13
-rw-r--r--  arch/powerpc/mm/init_64.c | 3
-rw-r--r--  arch/powerpc/mm/pgtable-book3s64.c | 18
-rw-r--r--  arch/powerpc/mm/pgtable-radix.c | 4
-rw-r--r--  arch/powerpc/mm/tlb-radix.c | 6
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 2
-rw-r--r--  arch/powerpc/perf/power9-events-list.h | 2
-rw-r--r--  arch/powerpc/perf/power9-pmu.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 12
-rw-r--r--  arch/powerpc/sysdev/xics/icp-opal.c | 66
-rw-r--r--  arch/s390/configs/default_defconfig | 27
-rw-r--r--  arch/s390/configs/gcov_defconfig | 50
-rw-r--r--  arch/s390/configs/performance_defconfig | 33
-rw-r--r--  arch/s390/defconfig | 5
-rw-r--r--  arch/s390/include/asm/asm-prototypes.h | 8
-rw-r--r--  arch/s390/include/asm/ctl_reg.h | 4
-rw-r--r--  arch/s390/kernel/ptrace.c | 8
-rw-r--r--  arch/s390/kernel/vtime.c | 8
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 4
-rw-r--r--  arch/s390/mm/pgtable.c | 7
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h | 8
-rw-r--r--  arch/sparc/kernel/irq_64.c | 2
-rw-r--r--  arch/sparc/kernel/sstate.c | 6
-rw-r--r--  arch/sparc/kernel/traps_64.c | 73
-rw-r--r--  arch/tile/kernel/ptrace.c | 2
-rw-r--r--  arch/x86/boot/string.c | 1
-rw-r--r--  arch/x86/boot/string.h | 9
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 11
-rw-r--r--  arch/x86/entry/entry_32.S | 30
-rw-r--r--  arch/x86/entry/entry_64.S | 11
-rw-r--r--  arch/x86/events/amd/ibs.c | 2
-rw-r--r--  arch/x86/events/core.c | 4
-rw-r--r--  arch/x86/events/intel/core.c | 9
-rw-r--r--  arch/x86/events/intel/cstate.c | 2
-rw-r--r--  arch/x86/events/intel/ds.c | 6
-rw-r--r--  arch/x86/events/intel/rapl.c | 61
-rw-r--r--  arch/x86/events/intel/uncore.c | 233
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c | 2
-rw-r--r--  arch/x86/include/asm/bitops.h | 13
-rw-r--r--  arch/x86/include/asm/intel-family.h | 2
-rw-r--r--  arch/x86/include/asm/microcode.h | 1
-rw-r--r--  arch/x86/include/asm/microcode_intel.h | 15
-rw-r--r--  arch/x86/include/asm/processor.h | 19
-rw-r--r--  arch/x86/include/asm/stacktrace.h | 2
-rw-r--r--  arch/x86/include/asm/switch_to.h | 10
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 17
-rw-r--r--  arch/x86/kernel/cpu/common.c | 3
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 11
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 31
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 3
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c | 5
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c | 22
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c | 79
-rw-r--r--  arch/x86/kernel/fpu/core.c | 4
-rw-r--r--  arch/x86/kernel/hpet.c | 1
-rw-r--r--  arch/x86/kernel/pci-swiotlb.c | 6
-rw-r--r--  arch/x86/kernel/smpboot.c | 12
-rw-r--r--  arch/x86/kernel/tsc.c | 6
-rw-r--r--  arch/x86/kernel/tsc_sync.c | 16
-rw-r--r--  arch/x86/kernel/unwind_frame.c | 30
-rw-r--r--  arch/x86/kernel/vm86_32.c | 5
-rw-r--r--  arch/x86/kvm/emulate.c | 70
-rw-r--r--  arch/x86/kvm/lapic.c | 6
-rw-r--r--  arch/x86/kvm/lapic.h | 1
-rw-r--r--  arch/x86/kvm/vmx.c | 14
-rw-r--r--  arch/x86/kvm/x86.c | 16
-rw-r--r--  arch/x86/mm/dump_pagetables.c | 2
-rw-r--r--  arch/x86/mm/mpx.c | 2
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 2
-rw-r--r--  arch/x86/pci/acpi.c | 10
-rw-r--r--  arch/x86/platform/efi/efi.c | 66
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 16
-rw-r--r--  arch/x86/platform/efi/quirks.c | 4
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/Makefile | 2
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c (renamed from arch/x86/platform/intel-mid/device_libs/platform_spidev.c) | 4
-rw-r--r--  arch/x86/xen/pci-swiotlb-xen.c | 2
-rw-r--r--  arch/x86/xen/setup.c | 6
-rw-r--r--  arch/xtensa/kernel/setup.c | 2
-rw-r--r--  block/blk-lib.c | 22
-rw-r--r--  block/blk-mq.c | 1
-rw-r--r--  block/blk-wbt.c | 13
-rw-r--r--  block/blk-zoned.c | 4
-rw-r--r--  block/cfq-iosched.c | 25
-rw-r--r--  block/partition-generic.c | 14
-rw-r--r--  crypto/algapi.c | 1
-rw-r--r--  crypto/algif_aead.c | 2
-rw-r--r--  crypto/testmgr.c | 30
-rw-r--r--  drivers/acpi/acpi_watchdog.c | 2
-rw-r--r--  drivers/acpi/acpica/tbdata.c | 9
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 17
-rw-r--r--  drivers/acpi/glue.c | 11
-rw-r--r--  drivers/acpi/internal.h | 1
-rw-r--r--  drivers/acpi/nfit/core.c | 6
-rw-r--r--  drivers/acpi/scan.c | 1
-rw-r--r--  drivers/acpi/sleep.c | 8
-rw-r--r--  drivers/acpi/sysfs.c | 56
-rw-r--r--  drivers/acpi/video_detect.c | 11
-rw-r--r--  drivers/ata/libata-core.c | 6
-rw-r--r--  drivers/ata/sata_mv.c | 3
-rw-r--r--  drivers/auxdisplay/Kconfig | 6
-rw-r--r--  drivers/base/base.h | 2
-rw-r--r--  drivers/base/core.c | 7
-rw-r--r--  drivers/base/dd.c | 13
-rw-r--r--  drivers/base/firmware_class.c | 5
-rw-r--r--  drivers/base/memory.c | 12
-rw-r--r--  drivers/base/power/domain.c | 1
-rw-r--r--  drivers/base/power/runtime.c | 11
-rw-r--r--  drivers/bcma/bcma_private.h | 3
-rw-r--r--  drivers/bcma/driver_chipcommon.c | 11
-rw-r--r--  drivers/bcma/driver_mips.c | 3
-rw-r--r--  drivers/block/nbd.c | 12
-rw-r--r--  drivers/block/virtio_blk.c | 7
-rw-r--r--  drivers/block/xen-blkfront.c | 22
-rw-r--r--  drivers/block/zram/zram_drv.c | 19
-rw-r--r--  drivers/char/hw_random/core.c | 3
-rw-r--r--  drivers/char/mem.c | 10
-rw-r--r--  drivers/char/ppdev.c | 13
-rw-r--r--  drivers/char/virtio_console.c | 2
-rw-r--r--  drivers/clk/clk-stm32f4.c | 4
-rw-r--r--  drivers/clk/renesas/clk-mstp.c | 27
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c | 14
-rw-r--r--  drivers/clocksource/exynos_mct.c | 1
-rw-r--r--  drivers/cpufreq/brcmstb-avs-cpufreq.c | 17
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 97
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v5.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 1
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c | 6
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 53
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 18
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 3
-rw-r--r--  drivers/crypto/marvell/cesa.h | 3
-rw-r--r--  drivers/crypto/marvell/hash.c | 34
-rw-r--r--  drivers/crypto/marvell/tdma.c | 9
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_drv.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 1
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 4
-rw-r--r--  drivers/devfreq/devfreq.c | 15
-rw-r--r--  drivers/devfreq/exynos-bus.c | 2
-rw-r--r--  drivers/dma/cppi41.c | 69
-rw-r--r--  drivers/dma/dw/Kconfig | 2
-rw-r--r--  drivers/dma/ioat/hw.h | 2
-rw-r--r--  drivers/dma/ioat/init.c | 15
-rw-r--r--  drivers/dma/omap-dma.c | 61
-rw-r--r--  drivers/dma/pl330.c | 30
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 8
-rw-r--r--  drivers/dma/stm32-dma.c | 17
-rw-r--r--  drivers/dma/ti-dma-crossbar.c | 2
-rw-r--r--  drivers/extcon/extcon.c | 2
-rw-r--r--  drivers/firmware/arm_scpi.c | 10
-rw-r--r--  drivers/firmware/efi/fake_mem.c | 3
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 8
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 81
-rw-r--r--  drivers/firmware/efi/memmap.c | 38
-rw-r--r--  drivers/firmware/psci_checker.c | 4
-rw-r--r--  drivers/gpio/gpio-mxs.c | 2
-rw-r--r--  drivers/gpio/gpiolib.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 84
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 10
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 24
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 157
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 18
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 7
-rw-r--r--  drivers/gpu/drm/cirrus/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 25
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 13
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 23
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 2
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 7
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 12
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 36
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 78
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 66
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 120
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 103
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 86
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 84
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 162
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 81
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_internal.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 208
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 8
-rw-r--r--  drivers/gpu/drm/meson/meson_plane.c | 3
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.c | 19
-rw-r--r--  drivers/gpu/drm/meson/meson_venc_cvbs.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 14
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 6
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 22
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 18
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_led.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_usif.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 79
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 25
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 27
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_render_cl.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fb.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 3
-rw-r--r--  drivers/hid/hid-asus.c | 17
-rw-r--r--  drivers/hid/hid-core.c | 1
-rw-r--r--  drivers/hid/hid-corsair.c | 60
-rw-r--r--  drivers/hid/hid-cp2112.c | 28
-rw-r--r--  drivers/hid/hid-cypress.c | 3
-rw-r--r--  drivers/hid/hid-ids.h | 10
-rw-r--r--  drivers/hid/hid-lg.c | 2
-rw-r--r--  drivers/hid/hid-sensor-hub.c | 3
-rw-r--r--  drivers/hid/hid-sony.c | 36
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 9
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 3
-rw-r--r--  drivers/hid/wacom_sys.c | 16
-rw-r--r--  drivers/hid/wacom_wac.c | 34
-rw-r--r--  drivers/hv/ring_buffer.c | 1
-rw-r--r--  drivers/hwmon/lm90.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-cadence.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.c | 45
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.h | 1
-rw-r--r--  drivers/i2c/busses/i2c-imx-lpi2c.c | 20
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 26
-rw-r--r--  drivers/i2c/i2c-core.c | 21
-rw-r--r--  drivers/i2c/i2c-dev.c | 2
-rw-r--r--  drivers/iio/accel/st_accel_core.c | 12
-rw-r--r--  drivers/iio/adc/Kconfig | 2
-rw-r--r--  drivers/iio/adc/palmas_gpadc.c | 4
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_buffer.c | 4
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_core.c | 13
-rw-r--r--  drivers/iio/counter/104-quad-8.c | 13
-rw-r--r--  drivers/iio/health/afe4403.c | 4
-rw-r--r--  drivers/iio/health/afe4404.c | 4
-rw-r--r--  drivers/iio/health/max30100.c | 2
-rw-r--r--  drivers/iio/humidity/dht11.c | 6
-rw-r--r--  drivers/iio/imu/bmi160/bmi160_core.c | 25
-rw-r--r--  drivers/iio/light/max44000.c | 2
-rw-r--r--  drivers/infiniband/core/cma.c | 3
-rw-r--r--  drivers/infiniband/core/umem.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 11
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 7
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 21
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 9
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 24
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 33
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 147
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 11
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 14
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 12
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 23
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h | 8
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.c | 14
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 62
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 4
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c | 8
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c | 3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 2
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 11
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 13
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 15
-rw-r--r--  drivers/input/joydev.c | 1
-rw-r--r--  drivers/input/joystick/xpad.c | 6
-rw-r--r--  drivers/input/misc/adxl34x-i2c.c | 4
-rw-r--r--  drivers/input/misc/uinput.c | 20
-rw-r--r--  drivers/input/mouse/alps.h | 2
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 1
-rw-r--r--  drivers/input/mouse/synaptics_i2c.c | 4
-rw-r--r--  drivers/input/rmi4/Kconfig | 9
-rw-r--r--  drivers/input/rmi4/rmi_driver.c | 4
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 6
-rw-r--r--  drivers/input/touchscreen/elants_i2c.c | 4
-rw-r--r--  drivers/input/touchscreen/wm97xx-core.c | 2
-rw-r--r--  drivers/iommu/amd_iommu.c | 2
-rw-r--r--  drivers/iommu/dmar.c | 6
-rw-r--r--  drivers/iommu/intel-iommu.c | 42
-rw-r--r--  drivers/irqchip/irq-keystone.c | 28
-rw-r--r--  drivers/irqchip/irq-mxs.c | 4
-rw-r--r--  drivers/isdn/hardware/eicon/message.c | 3
-rw-r--r--  drivers/md/dm-crypt.c | 8
-rw-r--r--  drivers/md/dm-mpath.c | 4
-rw-r--r--  drivers/md/dm-rq.c | 4
-rw-r--r--  drivers/md/md.c | 5
-rw-r--r--  drivers/md/md.h | 8
-rw-r--r--  drivers/md/raid0.c | 12
-rw-r--r--  drivers/md/raid1.c | 275
-rw-r--r--  drivers/md/raid10.c | 245
-rw-r--r--  drivers/md/raid5-cache.c | 140
-rw-r--r--  drivers/md/raid5.c | 128
-rw-r--r--  drivers/md/raid5.h | 7
-rw-r--r--  drivers/media/cec/cec-adap.c | 112
-rw-r--r--  drivers/media/dvb-core/dvb_net.c | 15
-rw-r--r--  drivers/media/i2c/Kconfig | 1
-rw-r--r--  drivers/media/i2c/smiapp/smiapp-core.c | 33
-rw-r--r--  drivers/media/i2c/tvp5150.c | 56
-rw-r--r--  drivers/media/i2c/tvp5150_reg.h | 9
-rw-r--r--  drivers/media/pci/cobalt/cobalt-driver.c | 8
-rw-r--r--  drivers/media/pci/cobalt/cobalt-driver.h | 2
-rw-r--r--  drivers/media/usb/dvb-usb/pctv452e.c | 133
-rw-r--r--  drivers/media/usb/siano/smsusb.c | 18
-rw-r--r--  drivers/memstick/core/memstick.c | 2
-rw-r--r--  drivers/misc/mei/bus-fixup.c | 3
-rw-r--r--  drivers/misc/mei/bus.c | 2
-rw-r--r--  drivers/misc/mei/client.c | 20
-rw-r--r--  drivers/misc/mei/debugfs.c | 2
-rw-r--r--  drivers/misc/mei/hbm.c | 4
-rw-r--r--  drivers/misc/mei/hw.h | 6
-rw-r--r--  drivers/misc/mei/mei_dev.h | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 4
-rw-r--r--  drivers/mmc/core/mmc_ops.c | 25
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 7
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 8
-rw-r--r--  drivers/mmc/host/mmci.c | 32
-rw-r--r--  drivers/mmc/host/mxs-mmc.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 3
-rw-r--r--  drivers/mmc/host/sdhci.c | 3
-rw-r--r--  drivers/mtd/nand/Kconfig | 3
-rw-r--r--  drivers/mtd/nand/lpc32xx_mlc.c | 2
-rw-r--r--  drivers/mtd/nand/tango_nand.c | 4
-rw-r--r--  drivers/mtd/nand/xway_nand.c | 5
-rw-r--r--  drivers/net/appletalk/ipddp.c | 2
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c | 1
-rw-r--r--  drivers/net/can/ti_hecc.c | 16
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 11
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 45
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 8
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 6
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 15
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 2
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 11
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 48
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 80
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 3
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 188
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 20
-rw-r--r--  drivers/net/ethernet/cadence/macb_pci.c | 27
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 119
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_xcv.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c | 12
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 59
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 23
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 7
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 43
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 6
-rw-r--r--  drivers/net/ethernet/korina.c | 8
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/catas.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/icm.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/intf.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 266
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 107
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 88
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.h | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-phy.c | 7
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 6
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 3
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 133
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c | 89
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 42
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 1
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 126
-rw-r--r--  drivers/net/gtp.c | 13
-rw-r--r--  drivers/net/hamradio/mkiss.c | 4
-rw-r--r--  drivers/net/hyperv/netvsc.c | 6
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 3
-rw-r--r--  drivers/net/ieee802154/at86rf230.c | 4
-rw-r--r--  drivers/net/ieee802154/atusb.c | 59
-rw-r--r--  drivers/net/ipvlan/ipvlan.h | 5
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 60
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 7
-rw-r--r--  drivers/net/loopback.c | 1
-rw-r--r--  drivers/net/macvtap.c | 6
-rw-r--r--  drivers/net/phy/Kconfig | 1
-rw-r--r--  drivers/net/phy/bcm63xx.c | 21
-rw-r--r--  drivers/net/phy/dp83848.c | 3
-rw-r--r--  drivers/net/phy/dp83867.c | 18
-rw-r--r--  drivers/net/phy/marvell.c | 5
-rw-r--r--  drivers/net/phy/mdio-bcm-iproc.c | 6
-rw-r--r--  drivers/net/phy/micrel.c | 14
-rw-r--r--  drivers/net/phy/phy.c | 24
-rw-r--r--  drivers/net/phy/phy_device.c | 21
-rw-r--r--  drivers/net/phy/phy_led_triggers.c | 9
-rw-r--r--  drivers/net/tun.c | 12
-rw-r--r--  drivers/net/usb/asix_devices.c | 1
-rw-r--r--  drivers/net/usb/catc.c | 56
-rw-r--r--  drivers/net/usb/cdc_ether.c | 8
-rw-r--r--  drivers/net/usb/pegasus.c | 29
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 7
-rw-r--r--  drivers/net/usb/r8152.c | 117
-rw-r--r--  drivers/net/usb/rtl8150.c | 34
-rw-r--r--  drivers/net/usb/sierra_net.c | 111
-rw-r--r--  drivers/net/virtio_net.c | 25
-rw-r--r--  drivers/net/vrf.c | 7
-rw-r--r--  drivers/net/vxlan.c | 27
-rw-r--r--  drivers/net/wan/slic_ds26522.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 12
-rw-r--r--  drivers/net/wireless/intersil/orinoco/mic.c | 44
-rw-r--r--  drivers/net/wireless/intersil/orinoco/mic.h | 3
-rw-r--r--  drivers/net/wireless/intersil/orinoco/orinoco.h | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | 9
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/usb.c | 1
-rw-r--r--  drivers/net/xen-netback/common.h | 8
-rw-r--r--  drivers/net/xen-netback/interface.c | 14
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 13
-rw-r--r--  drivers/net/xen-netfront.c | 46
-rw-r--r--  drivers/ntb/hw/intel/ntb_hw_intel.c | 24
-rw-r--r--  drivers/ntb/ntb_transport.c | 5
-rw-r--r--  drivers/ntb/test/ntb_perf.c | 2
-rw-r--r--  drivers/nvdimm/namespace_devs.c | 34
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 7
-rw-r--r--  drivers/nvdimm/pmem.c | 4
-rw-r--r--  drivers/nvme/host/core.c | 24
-rw-r--r--  drivers/nvme/host/fc.c | 30
-rw-r--r--  drivers/nvme/host/nvme.h | 9
-rw-r--r--  drivers/nvme/host/pci.c | 32
-rw-r--r--  drivers/nvme/host/rdma.c | 15
-rw-r--r--  drivers/nvme/host/scsi.c | 27
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 4
-rw-r--r--  drivers/nvme/target/configfs.c | 1
-rw-r--r--  drivers/nvme/target/core.c | 15
-rw-r--r--  drivers/nvme/target/fc.c | 36
-rw-r--r--  drivers/nvme/target/fcloop.c | 4
-rw-r--r--  drivers/nvme/target/nvmet.h | 1
-rw-r--r--  drivers/nvme/target/rdma.c | 17
-rw-r--r--  drivers/nvmem/core.c | 4
-rw-r--r--  drivers/nvmem/imx-ocotp.c | 2
-rw-r--r--  drivers/nvmem/qfprom.c | 14
-rw-r--r--  drivers/parport/parport_gsc.c | 8
-rw-r--r--  drivers/pci/host/pci-xgene-msi.c | 2
-rw-r--r--  drivers/pci/host/pcie-designware.c | 10
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c | 6
-rw-r--r--  drivers/pci/msi.c | 10
-rw-r--r--  drivers/pci/pci.c | 12
-rw-r--r--  drivers/pci/pcie/aspm.c | 19
-rw-r--r--  drivers/pci/pcie/pme.c | 12
-rw-r--r--  drivers/pci/probe.c | 12
-rw-r--r--  drivers/pinctrl/berlin/berlin-bg4ct.c | 2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 60
-rw-r--r--  drivers/pinctrl/intel/pinctrl-broxton.c | 2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 30
-rw-r--r--  drivers/pinctrl/intel/pinctrl-merrifield.c | 3
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-gxbb.c | 7
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-gxl.c | 7
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 21
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos.c | 91
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos.h | 31
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sunxi.c | 3
-rw-r--r--  drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c | 2
-rw-r--r--  drivers/platform/x86/Kconfig | 2
-rw-r--r--  drivers/platform/x86/fujitsu-laptop.c | 42
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 1
-rw-r--r--  drivers/platform/x86/intel_mid_powerbtn.c | 2
-rw-r--r--  drivers/platform/x86/mlx-platform.c | 2
-rw-r--r--  drivers/platform/x86/surface3-wmi.c | 6
-rw-r--r--  drivers/regulator/axp20x-regulator.c | 2
-rw-r--r--  drivers/regulator/fixed.c | 46
-rw-r--r--  drivers/regulator/twl6030-regulator.c | 2
-rw-r--r--  drivers/remoteproc/remoteproc_core.c | 29
-rw-r--r--  drivers/reset/core.c | 2
-rw-r--r--  drivers/rpmsg/rpmsg_core.c | 4
-rw-r--r--  drivers/rtc/Kconfig | 5
-rw-r--r--  drivers/rtc/rtc-jz4740.c | 12
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 8
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c | 29
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 8
-rw-r--r--  drivers/scsi/bfa/bfad.c | 6
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 2
-rw-r--r--  drivers/scsi/bfa/bfad_drv.h | 2
-rw-r--r--  drivers/scsi/fnic/fnic.h | 1
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 16
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 7
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 12
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 58
-rw-r--r--  drivers/scsi/qedi/Kconfig | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 21
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 95
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 37
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.h | 17
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 20
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 57
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c | 24
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.h | 1
-rw-r--r--  drivers/scsi/scsi_lib.c | 7
-rw-r--r--  drivers/scsi/sd.c | 29
-rw-r--r--  drivers/scsi/ses.c | 2
-rw-r--r--  drivers/scsi/sg.c | 4
-rw-r--r--  drivers/scsi/snic/snic_main.c | 3
-rw-r--r--  drivers/scsi/virtio_scsi.c | 11
-rw-r--r--  drivers/soc/ti/wkup_m3_ipc.c | 1
-rw-r--r--  drivers/spi/Kconfig | 1
-rw-r--r--  drivers/spi/spi-armada-3700.c | 11
-rw-r--r--  drivers/spi/spi-axi-spi-engine.c | 3
-rw-r--r--  drivers/spi/spi-davinci.c | 4
-rw-r--r--  drivers/spi/spi-dw-mid.c | 4
-rw-r--r--  drivers/spi/spi-dw.c | 5
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 1
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 4
-rw-r--r--  drivers/staging/greybus/timesync_platform.c | 6
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_mmap.c | 4
-rw-r--r--  drivers/staging/octeon/ethernet.c | 2
-rw-r--r--  drivers/target/target_core_device.c | 10
-rw-r--r--  drivers/target/target_core_sbc.c | 8
-rw-r--r--  drivers/target/target_core_transport.c | 110
-rw-r--r--  drivers/target/target_core_xcopy.c | 159
-rw-r--r--  drivers/target/target_core_xcopy.h | 7
-rw-r--r--  drivers/thermal/rockchip_thermal.c | 153
-rw-r--r--  drivers/thermal/thermal_core.c | 10
-rw-r--r--  drivers/tty/serial/8250/8250_core.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_pci.c | 12
-rw-r--r--  drivers/tty/serial/8250/8250_port.c | 2
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 22
-rw-r--r--  drivers/tty/sysrq.c | 4
-rw-r--r--  drivers/usb/core/config.c | 10
-rw-r--r--  drivers/usb/core/hub.c | 59
-rw-r--r--  drivers/usb/core/quirks.c | 4
-rw-r--r--  drivers/usb/dwc2/core.h | 4
-rw-r--r--  drivers/usb/dwc2/gadget.c | 20
-rw-r--r--  drivers/usb/dwc2/hcd.c | 7
-rw-r--r--  drivers/usb/dwc2/params.c | 40
-rw-r--r--  drivers/usb/dwc3/core.h | 10
-rw-r--r--  drivers/usb/dwc3/dwc3-exynos.c | 4
-rw-r--r--  drivers/usb/dwc3/dwc3-omap.c | 6
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 17
-rw-r--r--  drivers/usb/dwc3/ep0.c | 46
-rw-r--r--  drivers/usb/dwc3/gadget.c | 24
-rw-r--r--  drivers/usb/gadget/composite.c | 14
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 39
-rw-r--r--  drivers/usb/gadget/function/f_hid.c | 2
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 18
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.c | 3
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.h | 1
-rw-r--r--  drivers/usb/gadget/udc/core.c | 6
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c | 6
-rw-r--r--  drivers/usb/host/ohci-at91.c | 24
-rw-r--r--  drivers/usb/host/xhci-mem.c | 46
-rw-r--r--  drivers/usb/host/xhci-mtk.c | 4
-rw-r--r--  drivers/usb/host/xhci-pci.c | 3
-rw-r--r--  drivers/usb/host/xhci-plat.c | 2
-rw-r--r--  drivers/usb/host/xhci-ring.c | 262
-rw-r--r--  drivers/usb/host/xhci.c | 17
-rw-r--r--  drivers/usb/host/xhci.h | 5
-rw-r--r--  drivers/usb/musb/blackfin.c | 1
-rw-r--r--  drivers/usb/musb/musb_core.c | 28
-rw-r--r--  drivers/usb/musb/musb_core.h | 8
-rw-r--r--  drivers/usb/musb/musb_debugfs.c | 20
-rw-r--r--  drivers/usb/musb/musb_dsps.c | 12
-rw-r--r--  drivers/usb/musb/musb_host.c | 10
-rw-r--r--  drivers/usb/musb/musbhsdma.h | 2
-rw-r--r--  drivers/usb/serial/ch341.c | 108
-rw-r--r--  drivers/usb/serial/cyberjack.c | 10
-rw-r--r--  drivers/usb/serial/f81534.c | 8
-rw-r--r--  drivers/usb/serial/garmin_gps.c | 1
-rw-r--r--  drivers/usb/serial/io_edgeport.c | 5
-rw-r--r--  drivers/usb/serial/io_ti.c | 22
-rw-r--r--  drivers/usb/serial/iuu_phoenix.c | 11
-rw-r--r--  drivers/usb/serial/keyspan_pda.c | 14
-rw-r--r--  drivers/usb/serial/kl5kusb105.c | 9
-rw-r--r--  drivers/usb/serial/kobil_sct.c | 12
-rw-r--r--  drivers/usb/serial/mos7720.c | 56
-rw-r--r--  drivers/usb/serial/mos7840.c | 24
-rw-r--r--  drivers/usb/serial/omninet.c | 13
-rw-r--r--  drivers/usb/serial/option.c | 1
-rw-r--r--  drivers/usb/serial/oti6858.c | 16
-rw-r--r--  drivers/usb/serial/pl2303.c | 9
-rw-r--r--  drivers/usb/serial/pl2303.h | 1
-rw-r--r--  drivers/usb/serial/qcserial.c | 1
-rw-r--r--  drivers/usb/serial/quatech2.c | 4
-rw-r--r--  drivers/usb/serial/spcp8x5.c | 14
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 7
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/usb/wusbcore/crypto.c | 3
-rw-r--r--  drivers/vfio/mdev/mdev_core.c | 100
-rw-r--r--  drivers/vfio/mdev/mdev_private.h | 29
-rw-r--r--  drivers/vfio/mdev/mdev_sysfs.c | 8
-rw-r--r--  drivers/vfio/mdev/vfio_mdev.c | 12
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 4
-rw-r--r--  drivers/vfio/pci/vfio_pci_rdwr.c | 5
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c | 37
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 102
-rw-r--r--  drivers/vhost/scsi.c | 4
-rw-r--r--  drivers/vhost/vhost.c | 10
-rw-r--r--  drivers/vhost/vsock.c | 13
-rw-r--r--  drivers/video/fbdev/cobalt_lcdfb.c | 5
-rw-r--r--  drivers/video/fbdev/core/fbcmap.c | 26
-rw-r--r--  drivers/virtio/virtio_mmio.c | 20
-rw-r--r--  drivers/vme/bridges/vme_ca91cx42.c | 2
-rw-r--r--  drivers/xen/arm-device.c | 8
-rw-r--r--  drivers/xen/events/events_fifo.c | 3
-rw-r--r--  drivers/xen/evtchn.c | 4
-rw-r--r--  drivers/xen/platform-pci.c | 71
-rw-r--r--  drivers/xen/swiotlb-xen.c | 13
-rw-r--r--  drivers/xen/xenbus/xenbus_comms.h | 1
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c | 49
-rw-r--r--  fs/Kconfig | 1
-rw-r--r--  fs/aio.c | 6
-rw-r--r--  fs/binfmt_elf.c | 1
-rw-r--r--  fs/block_dev.c | 9
-rw-r--r--  fs/btrfs/async-thread.c | 15
-rw-r--r--  fs/btrfs/compression.c | 39
-rw-r--r--  fs/btrfs/extent-tree.c | 8
-rw-r--r--  fs/btrfs/inode.c | 39
-rw-r--r--  fs/btrfs/ioctl.c | 6
-rw-r--r--  fs/btrfs/tree-log.c | 13
-rw-r--r--  fs/btrfs/uuid-tree.c | 4
-rw-r--r--  fs/buffer.c | 2
-rw-r--r--  fs/ceph/addr.c | 4
-rw-r--r--  fs/ceph/caps.c | 7
-rw-r--r--  fs/ceph/dir.c | 5
-rw-r--r--  fs/ceph/inode.c | 3
-rw-r--r--  fs/ceph/mds_client.c | 14
-rw-r--r--  fs/cifs/readdir.c | 1
-rw-r--r--  fs/coredump.c | 18
-rw-r--r--  fs/crypto/keyinfo.c | 3
-rw-r--r--  fs/crypto/policy.c | 5
-rw-r--r--  fs/dax.c | 301
-rw-r--r--  fs/dcache.c | 7
-rw-r--r--  fs/direct-io.c | 3
-rw-r--r--  fs/ext2/Kconfig | 1
-rw-r--r--  fs/ext2/inode.c | 3
-rw-r--r--  fs/ext4/Kconfig | 1
-rw-r--r--  fs/ext4/file.c | 48
-rw-r--r--  fs/f2fs/segment.c | 4
-rw-r--r--  fs/f2fs/super.c | 6
-rw-r--r--  fs/fscache/cookie.c | 5
-rw-r--r--  fs/fscache/netfs.c | 1
-rw-r--r--  fs/fscache/object.c | 32
-rw-r--r--  fs/fuse/dev.c | 8
-rw-r--r--  fs/fuse/dir.c | 2
-rw-r--r--  fs/iomap.c | 3
-rw-r--r--  fs/libfs.c | 3
-rw-r--r--  fs/namespace.c | 64
-rw-r--r--  fs/nfs/nfs4proc.c | 33
-rw-r--r--  fs/nfs/nfs4state.c | 2
-rw-r--r--  fs/nfs/pnfs.c | 2
-rw-r--r--  fs/nfsd/nfs4layouts.c | 5
-rw-r--r--  fs/nfsd/nfs4state.c | 19
-rw-r--r--  fs/nfsd/nfs4xdr.c | 4
-rw-r--r--  fs/nfsd/state.h | 4
-rw-r--r--  fs/notify/mark.c | 12
-rw-r--r--  fs/ocfs2/dlmglue.c | 10
-rw-r--r--  fs/ocfs2/stackglue.c | 6
-rw-r--r--  fs/ocfs2/stackglue.h | 3
-rw-r--r--  fs/overlayfs/namei.c | 27
-rw-r--r--  fs/posix_acl.c | 9
-rw-r--r--  fs/proc/base.c | 2
-rw-r--r--  fs/proc/page.c | 3
-rw-r--r--  fs/proc/proc_sysctl.c | 3
-rw-r--r--  fs/pstore/ram.c | 2
-rw-r--r--  fs/romfs/super.c | 23
-rw-r--r--  fs/splice.c | 1
-rw-r--r--  fs/ubifs/Kconfig | 2
-rw-r--r--  fs/ubifs/dir.c | 58
-rw-r--r--  fs/ubifs/ioctl.c | 3
-rw-r--r--  fs/ubifs/journal.c | 2
-rw-r--r--  fs/ubifs/tnc.c | 25
-rw-r--r--  fs/userfaultfd.c | 37
-rw-r--r--  fs/xfs/libxfs/xfs_ag_resv.c | 73
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.c | 115
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.h | 2
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c | 6
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 51
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.h | 6
-rw-r--r--  fs/xfs/libxfs/xfs_bmap_btree.c | 3
-rw-r--r--  fs/xfs/libxfs/xfs_dir2.c | 39
-rw-r--r--  fs/xfs/libxfs/xfs_dir2.h | 8
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc_btree.c | 90
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc_btree.h | 3
-rw-r--r--  fs/xfs/libxfs/xfs_inode_buf.c | 10
-rw-r--r--  fs/xfs/libxfs/xfs_refcount_btree.c | 9
-rw-r--r--  fs/xfs/libxfs/xfs_refcount_btree.h | 3
-rw-r--r--  fs/xfs/libxfs/xfs_rmap_btree.c | 14
-rw-r--r--  fs/xfs/libxfs/xfs_rmap_btree.h | 3
-rw-r--r--  fs/xfs/libxfs/xfs_sb.c | 2
-rw-r--r--  fs/xfs/xfs_aops.c | 19
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 28
-rw-r--r--  fs/xfs/xfs_buf.c | 1
-rw-r--r--  fs/xfs/xfs_dquot.c | 4
-rw-r--r--  fs/xfs/xfs_fsops.c | 14
-rw-r--r--  fs/xfs/xfs_icache.c | 3
-rw-r--r--  fs/xfs/xfs_inode.c | 23
-rw-r--r--  fs/xfs/xfs_iomap.c | 2
-rw-r--r--  fs/xfs/xfs_iops.c | 50
-rw-r--r--  fs/xfs/xfs_linux.h | 6
-rw-r--r--  fs/xfs/xfs_log.c | 12
-rw-r--r--  fs/xfs/xfs_mount.h | 1
-rw-r--r--  fs/xfs/xfs_qm.c | 3
-rw-r--r--  fs/xfs/xfs_refcount_item.c | 3
-rw-r--r--  fs/xfs/xfs_sysfs.c | 4
-rw-r--r--  include/asm-generic/asm-prototypes.h | 6
-rw-r--r--  include/asm-generic/export.h | 11
-rw-r--r--  include/drm/drmP.h | 1
-rw-r--r--  include/drm/drm_atomic.h | 2
-rw-r--r--  include/drm/drm_connector.h | 16
-rw-r--r--  include/drm/drm_mode_config.h | 2
-rw-r--r--  include/dt-bindings/mfd/tps65217.h | 26
-rw-r--r--  include/kvm/arm_arch_timer.h | 1
-rw-r--r--  include/linux/blkdev.h | 19
-rw-r--r--  include/linux/bpf-cgroup.h | 13
-rw-r--r--  include/linux/bpf.h | 4
-rw-r--r--  include/linux/buffer_head.h | 4
-rw-r--r--  include/linux/can/core.h | 7
-rw-r--r--  include/linux/coredump.h | 1
-rw-r--r--  include/linux/cpuhotplug.h | 5
-rw-r--r--  include/linux/cpumask.h | 8
-rw-r--r--  include/linux/dax.h | 3
-rw-r--r--  include/linux/efi.h | 2
-rw-r--r--  include/linux/export.h | 17
-rw-r--r--  include/linux/filter.h | 7
-rw-r--r--  include/linux/fscache-cache.h | 1
-rw-r--r--  include/linux/fsnotify_backend.h | 2
-rw-r--r--  include/linux/genhd.h | 9
-rw-r--r--  include/linux/gfp.h | 22
-rw-r--r--  include/linux/gpio/driver.h | 70
-rw-r--r--  include/linux/hyperv.h | 32
-rw-r--r--  include/linux/i2c.h | 1
-rw-r--r--  include/linux/iio/common/st_sensors.h | 12
-rw-r--r--  include/linux/irq.h | 17
-rw-r--r--  include/linux/jump_label_ratelimit.h | 5
-rw-r--r--  include/linux/kernel.h | 4
-rw-r--r--  include/linux/log2.h | 13
-rw-r--r--  include/linux/mdev.h | 56
-rw-r--r--  include/linux/memcontrol.h | 26
-rw-r--r--  include/linux/memory_hotplug.h | 7
-rw-r--r--  include/linux/micrel_phy.h | 2
-rw-r--r--  include/linux/mlx4/device.h | 2
-rw-r--r--  include/linux/mlx5/device.h | 5
-rw-r--r--  include/linux/mlx5/driver.h | 1
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 93
-rw-r--r--  include/linux/mm.h | 4
-rw-r--r--  include/linux/mm_inline.h | 2
-rw-r--r--  include/linux/mmzone.h | 6
-rw-r--r--  include/linux/module.h | 14
-rw-r--r--  include/linux/netdevice.h | 42
-rw-r--r--  include/linux/nfs4.h | 3
-rw-r--r--  include/linux/nmi.h | 1
-rw-r--r--  include/linux/page-flags.h | 2
-rw-r--r--  include/linux/percpu-refcount.h | 4
-rw-r--r--  include/linux/perf_event.h | 1
-rw-r--r--  include/linux/phy.h | 1
-rw-r--r--  include/linux/phy_led_triggers.h | 4
-rw-r--r--  include/linux/radix-tree.h | 4
-rw-r--r--  include/linux/rcupdate.h | 4
-rw-r--r--  include/linux/remoteproc.h | 4
-rw-r--r--  include/linux/sched.h | 10
-rw-r--r--  include/linux/skbuff.h | 2
-rw-r--r--  include/linux/slab.h | 4
-rw-r--r--  include/linux/sunrpc/clnt.h | 1
-rw-r--r--  include/linux/sunrpc/svc_xprt.h | 1
-rw-r--r--  include/linux/suspend.h | 2
-rw-r--r--  include/linux/swap.h | 3
-rw-r--r--  include/linux/swiotlb.h | 11
-rw-r--r--  include/linux/tcp.h | 7
-rw-r--r--  include/linux/timerfd.h | 20
-rw-r--r--  include/linux/virtio_net.h | 6
-rw-r--r--  include/net/cipso_ipv4.h | 4
-rw-r--r--  include/net/ipv6.h | 7
-rw-r--r--  include/net/lwtunnel.h | 16
-rw-r--r--  include/net/netfilter/nf_tables.h | 6
-rw-r--r--  include/net/netfilter/nft_fib.h | 6
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/net/sock.h | 4
-rw-r--r--  include/net/tcp.h | 1
-rw-r--r--  include/rdma/ib_verbs.h | 14
-rw-r--r--  include/scsi/libfc.h | 6
-rw-r--r--  include/soc/arc/mcip.h | 16
-rw-r--r--  include/sound/hdmi-codec.h | 8
-rw-r--r--  include/sound/soc.h | 3
-rw-r--r--  include/target/target_core_base.h | 5
-rw-r--r--  include/trace/events/btrfs.h | 146
-rw-r--r--  include/trace/events/mmflags.h | 3
-rw-r--r--  include/trace/events/swiotlb.h | 17
-rw-r--r--  include/uapi/linux/Kbuild | 1
-rw-r--r--  include/uapi/linux/bpf.h | 7
-rw-r--r--  include/uapi/linux/cec-funcs.h | 10
-rw-r--r--  include/uapi/linux/ethtool.h | 4
-rw-r--r--  include/uapi/linux/l2tp.h | 7
-rw-r--r--  include/uapi/linux/netfilter/nf_log.h | 2
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 4
-rw-r--r--  include/uapi/linux/nl80211.h | 4
-rw-r--r--  include/uapi/linux/pkt_cls.h | 2
-rw-r--r--  include/uapi/linux/seg6.h | 9
-rw-r--r--  include/uapi/linux/tc_act/tc_bpf.h | 2
-rw-r--r--  include/uapi/linux/timerfd.h | 36
-rw-r--r--  include/uapi/linux/usb/functionfs.h | 1
-rw-r--r--  include/uapi/linux/videodev2.h | 7
-rw-r--r--  include/uapi/rdma/Kbuild | 1
-rw-r--r--  include/uapi/rdma/cxgb3-abi.h | 2
-rw-r--r--  include/uapi/rdma/ib_user_verbs.h | 11
-rw-r--r--  init/Kconfig | 8
-rw-r--r--  ipc/sem.c | 2
-rw-r--r--  kernel/audit_tree.c | 18
-rw-r--r--  kernel/bpf/arraymap.c | 20
-rw-r--r--  kernel/bpf/cgroup.c | 59
-rw-r--r--  kernel/bpf/core.c | 14
-rw-r--r--  kernel/bpf/hashtab.c | 24
-rw-r--r--  kernel/bpf/stackmap.c | 20
-rw-r--r--  kernel/bpf/syscall.c | 54
-rw-r--r--  kernel/bpf/verifier.c | 2
-rw-r--r--  kernel/capability.c | 1
-rw-r--r--  kernel/cgroup.c | 22
-rw-r--r--  kernel/cpu.c | 33
-rw-r--r--  kernel/events/core.c | 269
-rw-r--r--  kernel/futex.c | 2
-rw-r--r--  kernel/irq/irqdomain.c | 44
-rw-r--r--  kernel/jump_label.c | 7
-rw-r--r--  kernel/memremap.c | 4
-rw-r--r--  kernel/module.c | 55
-rw-r--r--  kernel/panic.c | 4
-rw-r--r--  kernel/pid_namespace.c | 10
-rw-r--r--  kernel/power/suspend.c | 4
-rw-r--r--  kernel/printk/printk.c | 2
-rw-r--r--  kernel/rcu/rcu.h | 1
-rw-r--r--  kernel/rcu/tiny.c | 4
-rw-r--r--  kernel/rcu/tiny_plugin.h | 9
-rw-r--r--  kernel/rcu/tree.c | 33
-rw-r--r--  kernel/rcu/tree_exp.h | 52
-rw-r--r--  kernel/rcu/tree_plugin.h | 2
-rw-r--r--  kernel/rcu/update.c | 38
-rw-r--r--  kernel/signal.c | 4
-rw-r--r--  kernel/stacktrace.c | 12
-rw-r--r--  kernel/sysctl.c | 1
-rw-r--r--  kernel/time/tick-broadcast.c | 15
-rw-r--r--  kernel/time/timekeeping_debug.c | 4
-rw-r--r--  kernel/trace/trace_hwlat.c | 8
-rw-r--r--  kernel/trace/trace_kprobe.c | 2
-rw-r--r--  kernel/ucount.c | 17
-rw-r--r--  kernel/watchdog.c | 9
-rw-r--r--  kernel/watchdog_hld.c | 3
-rw-r--r--  lib/Kconfig.debug | 2
-rw-r--r--  lib/ioremap.c | 1
-rw-r--r--  lib/iov_iter.c | 54
-rw-r--r--  lib/radix-tree.c | 11
-rw-r--r--  lib/swiotlb.c | 58
-rw-r--r--  mm/filemap.c | 43
-rw-r--r--  mm/huge_memory.c | 27
-rw-r--r--  mm/hugetlb.c | 37
-rw-r--r--  mm/kasan/report.c | 3
-rw-r--r--  mm/khugepaged.c | 26
-rw-r--r--  mm/memcontrol.c | 22
-rw-r--r--  mm/memory.c | 88
-rw-r--r--  mm/memory_hotplug.c | 56
-rw-r--r--  mm/mempolicy.c | 2
-rw-r--r--  mm/page_alloc.c | 118
-rw-r--r--  mm/shmem.c | 11
-rw-r--r--  mm/slab.c | 8
-rw-r--r--  mm/slub.c | 27
-rw-r--r--  mm/swapfile.c | 20
-rw-r--r--  mm/truncate.c | 75
-rw-r--r--  mm/vmscan.c | 27
-rw-r--r--  mm/workingset.c | 3
-rw-r--r--  mm/zswap.c | 30
-rw-r--r--  net/Kconfig | 4
-rw-r--r--  net/atm/lec.c | 2
-rw-r--r--  net/ax25/ax25_subr.c | 2
-rw-r--r--  net/batman-adv/fragmentation.c | 10
-rw-r--r--  net/bridge/br_netfilter_hooks.c | 2
-rw-r--r--  net/bridge/br_netlink.c | 33
-rw-r--r--  net/can/af_can.c | 12
-rw-r--r--  net/can/af_can.h | 3
-rw-r--r--  net/can/bcm.c | 27
-rw-r--r--  net/can/gw.c | 2
-rw-r--r--  net/can/raw.c | 4
-rw-r--r--  net/ceph/crypto.c | 2
-rw-r--r--  net/core/datagram.c | 8
-rw-r--r--  net/core/dev.c | 39
-rw-r--r--  net/core/drop_monitor.c | 39
-rw-r--r--  net/core/ethtool.c | 11
-rw-r--r--  net/core/filter.c | 6
-rw-r--r--  net/core/flow_dissector.c | 9
-rw-r--r--  net/core/lwt_bpf.c | 1
-rw-r--r--  net/core/lwtunnel.c | 66
-rw-r--r--  net/core/neighbour.c | 3
-rw-r--r--  net/core/rtnetlink.c | 6
-rw-r--r--  net/core/skbuff.c | 8
-rw-r--r--  net/core/sock.c | 6
-rw-r--r--  net/dccp/input.c | 3
-rw-r--r--  net/dccp/ipv6.c | 4
-rw-r--r--  net/dsa/dsa2.c | 12
-rw-r--r--  net/dsa/slave.c | 8
-rw-r--r--  net/ethernet/eth.c | 1
-rw-r--r--  net/ipv4/arp.c | 12
-rw-r--r--  net/ipv4/cipso_ipv4.c | 4
-rw-r--r--  net/ipv4/fib_frontend.c | 10
-rw-r--r--  net/ipv4/fib_semantics.c | 20
-rw-r--r--  net/ipv4/igmp.c | 8
-rw-r--r--  net/ipv4/ip_output.c | 1
-rw-r--r--  net/ipv4/ip_sockglue.c | 17
-rw-r--r--  net/ipv4/ip_tunnel_core.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 37
-rw-r--r--  net/ipv4/netfilter/ipt_rpfilter.c | 8
-rw-r--r--  net/ipv4/netfilter/nf_reject_ipv4.c | 2
-rw-r--r--  net/ipv4/netfilter/nft_fib_ipv4.c | 15
-rw-r--r--  net/ipv4/ping.c | 2
-rw-r--r--  net/ipv4/route.c | 5
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 16
-rw-r--r--  net/ipv4/tcp.c | 6
-rw-r--r--  net/ipv4/tcp_fastopen.c | 3
-rw-r--r--  net/ipv4/tcp_input.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 4
-rw-r--r--  net/ipv4/tcp_metrics.c | 1
-rw-r--r--  net/ipv4/tcp_output.c | 6
-rw-r--r--  net/ipv4/tcp_probe.c | 4
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 20
-rw-r--r--  net/ipv6/datagram.c | 14
-rw-r--r--  net/ipv6/exthdrs.c | 31
-rw-r--r--  net/ipv6/ila/ila_lwt.c | 1
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 2
-rw-r--r--  net/ipv6/ip6_gre.c | 43
-rw-r--r--  net/ipv6/ip6_offload.c | 1
-rw-r--r--  net/ipv6/ip6_output.c | 13
-rw-r--r--  net/ipv6/ip6_tunnel.c | 40
-rw-r--r--  net/ipv6/ip6_vti.c | 2
-rw-r--r--  net/ipv6/mcast.c | 50
-rw-r--r--  net/ipv6/netfilter/ip6t_rpfilter.c | 8
-rw-r--r--  net/ipv6/netfilter/nf_reject_ipv6.c | 3
-rw-r--r--  net/ipv6/netfilter/nft_fib_ipv6.c | 13
-rw-r--r--  net/ipv6/route.c | 19
-rw-r--r--  net/ipv6/seg6.c | 2
-rw-r--r--  net/ipv6/seg6_hmac.c | 10
-rw-r--r--  net/ipv6/seg6_iptunnel.c | 5
-rw-r--r--  net/ipv6/sit.c | 1
-rw-r--r--  net/ipv6/tcp_ipv6.c | 39
-rw-r--r--  net/ipv6/udp.c | 6
-rw-r--r--  net/irda/irqueue.c | 34
-rw-r--r--  net/iucv/af_iucv.c | 25
-rw-r--r--  net/kcm/kcmsock.c | 46
-rw-r--r--  net/l2tp/l2tp_core.h | 1
-rw-r--r--  net/l2tp/l2tp_ip.c | 46
-rw-r--r--  net/l2tp/l2tp_ip6.c | 26
-rw-r--r--  net/llc/llc_conn.c | 3
-rw-r--r--  net/llc/llc_sap.c | 3
-rw-r--r--  net/mac80211/chan.c | 3
-rw-r--r--  net/mac80211/fils_aead.c | 6
-rw-r--r--  net/mac80211/iface.c | 21
-rw-r--r--  net/mac80211/main.c | 13
-rw-r--r--  net/mac80211/mesh.c | 2
-rw-r--r--  net/mac80211/rx.c | 38
-rw-r--r--  net/mac80211/sta_info.c | 4
-rw-r--r--  net/mac80211/tx.c | 20
-rw-r--r--  net/mac80211/vht.c | 4
-rw-r--r--  net/mpls/af_mpls.c | 48
-rw-r--r--  net/mpls/mpls_iptunnel.c | 1
-rw-r--r--  net/netfilter/Kconfig | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 44
-rw-r--r--  net/netfilter/nf_log.c | 1
-rw-r--r--  net/netfilter/nf_tables_api.c | 69
-rw-r--r--  net/netfilter/nft_dynset.c | 3
-rw-r--r--  net/netfilter/nft_log.c | 3
-rw-r--r--  net/netfilter/nft_lookup.c | 3
-rw-r--r--  net/netfilter/nft_objref.c | 6
-rw-r--r--  net/netfilter/nft_payload.c | 27
-rw-r--r--  net/netfilter/nft_queue.c | 2
-rw-r--r--  net/netfilter/nft_quota.c | 26
-rw-r--r--  net/netfilter/nft_set_hash.c | 2
-rw-r--r--  net/netfilter/nft_set_rbtree.c | 2
-rw-r--r--  net/netlabel/netlabel_kapi.c | 5
-rw-r--r--  net/openvswitch/conntrack.c | 6
-rw-r--r--  net/openvswitch/datapath.c | 1
-rw-r--r--  net/openvswitch/flow.c | 54
-rw-r--r--  net/packet/af_packet.c | 83
-rw-r--r--  net/qrtr/qrtr.c | 4
-rw-r--r--  net/sched/act_api.c | 5
-rw-r--r--  net/sched/act_bpf.c | 5
-rw-r--r--  net/sched/cls_api.c | 4
-rw-r--r--  net/sched/cls_bpf.c | 4
-rw-r--r--  net/sched/cls_flower.c | 8
-rw-r--r--  net/sched/cls_matchall.c | 127
-rw-r--r--  net/sctp/ipv6.c | 3
-rw-r--r--  net/sctp/offload.c | 2
-rw-r--r--  net/sctp/outqueue.c | 2
-rw-r--r--  net/sctp/socket.c | 9
-rw-r--r--  net/socket.c | 4
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_xdr.c | 2
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 2
-rw-r--r--  net/sunrpc/clnt.c | 5
-rw-r--r--  net/sunrpc/sunrpc_syms.c | 1
-rw-r--r--  net/sunrpc/svc_xprt.c | 10
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 2
-rw-r--r--  net/tipc/discover.c | 4
-rw-r--r--  net/tipc/link.c | 2
-rw-r--r--  net/tipc/msg.c | 16
-rw-r--r--  net/tipc/msg.h | 2
-rw-r--r--  net/tipc/name_distr.c | 2
-rw-r--r--  net/tipc/node.c | 9
-rw-r--r--  net/tipc/server.c | 48
-rw-r--r--  net/tipc/socket.c | 24
-rw-r--r--  net/tipc/subscr.c | 124
-rw-r--r--  net/tipc/subscr.h | 1
-rw-r--r--  net/unix/af_unix.c | 27
-rw-r--r--  net/wireless/nl80211.c | 32
-rw-r--r--  samples/Kconfig | 7
-rw-r--r--  samples/Makefile | 3
-rw-r--r--  samples/bpf/sock_example.h | 2
-rw-r--r--  samples/bpf/tc_l2_redirect_kern.c | 1
-rw-r--r--  samples/bpf/test_cgrp2_attach.c | 2
-rw-r--r--  samples/bpf/test_cgrp2_attach2.c | 68
-rw-r--r--  samples/bpf/test_cgrp2_sock.c | 2
-rw-r--r--  samples/bpf/test_cgrp2_sock2.c | 2
-rw-r--r--  samples/bpf/trace_output_user.c | 1
-rw-r--r--  samples/bpf/xdp_tx_iptunnel_kern.c | 1
-rw-r--r--  samples/vfio-mdev/Makefile | 14
-rw-r--r--  samples/vfio-mdev/mtty.c | 55
-rw-r--r--  scripts/Makefile.build | 2
-rw-r--r--  scripts/gcc-plugins/gcc-common.h | 85
-rw-r--r--  scripts/gcc-plugins/latent_entropy_plugin.c | 4
-rw-r--r--  scripts/genksyms/genksyms.c | 19
-rw-r--r--  scripts/kallsyms.c | 12
-rw-r--r--  scripts/mod/modpost.c | 10
-rw-r--r--  security/selinux/hooks.c | 2
-rw-r--r--  sound/core/seq/seq_memory.c | 9
-rw-r--r--  sound/core/seq/seq_queue.c | 33
-rw-r--r--  sound/firewire/fireworks/fireworks_stream.c | 2
-rw-r--r--  sound/firewire/tascam/tascam-stream.c | 2
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 2
-rw-r--r--  sound/soc/codecs/nau8825.c | 9
-rw-r--r--  sound/soc/codecs/nau8825.h | 7
-rw-r--r--  sound/soc/codecs/rt5645.c | 3
-rw-r--r--  sound/soc/codecs/tlv320aic3x.c | 13
-rw-r--r--  sound/soc/codecs/wm_adsp.c | 25
-rw-r--r--  sound/soc/dwc/designware_i2s.c | 25
-rw-r--r--  sound/soc/fsl/fsl_ssi.c | 74
-rw-r--r--  sound/soc/intel/boards/bytcr_rt5640.c | 18
-rw-r--r--  sound/soc/intel/skylake/skl-pcm.c | 3
-rw-r--r--  sound/soc/intel/skylake/skl-sst.c | 3
-rw-r--r--  sound/soc/sh/rcar/core.c | 4
-rw-r--r--  sound/soc/soc-core.c | 10
-rw-r--r--  sound/soc/soc-pcm.c | 4
-rw-r--r--  sound/soc/soc-topology.c | 3
-rw-r--r--  sound/usb/endpoint.c | 20
-rw-r--r--  sound/usb/endpoint.h | 2
-rw-r--r--  sound/usb/line6/driver.c | 3
-rw-r--r--  sound/usb/pcm.c | 10
-rw-r--r--  sound/usb/quirks.c | 1
-rw-r--r--  tools/include/uapi/linux/bpf.h | 7
-rw-r--r--  tools/lib/bpf/bpf.c | 4
-rw-r--r--  tools/lib/bpf/bpf.h | 3
-rw-r--r--  tools/lib/subcmd/parse-options.c | 3
-rw-r--r--  tools/lib/subcmd/parse-options.h | 5
-rw-r--r--  tools/lib/traceevent/plugin_sched_switch.c | 4
-rw-r--r--  tools/objtool/arch/x86/decode.c | 2
-rw-r--r--  tools/perf/Documentation/perf-record.txt | 4
-rw-r--r--  tools/perf/Makefile.perf | 4
-rw-r--r--  tools/perf/builtin-diff.c | 2
-rw-r--r--  tools/perf/builtin-kmem.c | 1
-rw-r--r--  tools/perf/builtin-record.c | 4
-rw-r--r--  tools/perf/builtin-sched.c | 17
-rw-r--r--  tools/perf/ui/hist.c | 10
-rw-r--r--  tools/perf/util/callchain.c | 11
-rw-r--r--  tools/perf/util/callchain.h | 6
-rw-r--r--  tools/perf/util/hist.c | 7
-rw-r--r--  tools/perf/util/hist.h | 7
-rw-r--r--  tools/perf/util/probe-event.c | 166
-rw-r--r--  tools/perf/util/probe-finder.c | 15
-rw-r--r--  tools/perf/util/probe-finder.h | 3
-rw-r--r--  tools/perf/util/symbol-elf.c | 6
-rw-r--r--  tools/testing/selftests/Makefile | 2
-rwxr-xr-x  tools/testing/selftests/bpf/test_kmod.sh | 2
-rw-r--r--  tools/testing/selftests/bpf/test_lru_map.c | 53
-rwxr-xr-x  tools/testing/selftests/net/run_netsocktests | 2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c | 2
-rw-r--r--  tools/testing/selftests/x86/protection_keys.c | 2
-rw-r--r--  tools/virtio/ringtest/main.h | 12
-rwxr-xr-x  tools/virtio/ringtest/run-on-all.sh | 5
-rw-r--r--  usr/Makefile | 16
-rw-r--r--  virt/kvm/arm/arch_timer.c | 26
-rw-r--r--  virt/kvm/arm/hyp/timer-sr.c | 33
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c | 18
-rw-r--r--  virt/kvm/arm/vgic/vgic-v2.c | 2
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c | 2
-rw-r--r--  virt/lib/irqbypass.c | 4
1421 files changed, 14772 insertions(+), 8842 deletions(-)
diff --git a/.mailmap b/.mailmap
index 02d261407683..67dc22ffc9a8 100644
--- a/.mailmap
+++ b/.mailmap
@@ -137,6 +137,7 @@ Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
 Sachin P Sant <ssant@in.ibm.com>
+Sarangdhar Joshi <spjoshi@codeaurora.org>
 Sam Ravnborg <sam@mars.ravnborg.org>
 Santosh Shilimkar <ssantosh@kernel.org>
 Santosh Shilimkar <santosh.shilimkar@oracle.org>
@@ -150,10 +151,13 @@ Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
+Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+Subhash Jadavani <subhashj@codeaurora.org>
 Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 Sumit Semwal <sumit.semwal@ti.com>
 Tejun Heo <htejun@gmail.com>
 Thomas Graf <tgraf@suug.ch>
+Thomas Pedersen <twp@codeaurora.org>
 Tony Luck <tony.luck@intel.com>
 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
diff --git a/CREDITS b/CREDITS
index c58560701d13..c5626bf06264 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2478,12 +2478,11 @@ S: D-90453 Nuernberg
 S: Germany
 
 N: Arnaldo Carvalho de Melo
-E: acme@ghostprotocols.net
+E: acme@kernel.org
 E: arnaldo.melo@gmail.com
 E: acme@redhat.com
-W: http://oops.ghostprotocols.net:81/blog/
 P: 1024D/9224DF01 D5DF E3BB E3C8 BCBB F8AD 841A B6AB 4681 9224 DF01
-D: IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks
+D: tools/, IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks
 S: Brazil
 
 N: Karsten Merker
diff --git a/Documentation/ABI/testing/sysfs-devices-deferred_probe b/Documentation/ABI/testing/sysfs-devices-deferred_probe
deleted file mode 100644
index 58553d7a321f..000000000000
--- a/Documentation/ABI/testing/sysfs-devices-deferred_probe
+++ /dev/null
@@ -1,12 +0,0 @@
-What:		/sys/devices/.../deferred_probe
-Date:		August 2016
-Contact:	Ben Hutchings <ben.hutchings@codethink.co.uk>
-Description:
-		The /sys/devices/.../deferred_probe attribute is
-		present for all devices. If a driver detects during
-		probing a device that a related device is not yet
-		ready, it may defer probing of the first device. The
-		kernel will retry probing the first device after any
-		other device is successfully probed. This attribute
-		reads as 1 if probing of this device is currently
-		deferred, or 0 otherwise.
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index c75e5d6b8fa8..a6eb7dcd4dd5 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml \
 	    kernel-api.xml filesystems.xml lsm.xml kgdb.xml \
 	    gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
 	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
-	    80211.xml sh.xml regulator.xml w1.xml \
+	    sh.xml regulator.xml w1.xml \
 	    writing_musb_glue_layer.xml iio.xml
 
 ifeq ($(DOCBOOKS),)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 21e2d8863705..be7c0d9506b1 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -106,6 +106,16 @@
 			use by PCI
 			Format: <irq>,<irq>...
 
+	acpi_mask_gpe=	[HW,ACPI]
+			Due to the existence of _Lxx/_Exx, some GPEs triggered
+			by unsupported hardware/firmware features can result in
+			GPE floodings that cannot be automatically disabled by
+			the GPE dispatcher.
+			This facility can be used to prevent such uncontrolled
+			GPE floodings.
+			Format: <int>
+			Support masking of GPEs numbered from 0x00 to 0x7f.
+
 	acpi_no_auto_serialize	[HW,ACPI]
 			Disable auto-serialization of AML methods
 			AML control methods that contain the opcodes to create
@@ -3811,10 +3821,11 @@
 			it if 0 is given (See Documentation/cgroup-v1/memory.txt)
 
 	swiotlb=	[ARM,IA-64,PPC,MIPS,X86]
-			Format: { <int> | force }
+			Format: { <int> | force | noforce }
 			<int> -- Number of I/O TLB slabs
 			force -- force using of bounce buffers even if they
 				wouldn't be automatically used by the kernel
+			noforce -- Never use bounce buffers (for debugging)
 
 	switches=	[HW,M68k]
 
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 51642159aedb..c0a3bb5a6e4e 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -54,9 +54,9 @@ This is the hardware sector size of the device, in bytes.
 
 io_poll (RW)
 ------------
-When read, this file shows the total number of block IO polls and how
-many returned success. Writing '0' to this file will disable polling
-for this device. Writing any non-zero value will enable this feature.
+When read, this file shows whether polling is enabled (1) or disabled
+(0). Writing '0' to this file will disable polling for this device.
+Writing any non-zero value will enable this feature.
 
 io_poll_delay (RW)
 ------------------
diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt
index 5fa691e6f638..cee9d5055fa2 100644
--- a/Documentation/devicetree/bindings/i2c/i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c.txt
@@ -62,6 +62,9 @@ wants to support one of the below features, it should adapt the bindings below.
62 "irq" and "wakeup" names are recognized by I2C core, other names are 62 "irq" and "wakeup" names are recognized by I2C core, other names are
63 left to individual drivers. 63 left to individual drivers.
64 64
65- host-notify
66 device uses SMBus host notify protocol instead of interrupt line.
67
65- multi-master 68- multi-master
66 states that there is another master active on this bus. The OS can use 69 states that there is another master active on this bus. The OS can use
67 this information to adapt power management to keep the arbitration awake 70 this information to adapt power management to keep the arbitration awake
@@ -81,6 +84,11 @@ Binding may contain optional "interrupts" property, describing interrupts
 used by the device. I2C core will assign "irq" interrupt (or the very first
 interrupt if not using interrupt names) as primary interrupt for the slave.
 
+Alternatively, devices supporting SMbus Host Notify, and connected to
+adapters that support this feature, may use "host-notify" property. I2C
+core will create a virtual interrupt for Host Notify and assign it as
+primary interrupt for the slave.
+
 Also, if device is marked as a wakeup source, I2C core will set up "wakeup"
 interrupt for the device. If "wakeup" interrupt name is not present in the
 binding, then primary interrupt will be used as wakeup interrupt.
diff --git a/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt b/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
index 3e5b9793341f..8682ab6d4a50 100644
--- a/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
+++ b/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
@@ -8,8 +8,9 @@ This driver provides a simple power button event via an Interrupt.
 Required properties:
 - compatible: should be "ti,tps65217-pwrbutton" or "ti,tps65218-pwrbutton"
 
-Required properties for TPS65218:
+Required properties:
 - interrupts: should be one of the following
+   - <2>: For controllers compatible with tps65217
    - <3 IRQ_TYPE_EDGE_BOTH>: For controllers compatible with tps65218
 
 Examples:
@@ -17,6 +18,7 @@ Examples:
 &tps {
 	tps65217-pwrbutton {
 		compatible = "ti,tps65217-pwrbutton";
+		interrupts = <2>;
 	};
 };
 
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
index 0dcb7c7d3e40..944657684d73 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
@@ -15,6 +15,9 @@ Properties:
   Second cell specifies the irq distribution mode to cores
      0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3
 
+  The second cell in interrupts property is deprecated and may be ignored by
+  the kernel.
+
   intc accessed via the special ARC AUX register interface, hence "reg" property
   is not specified.
 
diff --git a/Documentation/devicetree/bindings/mtd/tango-nand.txt b/Documentation/devicetree/bindings/mtd/tango-nand.txt
index ad5a02f2ac8c..cd1bf2ac9055 100644
--- a/Documentation/devicetree/bindings/mtd/tango-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/tango-nand.txt
@@ -5,7 +5,7 @@ Required properties:
 - compatible: "sigma,smp8758-nand"
 - reg: address/size of nfc_reg, nfc_mem, and pbus_reg
 - dmas: reference to the DMA channel used by the controller
-- dma-names: "nfc_sbox"
+- dma-names: "rxtx"
 - clocks: reference to the system clock
 - #address-cells: <1>
 - #size-cells: <0>
@@ -17,9 +17,9 @@ Example:
 
 	nandc: nand-controller@2c000 {
 		compatible = "sigma,smp8758-nand";
-		reg = <0x2c000 0x30 0x2d000 0x800 0x20000 0x1000>;
+		reg = <0x2c000 0x30>, <0x2d000 0x800>, <0x20000 0x1000>;
 		dmas = <&dma0 3>;
-		dma-names = "nfc_sbox";
+		dma-names = "rxtx";
 		clocks = <&clkgen SYS_CLK>;
 		#address-cells = <1>;
 		#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index c010fafc66a8..c7194e87d5f4 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
 * Ethernet controller node
 
 Required properties:
-- compatible: Should be "mediatek,mt7623-eth"
+- compatible: Should be "mediatek,mt2701-eth"
 - reg: Address and length of the register set for the device
 - interrupts: Should contain the three frame engines interrupts in numeric
 	order. These are fe_int0, fe_int1 and fe_int2.
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index ff1bc4b1bb3b..fb5056b22685 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -19,8 +19,9 @@ Optional Properties:
   specifications. If neither of these are specified, the default is to
   assume clause 22.
 
-  If the phy's identifier is known then the list may contain an entry
-  of the form: "ethernet-phy-idAAAA.BBBB" where
+  If the PHY reports an incorrect ID (or none at all) then the
+  "compatible" list may contain an entry with the correct PHY ID in the
+  form: "ethernet-phy-idAAAA.BBBB" where
      AAAA - The value of the 16 bit Phy Identifier 1 register as
             4 hex digits. This is the chip vendor OUI bits 3:18
      BBBB - The value of the 16 bit Phy Identifier 2 register as
diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt
index 85bf945b898f..afe9630a5e7d 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83867.txt
+++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt
@@ -3,9 +3,11 @@
 Required properties:
 	- reg - The ID number for the phy, usually a small integer
 	- ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
-		for applicable values
+		for applicable values. Required only if interface type is
+		PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID
 	- ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
-		for applicable values
+		for applicable values. Required only if interface type is
+		PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID
 	- ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h
 		for applicable values
 
diff --git a/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt b/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt
index 98d131acee95..a11072c5a866 100644
--- a/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt
@@ -2,11 +2,16 @@ TPS65217 Charger
 
 Required Properties:
 -compatible: "ti,tps65217-charger"
+-interrupts: TPS65217 interrupt numbers for the AC and USB charger input change.
+             Should be <0> for the USB charger and <1> for the AC adapter.
+-interrupt-names: Should be "USB" and "AC"
 
 This node is a subnode of the tps65217 PMIC.
 
 Example:
 
 	tps65217-charger {
-		compatible = "ti,tps65090-charger";
+		compatible = "ti,tps65217-charger";
+		interrupts = <0>, <1>;
+		interrupt-names = "USB", "AC";
 	};
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index da6614c63796..dc975064fa27 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -1,17 +1,23 @@
 Renesas MSIOF spi controller
 
 Required properties:
-- compatible           : "renesas,msiof-<soctype>" for SoCs,
-			 "renesas,sh-msiof" for SuperH, or
-			 "renesas,sh-mobile-msiof" for SH Mobile series.
-			 Examples with soctypes are:
-			 "renesas,msiof-r8a7790" (R-Car H2)
+- compatible           : "renesas,msiof-r8a7790" (R-Car H2)
 			 "renesas,msiof-r8a7791" (R-Car M2-W)
 			 "renesas,msiof-r8a7792" (R-Car V2H)
 			 "renesas,msiof-r8a7793" (R-Car M2-N)
 			 "renesas,msiof-r8a7794" (R-Car E2)
 			 "renesas,msiof-r8a7796" (R-Car M3-W)
 			 "renesas,msiof-sh73a0" (SH-Mobile AG5)
+			 "renesas,sh-mobile-msiof" (generic SH-Mobile compatibile device)
+			 "renesas,rcar-gen2-msiof" (generic R-Car Gen2 compatible device)
+			 "renesas,rcar-gen3-msiof" (generic R-Car Gen3 compatible device)
+			 "renesas,sh-msiof" (deprecated)
+
+			 When compatible with the generic version, nodes
+			 must list the SoC-specific version corresponding
+			 to the platform first followed by the generic
+			 version.
+
 - reg                  : A list of offsets and lengths of the register sets for
 			 the device.
 			 If only one register set is present, it is to be used
@@ -61,7 +67,8 @@ Documentation/devicetree/bindings/pinctrl/renesas,*.
 Example:
 
 	msiof0: spi@e6e20000 {
-		compatible = "renesas,msiof-r8a7791";
+		compatible = "renesas,msiof-r8a7791",
+			     "renesas,rcar-gen2-msiof";
 		reg = <0 0xe6e20000 0 0x0064>;
 		interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst
index 0bb0b5fc9512..6d9ff316b608 100644
--- a/Documentation/driver-api/infrastructure.rst
+++ b/Documentation/driver-api/infrastructure.rst
@@ -55,21 +55,6 @@ Device Drivers DMA Management
 .. kernel-doc:: drivers/base/dma-mapping.c
    :export:
 
-Device Drivers Power Management
--------------------------------
-
-.. kernel-doc:: drivers/base/power/main.c
-   :export:
-
-Device Drivers ACPI Support
----------------------------
-
-.. kernel-doc:: drivers/acpi/scan.c
-   :export:
-
-.. kernel-doc:: drivers/acpi/scan.c
-   :internal:
-
 Device drivers PnP support
 --------------------------
 
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 72624a16b792..c94b4675d021 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise
 snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
 It's slow but very precise.
 
-Table 1-2: Contents of the status files (as of 4.1)
+Table 1-2: Contents of the status files (as of 4.8)
 ..............................................................................
  Field                       Content
  Name                        filename of the executable
+ Umask                       file mode creation mask
  State                       state (R is running, S is sleeping, D is sleeping
                              in an uninterruptible wait, Z is zombie,
                              T is traced or stopped)
@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1)
  TracerPid                   PID of process tracing this process (0 if not)
  Uid                         Real, effective, saved set, and file system UIDs
  Gid                         Real, effective, saved set, and file system GIDs
- Umask                       file mode creation mask
  FDSize                      number of file descriptor slots currently allocated
  Groups                      supplementary group list
  NStgid                      descendant namespace thread group ID hierarchy
@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1)
  VmPeak                      peak virtual memory size
  VmSize                      total program size
  VmLck                       locked memory size
+ VmPin                       pinned memory size
  VmHWM                       peak resident set size ("high water mark")
  VmRSS                       size of memory portions. It contains the three
                              following parts (VmRSS = RssAnon + RssFile + RssShmem)
diff --git a/Documentation/media/uapi/cec/cec-func-close.rst b/Documentation/media/uapi/cec/cec-func-close.rst
index 8267c31b317d..895d9c2d1c04 100644
--- a/Documentation/media/uapi/cec/cec-func-close.rst
+++ b/Documentation/media/uapi/cec/cec-func-close.rst
@@ -33,11 +33,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 Closes the cec device. Resources associated with the file descriptor are
 freed. The device configuration remain unchanged.
 
diff --git a/Documentation/media/uapi/cec/cec-func-ioctl.rst b/Documentation/media/uapi/cec/cec-func-ioctl.rst
index 9e8dbb118d6a..7dcfd178fb24 100644
--- a/Documentation/media/uapi/cec/cec-func-ioctl.rst
+++ b/Documentation/media/uapi/cec/cec-func-ioctl.rst
@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 The :c:func:`ioctl()` function manipulates cec device parameters. The
 argument ``fd`` must be an open file descriptor.
 
diff --git a/Documentation/media/uapi/cec/cec-func-open.rst b/Documentation/media/uapi/cec/cec-func-open.rst
index af3f5b5c24c6..0304388cd159 100644
--- a/Documentation/media/uapi/cec/cec-func-open.rst
+++ b/Documentation/media/uapi/cec/cec-func-open.rst
@@ -46,11 +46,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To open a cec device applications call :c:func:`open()` with the
 desired device name. The function has no side effects; the device
 configuration remain unchanged.
diff --git a/Documentation/media/uapi/cec/cec-func-poll.rst b/Documentation/media/uapi/cec/cec-func-poll.rst
index cfb73e6027a5..6a863cfda6e0 100644
--- a/Documentation/media/uapi/cec/cec-func-poll.rst
+++ b/Documentation/media/uapi/cec/cec-func-poll.rst
@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 With the :c:func:`poll()` function applications can wait for CEC
 events.
 
diff --git a/Documentation/media/uapi/cec/cec-intro.rst b/Documentation/media/uapi/cec/cec-intro.rst
index 4a19ea5323a9..07ee2b8f89d6 100644
--- a/Documentation/media/uapi/cec/cec-intro.rst
+++ b/Documentation/media/uapi/cec/cec-intro.rst
@@ -3,11 +3,6 @@
 Introduction
 ============
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 HDMI connectors provide a single pin for use by the Consumer Electronics
 Control protocol. This protocol allows different devices connected by an
 HDMI cable to communicate. The protocol for CEC version 1.4 is defined
@@ -31,3 +26,15 @@ control just the CEC pin.
 Drivers that support CEC will create a CEC device node (/dev/cecX) to
 give userspace access to the CEC adapter. The
 :ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do.
+
+In order to check the support and test it, it is suggested to download
+the `v4l-utils <https://git.linuxtv.org/v4l-utils.git/>`_ package. It
+provides three tools to handle CEC:
+
+- cec-ctl: the Swiss army knife of CEC. Allows you to configure, transmit
+  and monitor CEC messages.
+
+- cec-compliance: does a CEC compliance test of a remote CEC device to
+  determine how compliant the CEC implementation is.
+
+- cec-follower: emulates a CEC follower.
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
index 2b0ddb14b280..a0e961f11017 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
@@ -29,11 +29,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query
 device information, applications call the ioctl with a pointer to a
 struct :c:type:`cec_caps`. The driver fills the structure and
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
index b878637e91b3..09f09bbe28d4 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current CEC logical addresses, applications call
 :ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a
 struct :c:type:`cec_log_addrs` where the driver stores the logical addresses.
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
index 3357deb43c85..a3cdc75cec3e 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current physical address applications call
 :ref:`ioctl CEC_ADAP_G_PHYS_ADDR <CEC_ADAP_G_PHYS_ADDR>` with a pointer to a __u16 where the
 driver stores the physical address.
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index e256c6605de7..6e589a1fae17 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -30,11 +30,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 CEC devices can send asynchronous events. These can be retrieved by
 calling :c:func:`CEC_DQEVENT`. If the file descriptor is in
 non-blocking mode and no event is pending, then it will return -1 and
diff --git a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
index 4f5818b9d277..e4ded9df0a84 100644
--- a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
@@ -31,11 +31,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent
 applications from stepping on each others toes it must be possible to
 obtain exclusive access to the CEC adapter. This ioctl sets the
diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst
index bdf015b1d1dc..dc2adb391c0a 100644
--- a/Documentation/media/uapi/cec/cec-ioc-receive.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst
@@ -34,11 +34,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To receive a CEC message the application has to fill in the
 ``timeout`` field of struct :c:type:`cec_msg` and pass it to
 :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
diff --git a/Documentation/media/uapi/v4l/pixfmt-007.rst b/Documentation/media/uapi/v4l/pixfmt-007.rst
index 44bb5a7059b3..95a23a28c595 100644
--- a/Documentation/media/uapi/v4l/pixfmt-007.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-007.rst
@@ -211,7 +211,13 @@ Colorspace sRGB (V4L2_COLORSPACE_SRGB)
 The :ref:`srgb` standard defines the colorspace used by most webcams
 and computer graphics. The default transfer function is
 ``V4L2_XFER_FUNC_SRGB``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full range.
+``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited range.
+
+Note that the :ref:`sycc` standard specifies full range quantization,
+however all current capture hardware supported by the kernel convert
+R'G'B' to limited range Y'CbCr. So choosing full range as the default
+would break how applications interpret the quantization range.
+
 The chromaticities of the primary colors and the white reference are:
 
 
@@ -276,7 +282,7 @@ the following ``V4L2_YCBCR_ENC_601`` encoding as defined by :ref:`sycc`:
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
 [-0.5…0.5]. This transform is identical to one defined in SMPTE
-170M/BT.601. The Y'CbCr quantization is full range.
+170M/BT.601. The Y'CbCr quantization is limited range.
 
 
 .. _col-adobergb:
@@ -288,10 +294,15 @@ The :ref:`adobergb` standard defines the colorspace used by computer
 graphics that use the AdobeRGB colorspace. This is also known as the
 :ref:`oprgb` standard. The default transfer function is
 ``V4L2_XFER_FUNC_ADOBERGB``. The default Y'CbCr encoding is
-``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full
-range. The chromaticities of the primary colors and the white reference
-are:
+``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited
+range.
+
+Note that the :ref:`oprgb` standard specifies full range quantization,
+however all current capture hardware supported by the kernel convert
+R'G'B' to limited range Y'CbCr. So choosing full range as the default
+would break how applications interpret the quantization range.
 
+The chromaticities of the primary colors and the white reference are:
 
 
 .. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
@@ -344,7 +355,7 @@ the following ``V4L2_YCBCR_ENC_601`` encoding:
 
 Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
 [-0.5…0.5]. This transform is identical to one defined in SMPTE
-170M/BT.601. The Y'CbCr quantization is full range.
+170M/BT.601. The Y'CbCr quantization is limited range.
 
 
 .. _col-bt2020:
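[Editor's aside: since the patch above changes "full range" to "limited range" in several places, a hedged background sketch of what those terms mean for 8-bit Y'CbCr, using the conventional BT.601 scaling. These formulas are standard reference material, not part of the patch.]

% Limited ("studio") range mapping for 8-bit Y'CbCr, BT.601 convention.
% Inputs: Y' in [0,1] and Cb, Cr in [-0.5,0.5], as produced by the
% V4L2_YCBCR_ENC_601 encoding discussed above.
\[ Y = 16 + 219\,Y', \qquad C_B = 128 + 224\,C_b, \qquad C_R = 128 + 224\,C_r \]
% This yields Y in [16,235] and CB, CR in [16,240]. Full range would
% instead span [0,255]: Y = 255 Y', CB = 128 + 255 Cb, CR = 128 + 255 Cr.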
diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt
index 9ed15f86c17c..15d8d16934fd 100644
--- a/Documentation/networking/mpls-sysctl.txt
+++ b/Documentation/networking/mpls-sysctl.txt
@@ -5,8 +5,8 @@ platform_labels - INTEGER
 	possible to configure forwarding for label values equal to or
 	greater than the number of platform labels.
 
-	A dense utliziation of the entries in the platform label table
-	is possible and expected aas the platform labels are locally
+	A dense utilization of the entries in the platform label table
+	is possible and expected as the platform labels are locally
 	allocated.
 
 	If the number of platform label table entries is set to 0 no
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
index 8a39ce45d8a0..008ecb588317 100644
--- a/Documentation/power/states.txt
+++ b/Documentation/power/states.txt
@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
 The default suspend mode (ie. the one to be used without writing anything into
 /sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
 "s2idle", but it can be overridden by the value of the "mem_sleep_default"
-parameter in the kernel command line. On some ACPI-based systems, depending on
-the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
-is supported.
+parameter in the kernel command line.
 
 The properties of all of the sleep states are described below.
 
diff --git a/Documentation/unaligned-memory-access.txt b/Documentation/unaligned-memory-access.txt
index a445da098bc6..3f76c0c37920 100644
--- a/Documentation/unaligned-memory-access.txt
+++ b/Documentation/unaligned-memory-access.txt
@@ -151,7 +151,7 @@ bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
 #else
 	const u16 *a = (const u16 *)addr1;
 	const u16 *b = (const u16 *)addr2;
-	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
 #endif
 }
 
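[Editor's aside: the one-character fix above is easy to misread, so here is a hedged, standalone userspace rendering of the corrected helper; the local typedefs and main() are illustration only, not the kernel build.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;
typedef uint16_t u16;

static bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
	/* Assumes both addresses are at least 2-byte aligned, which is
	 * the precondition of this #else branch in the document. */
	const u16 *a = (const u16 *)addr1;
	const u16 *b = (const u16 *)addr2;
	/* XOR of equal halfwords is 0, so OR-ing the three XORs is 0 exactly
	 * when all six bytes match; hence the "== 0" in the fix above
	 * (the old "!= 0" returned true for *unequal* addresses). */
	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
}

int main(void)
{
	u8 mac[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
	printf("%d\n", ether_addr_equal(mac, mac)); /* prints 1 */
	return 0;
}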
diff --git a/Documentation/vfio-mediated-device.txt b/Documentation/vfio-mediated-device.txt
index b38afec35edc..d226c7a5ba8b 100644
--- a/Documentation/vfio-mediated-device.txt
+++ b/Documentation/vfio-mediated-device.txt
@@ -127,22 +127,22 @@ the VFIO when devices are unbound from the driver.
 Physical Device Driver Interface
 --------------------------------
 
-The physical device driver interface provides the parent_ops[3] structure to
-define the APIs to manage work in the mediated core driver that is related to
-the physical device.
+The physical device driver interface provides the mdev_parent_ops[3] structure
+to define the APIs to manage work in the mediated core driver that is related
+to the physical device.
 
-The structures in the parent_ops structure are as follows:
+The structures in the mdev_parent_ops structure are as follows:
 
 * dev_attr_groups: attributes of the parent device
 * mdev_attr_groups: attributes of the mediated device
 * supported_config: attributes to define supported configurations
 
-The functions in the parent_ops structure are as follows:
+The functions in the mdev_parent_ops structure are as follows:
 
 * create: allocate basic resources in a driver for a mediated device
 * remove: free resources in a driver when a mediated device is destroyed
 
-The callbacks in the parent_ops structure are as follows:
+The callbacks in the mdev_parent_ops structure are as follows:
 
 * open: open callback of mediated device
 * close: close callback of mediated device
@@ -151,14 +151,14 @@ The callbacks in the parent_ops structure are as follows:
 * write: write emulation callback
 * mmap: mmap emulation callback
 
-A driver should use the parent_ops structure in the function call to register
-itself with the mdev core driver:
+A driver should use the mdev_parent_ops structure in the function call to
+register itself with the mdev core driver:
 
 extern int mdev_register_device(struct device *dev,
-                                const struct parent_ops *ops);
+                                const struct mdev_parent_ops *ops);
 
-However, the parent_ops structure is not required in the function call that a
-driver should use to unregister itself with the mdev core driver:
+However, the mdev_parent_ops structure is not required in the function call
+that a driver should use to unregister itself with the mdev core driver:
 
 extern void mdev_unregister_device(struct device *dev);
 
@@ -223,6 +223,9 @@ Directories and files under the sysfs for Each Physical Device
 
   sprintf(buf, "%s-%s", dev_driver_string(parent->dev), group->name);
 
+  (or using mdev_parent_dev(mdev) to arrive at the parent device outside
+  of the core mdev code)
+
 * device_api
 
   This attribute should show which device API is being created, for example,
@@ -394,5 +397,5 @@ References
 
 [1] See Documentation/vfio.txt for more information on VFIO.
 [2] struct mdev_driver in include/linux/mdev.h
-[3] struct parent_ops in include/linux/mdev.h
+[3] struct mdev_parent_ops in include/linux/mdev.h
 [4] struct vfio_iommu_driver_ops in include/linux/vfio.h
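[Editor's aside: to make the register/unregister prototypes quoted in the patch above concrete, a minimal sketch of a parent driver handing its mdev_parent_ops to the core. The create/remove callback signatures are assumptions based on include/linux/mdev.h of this kernel generation, and the ops table is abbreviated; this is not a complete parent driver.]

#include <linux/device.h>
#include <linux/mdev.h>
#include <linux/module.h>

/* Assumed 4.10-era signature: int (*create)(struct kobject *, struct mdev_device *). */
static int my_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
	/* allocate per-mdev state here (hypothetical) */
	return 0;
}

static int my_mdev_remove(struct mdev_device *mdev)
{
	/* free per-mdev state here */
	return 0;
}

static const struct mdev_parent_ops my_parent_ops = {
	.owner  = THIS_MODULE,
	.create = my_mdev_create,
	.remove = my_mdev_remove,
	/* supported_type_groups and the open/release/read/write/ioctl/mmap
	 * callbacks are omitted from this sketch. */
};

static int my_probe(struct device *dev)
{
	/* hand the ops to the mdev core; pairs with mdev_unregister_device(dev) */
	return mdev_register_device(dev, &my_parent_ops);
}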
diff --git a/Documentation/vm/page_frags b/Documentation/vm/page_frags
new file mode 100644
index 000000000000..a6714565dbf9
--- /dev/null
+++ b/Documentation/vm/page_frags
@@ -0,0 +1,42 @@
+Page fragments
+--------------
+
+A page fragment is an arbitrary-length arbitrary-offset area of memory
+which resides within a 0 or higher order compound page. Multiple
+fragments within that page are individually refcounted, in the page's
+reference counter.
+
+The page_frag functions, page_frag_alloc and page_frag_free, provide a
+simple allocation framework for page fragments. This is used by the
+network stack and network device drivers to provide a backing region of
+memory for use as either an sk_buff->head, or to be used in the "frags"
+portion of skb_shared_info.
+
+In order to make use of the page fragment APIs a backing page fragment
+cache is needed. This provides a central point for the fragment allocation
+and tracks allows multiple calls to make use of a cached page. The
+advantage to doing this is that multiple calls to get_page can be avoided
+which can be expensive at allocation time. However due to the nature of
+this caching it is required that any calls to the cache be protected by
+either a per-cpu limitation, or a per-cpu limitation and forcing interrupts
+to be disabled when executing the fragment allocation.
+
+The network stack uses two separate caches per CPU to handle fragment
+allocation. The netdev_alloc_cache is used by callers making use of the
+__netdev_alloc_frag and __netdev_alloc_skb calls. The napi_alloc_cache is
+used by callers of the __napi_alloc_frag and __napi_alloc_skb calls. The
+main difference between these two calls is the context in which they may be
+called. The "netdev" prefixed functions are usable in any context as these
+functions will disable interrupts, while the "napi" prefixed functions are
+only usable within the softirq context.
+
+Many network device drivers use a similar methodology for allocating page
+fragments, but the page fragments are cached at the ring or descriptor
+level. In order to enable these cases it is necessary to provide a generic
+way of tearing down a page cache. For this reason __page_frag_cache_drain
+was implemented. It allows for freeing multiple references from a single
+page via a single call. The advantage to doing this is that it allows for
+cleaning up the multiple references that were added to a page in order to
+avoid calling get_page per allocation.
+
+Alexander Duyck, Nov 29, 2016.
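[Editor's aside: as a rough illustration of the API the new document describes, a sketch of allocating and freeing one fragment through a page_frag_cache. The call names follow the page_frag_alloc/page_frag_free functions mentioned in the text; the cache variable and wrapper functions are hypothetical, and the caller is assumed to already satisfy the per-cpu/interrupt constraints described above.]

#include <linux/gfp.h>
#include <linux/mm_types.h>

static struct page_frag_cache frag_cache;	/* in practice one cache per CPU */

static void *grab_fragment(unsigned int size)
{
	/*
	 * Carve a fragment out of the cached compound page; when the cache
	 * runs dry a fresh page is allocated and pre-charged with one
	 * reference per expected fragment, avoiding get_page per call.
	 */
	return page_frag_alloc(&frag_cache, size, GFP_ATOMIC);
}

static void drop_fragment(void *data)
{
	/* drops exactly one reference on the fragment's backing page */
	page_frag_free(data);
}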
diff --git a/MAINTAINERS b/MAINTAINERS
index cfff2c9e3d94..527d13759ecc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -81,7 +81,6 @@ Descriptions of section entries:
 	Q: Patchwork web based patch tracking system site
 	T: SCM tree type and location.
 	   Type is one of: git, hg, quilt, stgit, topgit
-	B: Bug tracking system location.
 	S: Status, one of the following:
 	   Supported:	Someone is actually paid to look after this.
 	   Maintained:	Someone actually looks after it.
@@ -878,8 +877,8 @@ S: Odd fixes
 F:	drivers/hwmon/applesmc.c
 
 APPLETALK NETWORK LAYER
-M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-S:	Maintained
+L:	netdev@vger.kernel.org
+S:	Odd fixes
 F:	drivers/net/appletalk/
 F:	net/appletalk/
 
@@ -977,6 +976,7 @@ M: Russell King <linux@armlinux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://www.armlinux.org.uk/
 S:	Maintained
+T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git
 F:	arch/arm/
 
 ARM SUB-ARCHITECTURES
@@ -1091,7 +1091,7 @@ F: arch/arm/boot/dts/aspeed-*
 F:	drivers/*/*aspeed*
 
 ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 M:	Alexandre Belloni <alexandre.belloni@free-electrons.com>
 M:	Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1154,6 +1154,7 @@ ARM/CLKDEV SUPPORT
 M:	Russell King <linux@armlinux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
+T:	git git://git.armlinux.org.uk/~rmk/linux-arm.git clkdev
 F:	arch/arm/include/asm/clkdev.h
 F:	drivers/clk/clkdev.c
 
@@ -1689,6 +1690,7 @@ M: Krzysztof Kozlowski <krzk@kernel.org>
 R:	Javier Martinez Canillas <javier@osg.samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+Q:	https://patchwork.kernel.org/project/linux-samsung-soc/list/
 S:	Maintained
 F:	arch/arm/boot/dts/s3c*
 F:	arch/arm/boot/dts/s5p*
@@ -1771,7 +1773,7 @@ F: drivers/soc/renesas/
 F:	include/linux/soc/renesas/
 
 ARM/SOCFPGA ARCHITECTURE
-M:	Dinh Nguyen <dinguyen@opensource.altera.com>
+M:	Dinh Nguyen <dinguyen@kernel.org>
 S:	Maintained
 F:	arch/arm/mach-socfpga/
 F:	arch/arm/boot/dts/socfpga*
@@ -1781,7 +1783,7 @@ W: http://www.rocketboards.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 
 ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
-M:	Dinh Nguyen <dinguyen@opensource.altera.com>
+M:	Dinh Nguyen <dinguyen@kernel.org>
 S:	Maintained
 F:	drivers/clk/socfpga/
 
@@ -2173,64 +2175,56 @@ F: include/linux/atm*
 F:	include/uapi/linux/atm*
 
 ATMEL AT91 / AT32 MCI DRIVER
-M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
 S:	Maintained
 F:	drivers/mmc/host/atmel-mci.c
 
 ATMEL AT91 SAMA5D2-Compatible Shutdown Controller
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 S:	Supported
 F:	drivers/power/reset/at91-sama5d2_shdwc.c
 
 ATMEL SAMA5D2 ADC DRIVER
-M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
 L:	linux-iio@vger.kernel.org
 S:	Supported
 F:	drivers/iio/adc/at91-sama5d2_adc.c
 
 ATMEL Audio ALSA driver
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
 F:	sound/soc/atmel
 
-ATMEL DMA DRIVER
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Supported
-F:	drivers/dma/at_hdmac.c
-F:	drivers/dma/at_hdmac_regs.h
-F:	include/linux/platform_data/dma-atmel.h
-
 ATMEL XDMA DRIVER
-M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
 L:	linux-arm-kernel@lists.infradead.org
 L:	dmaengine@vger.kernel.org
 S:	Supported
 F:	drivers/dma/at_xdmac.c
 
 ATMEL I2C DRIVER
-M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
 L:	linux-i2c@vger.kernel.org
 S:	Supported
 F:	drivers/i2c/busses/i2c-at91.c
 
 ATMEL ISI DRIVER
-M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
 L:	linux-media@vger.kernel.org
 S:	Supported
 F:	drivers/media/platform/soc_camera/atmel-isi.c
 F:	include/media/atmel-isi.h
 
 ATMEL LCDFB DRIVER
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/fbdev/atmel_lcdfb.c
 F:	include/video/atmel_lcdc.h
 
 ATMEL MACB ETHERNET DRIVER
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 S:	Supported
 F:	drivers/net/ethernet/cadence/
 
@@ -2242,32 +2236,32 @@ S: Supported
 F:	drivers/mtd/nand/atmel_nand*
 
 ATMEL SDMMC DRIVER
-M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
 L:	linux-mmc@vger.kernel.org
 S:	Supported
 F:	drivers/mmc/host/sdhci-of-at91.c
 
 ATMEL SPI DRIVER
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 S:	Supported
 F:	drivers/spi/spi-atmel.*
 
 ATMEL SSC DRIVER
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 F:	drivers/misc/atmel-ssc.c
 F:	include/linux/atmel-ssc.h
 
 ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 F:	drivers/misc/atmel_tclib.c
 F:	drivers/clocksource/tcb_clksrc.c
 
 ATMEL USBA UDC DRIVER
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
 F:	drivers/usb/gadget/udc/atmel_usba_udc.*
@@ -3573,7 +3567,7 @@ F: drivers/infiniband/hw/cxgb3/
3573F: include/uapi/rdma/cxgb3-abi.h 3567F: include/uapi/rdma/cxgb3-abi.h
3574 3568
3575CXGB4 ETHERNET DRIVER (CXGB4) 3569CXGB4 ETHERNET DRIVER (CXGB4)
3576M: Hariprasad S <hariprasad@chelsio.com> 3570M: Ganesh Goudar <ganeshgr@chelsio.com>
3577L: netdev@vger.kernel.org 3571L: netdev@vger.kernel.org
3578W: http://www.chelsio.com 3572W: http://www.chelsio.com
3579S: Supported 3573S: Supported
@@ -3800,6 +3794,7 @@ F: include/linux/devcoredump.h
3800DEVICE FREQUENCY (DEVFREQ) 3794DEVICE FREQUENCY (DEVFREQ)
3801M: MyungJoo Ham <myungjoo.ham@samsung.com> 3795M: MyungJoo Ham <myungjoo.ham@samsung.com>
3802M: Kyungmin Park <kyungmin.park@samsung.com> 3796M: Kyungmin Park <kyungmin.park@samsung.com>
3797R: Chanwoo Choi <cw00.choi@samsung.com>
3803L: linux-pm@vger.kernel.org 3798L: linux-pm@vger.kernel.org
3804T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git 3799T: git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
3805S: Maintained 3800S: Maintained
@@ -4105,18 +4100,24 @@ F: drivers/gpu/drm/bridge/
4105 4100
4106DRM DRIVER FOR BOCHS VIRTUAL GPU 4101DRM DRIVER FOR BOCHS VIRTUAL GPU
4107M: Gerd Hoffmann <kraxel@redhat.com> 4102M: Gerd Hoffmann <kraxel@redhat.com>
4108S: Odd Fixes 4103L: virtualization@lists.linux-foundation.org
4104T: git git://git.kraxel.org/linux drm-qemu
4105S: Maintained
4109F: drivers/gpu/drm/bochs/ 4106F: drivers/gpu/drm/bochs/
4110 4107
4111DRM DRIVER FOR QEMU'S CIRRUS DEVICE 4108DRM DRIVER FOR QEMU'S CIRRUS DEVICE
4112M: Dave Airlie <airlied@redhat.com> 4109M: Dave Airlie <airlied@redhat.com>
4113S: Odd Fixes 4110M: Gerd Hoffmann <kraxel@redhat.com>
4111L: virtualization@lists.linux-foundation.org
4112T: git git://git.kraxel.org/linux drm-qemu
4113S: Obsolete
4114W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
4114F: drivers/gpu/drm/cirrus/ 4115F: drivers/gpu/drm/cirrus/
4115 4116
4116RADEON and AMDGPU DRM DRIVERS 4117RADEON and AMDGPU DRM DRIVERS
4117M: Alex Deucher <alexander.deucher@amd.com> 4118M: Alex Deucher <alexander.deucher@amd.com>
4118M: Christian König <christian.koenig@amd.com> 4119M: Christian König <christian.koenig@amd.com>
4119L: dri-devel@lists.freedesktop.org 4120L: amd-gfx@lists.freedesktop.org
4120T: git git://people.freedesktop.org/~agd5f/linux 4121T: git git://people.freedesktop.org/~agd5f/linux
4121S: Supported 4122S: Supported
4122F: drivers/gpu/drm/radeon/ 4123F: drivers/gpu/drm/radeon/
@@ -4152,7 +4153,7 @@ F: Documentation/gpu/i915.rst
4152INTEL GVT-g DRIVERS (Intel GPU Virtualization) 4153INTEL GVT-g DRIVERS (Intel GPU Virtualization)
4153M: Zhenyu Wang <zhenyuw@linux.intel.com> 4154M: Zhenyu Wang <zhenyuw@linux.intel.com>
4154M: Zhi Wang <zhi.a.wang@intel.com> 4155M: Zhi Wang <zhi.a.wang@intel.com>
4155L: igvt-g-dev@lists.01.org 4156L: intel-gvt-dev@lists.freedesktop.org
4156L: intel-gfx@lists.freedesktop.org 4157L: intel-gfx@lists.freedesktop.org
4157W: https://01.org/igvt-g 4158W: https://01.org/igvt-g
4158T: git https://github.com/01org/gvt-linux.git 4159T: git https://github.com/01org/gvt-linux.git
@@ -4303,7 +4304,10 @@ F: Documentation/devicetree/bindings/display/renesas,du.txt
4303 4304
4304DRM DRIVER FOR QXL VIRTUAL GPU 4305DRM DRIVER FOR QXL VIRTUAL GPU
4305M: Dave Airlie <airlied@redhat.com> 4306M: Dave Airlie <airlied@redhat.com>
4306S: Odd Fixes 4307M: Gerd Hoffmann <kraxel@redhat.com>
4308L: virtualization@lists.linux-foundation.org
4309T: git git://git.kraxel.org/linux drm-qemu
4310S: Maintained
4307F: drivers/gpu/drm/qxl/ 4311F: drivers/gpu/drm/qxl/
4308F: include/uapi/drm/qxl_drm.h 4312F: include/uapi/drm/qxl_drm.h
4309 4313
@@ -5080,9 +5084,11 @@ F: drivers/net/wan/dlci.c
5080F: drivers/net/wan/sdla.c 5084F: drivers/net/wan/sdla.c
5081 5085
5082FRAMEBUFFER LAYER 5086FRAMEBUFFER LAYER
5087M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
5083L: linux-fbdev@vger.kernel.org 5088L: linux-fbdev@vger.kernel.org
5089T: git git://github.com/bzolnier/linux.git
5084Q: http://patchwork.kernel.org/project/linux-fbdev/list/ 5090Q: http://patchwork.kernel.org/project/linux-fbdev/list/
5085S: Orphan 5091S: Maintained
5086F: Documentation/fb/ 5092F: Documentation/fb/
5087F: drivers/video/ 5093F: drivers/video/
5088F: include/video/ 5094F: include/video/
@@ -5504,6 +5510,7 @@ M: Alex Elder <elder@kernel.org>
5504M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 5510M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
5505S: Maintained 5511S: Maintained
5506F: drivers/staging/greybus/ 5512F: drivers/staging/greybus/
5513L: greybus-dev@lists.linaro.org
5507 5514
5508GREYBUS AUDIO PROTOCOLS DRIVERS 5515GREYBUS AUDIO PROTOCOLS DRIVERS
5509M: Vaibhav Agarwal <vaibhav.sr@gmail.com> 5516M: Vaibhav Agarwal <vaibhav.sr@gmail.com>
@@ -5961,6 +5968,7 @@ F: drivers/media/platform/sti/hva
5961Hyper-V CORE AND DRIVERS 5968Hyper-V CORE AND DRIVERS
5962M: "K. Y. Srinivasan" <kys@microsoft.com> 5969M: "K. Y. Srinivasan" <kys@microsoft.com>
5963M: Haiyang Zhang <haiyangz@microsoft.com> 5970M: Haiyang Zhang <haiyangz@microsoft.com>
5971M: Stephen Hemminger <sthemmin@microsoft.com>
5964L: devel@linuxdriverproject.org 5972L: devel@linuxdriverproject.org
5965S: Maintained 5973S: Maintained
5966F: arch/x86/include/asm/mshyperv.h 5974F: arch/x86/include/asm/mshyperv.h
@@ -6719,9 +6727,8 @@ S: Odd Fixes
6719F: drivers/tty/ipwireless/ 6727F: drivers/tty/ipwireless/
6720 6728
6721IPX NETWORK LAYER 6729IPX NETWORK LAYER
6722M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
6723L: netdev@vger.kernel.org 6730L: netdev@vger.kernel.org
6724S: Maintained 6731S: Odd fixes
6725F: include/net/ipx.h 6732F: include/net/ipx.h
6726F: include/uapi/linux/ipx.h 6733F: include/uapi/linux/ipx.h
6727F: net/ipx/ 6734F: net/ipx/
@@ -7493,8 +7500,8 @@ S: Maintained
7493F: drivers/misc/lkdtm* 7500F: drivers/misc/lkdtm*
7494 7501
7495LLC (802.2) 7502LLC (802.2)
7496M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 7503L: netdev@vger.kernel.org
7497S: Maintained 7504S: Odd fixes
7498F: include/linux/llc.h 7505F: include/linux/llc.h
7499F: include/uapi/linux/llc.h 7506F: include/uapi/linux/llc.h
7500F: include/net/llc* 7507F: include/net/llc*
@@ -7701,8 +7708,10 @@ F: drivers/net/dsa/mv88e6xxx/
7701F: Documentation/devicetree/bindings/net/dsa/marvell.txt 7708F: Documentation/devicetree/bindings/net/dsa/marvell.txt
7702 7709
7703MARVELL ARMADA DRM SUPPORT 7710MARVELL ARMADA DRM SUPPORT
7704M: Russell King <rmk+kernel@armlinux.org.uk> 7711M: Russell King <linux@armlinux.org.uk>
7705S: Maintained 7712S: Maintained
7713T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-devel
7714T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-fixes
7706F: drivers/gpu/drm/armada/ 7715F: drivers/gpu/drm/armada/
7707F: include/uapi/drm/armada_drm.h 7716F: include/uapi/drm/armada_drm.h
7708F: Documentation/devicetree/bindings/display/armada/ 7717F: Documentation/devicetree/bindings/display/armada/
@@ -8174,6 +8183,15 @@ S: Maintained
8174F: drivers/tty/serial/atmel_serial.c 8183F: drivers/tty/serial/atmel_serial.c
8175F: include/linux/atmel_serial.h 8184F: include/linux/atmel_serial.h
8176 8185
8186MICROCHIP / ATMEL DMA DRIVER
8187M: Ludovic Desroches <ludovic.desroches@microchip.com>
8188L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
8189L: dmaengine@vger.kernel.org
8190S: Supported
8191F: drivers/dma/at_hdmac.c
8192F: drivers/dma/at_hdmac_regs.h
8193F: include/linux/platform_data/dma-atmel.h
8194
8177MICROCHIP / ATMEL ISC DRIVER 8195MICROCHIP / ATMEL ISC DRIVER
8178M: Songjun Wu <songjun.wu@microchip.com> 8196M: Songjun Wu <songjun.wu@microchip.com>
8179L: linux-media@vger.kernel.org 8197L: linux-media@vger.kernel.org
@@ -8852,17 +8870,22 @@ F: drivers/video/fbdev/nvidia/
8852NVM EXPRESS DRIVER 8870NVM EXPRESS DRIVER
8853M: Keith Busch <keith.busch@intel.com> 8871M: Keith Busch <keith.busch@intel.com>
8854M: Jens Axboe <axboe@fb.com> 8872M: Jens Axboe <axboe@fb.com>
8873M: Christoph Hellwig <hch@lst.de>
8874M: Sagi Grimberg <sagi@grimberg.me>
8855L: linux-nvme@lists.infradead.org 8875L: linux-nvme@lists.infradead.org
8856T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git 8876T: git://git.infradead.org/nvme.git
8857W: https://kernel.googlesource.com/pub/scm/linux/kernel/git/axboe/linux-block/ 8877W: http://git.infradead.org/nvme.git
8858S: Supported 8878S: Supported
8859F: drivers/nvme/host/ 8879F: drivers/nvme/host/
8860F: include/linux/nvme.h 8880F: include/linux/nvme.h
8881F: include/uapi/linux/nvme_ioctl.h
8861 8882
8862NVM EXPRESS TARGET DRIVER 8883NVM EXPRESS TARGET DRIVER
8863M: Christoph Hellwig <hch@lst.de> 8884M: Christoph Hellwig <hch@lst.de>
8864M: Sagi Grimberg <sagi@grimberg.me> 8885M: Sagi Grimberg <sagi@grimberg.me>
8865L: linux-nvme@lists.infradead.org 8886L: linux-nvme@lists.infradead.org
8887T: git://git.infradead.org/nvme.git
8888W: http://git.infradead.org/nvme.git
8866S: Supported 8889S: Supported
8867F: drivers/nvme/target/ 8890F: drivers/nvme/target/
8868 8891
@@ -8893,8 +8916,10 @@ S: Supported
8893F: drivers/nfc/nxp-nci 8916F: drivers/nfc/nxp-nci
8894 8917
8895NXP TDA998X DRM DRIVER 8918NXP TDA998X DRM DRIVER
8896M: Russell King <rmk+kernel@armlinux.org.uk> 8919M: Russell King <linux@armlinux.org.uk>
8897S: Supported 8920S: Supported
8921T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
8922T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
8898F: drivers/gpu/drm/i2c/tda998x_drv.c 8923F: drivers/gpu/drm/i2c/tda998x_drv.c
8899F: include/drm/i2c/tda998x.h 8924F: include/drm/i2c/tda998x.h
8900 8925
@@ -9710,7 +9735,7 @@ S: Maintained
9710F: drivers/pinctrl/pinctrl-at91.* 9735F: drivers/pinctrl/pinctrl-at91.*
9711 9736
9712PIN CONTROLLER - ATMEL AT91 PIO4 9737PIN CONTROLLER - ATMEL AT91 PIO4
9713M: Ludovic Desroches <ludovic.desroches@atmel.com> 9738M: Ludovic Desroches <ludovic.desroches@microchip.com>
9714L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 9739L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
9715L: linux-gpio@vger.kernel.org 9740L: linux-gpio@vger.kernel.org
9716S: Supported 9741S: Supported
@@ -9842,7 +9867,7 @@ M: Mark Rutland <mark.rutland@arm.com>
9842M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> 9867M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
9843L: linux-arm-kernel@lists.infradead.org 9868L: linux-arm-kernel@lists.infradead.org
9844S: Maintained 9869S: Maintained
9845F: drivers/firmware/psci.c 9870F: drivers/firmware/psci*.c
9846F: include/linux/psci.h 9871F: include/linux/psci.h
9847F: include/uapi/linux/psci.h 9872F: include/uapi/linux/psci.h
9848 9873
@@ -10169,7 +10194,6 @@ F: drivers/media/tuners/qt1010*
10169QUALCOMM ATHEROS ATH9K WIRELESS DRIVER 10194QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
10170M: QCA ath9k Development <ath9k-devel@qca.qualcomm.com> 10195M: QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
10171L: linux-wireless@vger.kernel.org 10196L: linux-wireless@vger.kernel.org
10172L: ath9k-devel@lists.ath9k.org
10173W: http://wireless.kernel.org/en/users/Drivers/ath9k 10197W: http://wireless.kernel.org/en/users/Drivers/ath9k
10174S: Supported 10198S: Supported
10175F: drivers/net/wireless/ath/ath9k/ 10199F: drivers/net/wireless/ath/ath9k/
@@ -13040,7 +13064,7 @@ F: drivers/input/serio/userio.c
13040F: include/uapi/linux/userio.h 13064F: include/uapi/linux/userio.h
13041 13065
13042VIRTIO CONSOLE DRIVER 13066VIRTIO CONSOLE DRIVER
13043M: Amit Shah <amit.shah@redhat.com> 13067M: Amit Shah <amit@kernel.org>
13044L: virtualization@lists.linux-foundation.org 13068L: virtualization@lists.linux-foundation.org
13045S: Maintained 13069S: Maintained
13046F: drivers/char/virtio_console.c 13070F: drivers/char/virtio_console.c
@@ -13075,6 +13099,7 @@ M: David Airlie <airlied@linux.ie>
13075M: Gerd Hoffmann <kraxel@redhat.com> 13099M: Gerd Hoffmann <kraxel@redhat.com>
13076L: dri-devel@lists.freedesktop.org 13100L: dri-devel@lists.freedesktop.org
13077L: virtualization@lists.linux-foundation.org 13101L: virtualization@lists.linux-foundation.org
13102T: git git://git.kraxel.org/linux drm-qemu
13078S: Maintained 13103S: Maintained
13079F: drivers/gpu/drm/virtio/ 13104F: drivers/gpu/drm/virtio/
13080F: include/uapi/linux/virtio_gpu.h 13105F: include/uapi/linux/virtio_gpu.h
@@ -13347,10 +13372,8 @@ S: Maintained
13347F: drivers/input/misc/wistron_btns.c 13372F: drivers/input/misc/wistron_btns.c
13348 13373
13349WL3501 WIRELESS PCMCIA CARD DRIVER 13374WL3501 WIRELESS PCMCIA CARD DRIVER
13350M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
13351L: linux-wireless@vger.kernel.org 13375L: linux-wireless@vger.kernel.org
13352W: http://oops.ghostprotocols.net:81/blog 13376S: Odd fixes
13353S: Maintained
13354F: drivers/net/wireless/wl3501* 13377F: drivers/net/wireless/wl3501*
13355 13378
13356WOLFSON MICROELECTRONICS DRIVERS 13379WOLFSON MICROELECTRONICS DRIVERS
@@ -13426,6 +13449,7 @@ F: arch/x86/
13426 13449
13427X86 PLATFORM DRIVERS 13450X86 PLATFORM DRIVERS
13428M: Darren Hart <dvhart@infradead.org> 13451M: Darren Hart <dvhart@infradead.org>
13452M: Andy Shevchenko <andy@infradead.org>
13429L: platform-driver-x86@vger.kernel.org 13453L: platform-driver-x86@vger.kernel.org
13430T: git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git 13454T: git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
13431S: Maintained 13455S: Maintained
@@ -13527,11 +13551,11 @@ F: arch/x86/xen/*swiotlb*
13527F: drivers/xen/*swiotlb* 13551F: drivers/xen/*swiotlb*
13528 13552
13529XFS FILESYSTEM 13553XFS FILESYSTEM
13530M: Dave Chinner <david@fromorbit.com> 13554M: Darrick J. Wong <darrick.wong@oracle.com>
13531M: linux-xfs@vger.kernel.org 13555M: linux-xfs@vger.kernel.org
13532L: linux-xfs@vger.kernel.org 13556L: linux-xfs@vger.kernel.org
13533W: http://xfs.org/ 13557W: http://xfs.org/
13534T: git git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs.git 13558T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
13535S: Supported 13559S: Supported
13536F: Documentation/filesystems/xfs.txt 13560F: Documentation/filesystems/xfs.txt
13537F: fs/xfs/ 13561F: fs/xfs/
@@ -13597,6 +13621,7 @@ F: drivers/net/hamradio/z8530.h
13597 13621
13598ZBUD COMPRESSED PAGE ALLOCATOR 13622ZBUD COMPRESSED PAGE ALLOCATOR
13599M: Seth Jennings <sjenning@redhat.com> 13623M: Seth Jennings <sjenning@redhat.com>
13624M: Dan Streetman <ddstreet@ieee.org>
13600L: linux-mm@kvack.org 13625L: linux-mm@kvack.org
13601S: Maintained 13626S: Maintained
13602F: mm/zbud.c 13627F: mm/zbud.c
@@ -13652,6 +13677,7 @@ F: Documentation/vm/zsmalloc.txt
13652 13677
13653ZSWAP COMPRESSED SWAP CACHING 13678ZSWAP COMPRESSED SWAP CACHING
13654M: Seth Jennings <sjenning@redhat.com> 13679M: Seth Jennings <sjenning@redhat.com>
13680M: Dan Streetman <ddstreet@ieee.org>
13655L: linux-mm@kvack.org 13681L: linux-mm@kvack.org
13656S: Maintained 13682S: Maintained
13657F: mm/zswap.c 13683F: mm/zswap.c
diff --git a/Makefile b/Makefile
index ec411ba9e40f..f1e6a02a0c19 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
-NAME = Roaring Lionus
+EXTRAVERSION =
+NAME = Fearless Coyote
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -797,7 +797,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
 	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
 	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
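[Editor's note] The probe script is now invoked with KBUILD_CFLAGS, so 'asm goto' support is tested under the same flags the kernel actually builds with. For readers unfamiliar with the construct being probed, a minimal user-space sketch (x86 mnemonics, purely illustrative — this is not the kernel's own test program):

	/* 'asm goto' lets inline assembly branch straight to a C label;
	 * the kernel builds its low-overhead jump labels on top of it. */
	#include <stdio.h>

	static int asm_goto_works(void)
	{
		asm goto("jmp %l[taken]" : : : : taken);
		return 0;	/* never reached: the asm always jumps */
	taken:
		return 1;
	}

	int main(void)
	{
		printf("asm goto branch taken: %d\n", asm_goto_works());
		return 0;
	}

With a compiler that lacks the feature this fails to build, which is the property scripts/gcc-goto.sh relies on.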
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index c75d29077e4a..283099c9560a 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -29,7 +29,7 @@ config ARC
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
 	select HAVE_MEMBLOCK
-	select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_OPROFILE
 	select HAVE_PERF_EVENTS
 	select HANDLE_DOMAIN_IRQ
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index b3410ff6a62d..5008021fba98 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -67,7 +67,7 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_IC_PTAG_HI	0x1F
 
 /* Bit val in IC_CTRL */
-#define IC_CTRL_CACHE_DISABLE	0x1
+#define IC_CTRL_DIS		0x1
 
 /* Data cache related Auxiliary registers */
 #define ARC_REG_DC_BCR		0x72	/* Build Config reg */
@@ -80,8 +80,9 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_DC_PTAG_HI	0x5F
 
 /* Bit val in DC_CTRL */
-#define DC_CTRL_INV_MODE_FLUSH	0x40
-#define DC_CTRL_FLUSH_STATUS	0x100
+#define DC_CTRL_DIS		0x001
+#define DC_CTRL_INV_MODE_FLUSH	0x040
+#define DC_CTRL_FLUSH_STATUS	0x100
 
 /*System-level cache (L2 cache) related Auxiliary registers */
 #define ARC_REG_SLC_CFG		0x901
@@ -92,8 +93,8 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_SLC_RGN_END	0x916
 
 /* Bit val in SLC_CONTROL */
+#define SLC_CTRL_DIS		0x001
 #define SLC_CTRL_IM		0x040
-#define SLC_CTRL_DISABLE	0x001
 #define SLC_CTRL_BUSY		0x100
 #define SLC_CTRL_RGN_OP_INV	0x200
 
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index a36e8601114d..d5da2115d78a 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
26 " lp 1f \n" 26 " lp 1f \n"
27 " nop \n" 27 " nop \n"
28 "1: \n" 28 "1: \n"
29 : : "r"(loops)); 29 :
30 : "r"(loops)
31 : "lp_count");
30} 32}
31 33
32extern void __bad_udelay(void); 34extern void __bad_udelay(void);
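[Editor's note] The point of the change above is the third colon section: LP_COUNT is ARC's zero-overhead-loop counter, and without naming it as clobbered the compiler may keep a live value there across the asm. A generic illustration of the same clobber-list idiom (x86 flavour, not ARC code — a sketch only):

	#include <stdint.h>

	/* Busy-wait roughly 'loops' iterations. The "+r" constraint and the
	 * "cc" clobber tell the compiler everything the asm modifies, just
	 * as "lp_count" does in the ARC __delay() above. */
	static inline void spin_loops(uint64_t loops)
	{
		asm volatile("1:	dec  %0\n\t"
			     "	jnz  1b"
			     : "+r"(loops)	/* read-write: value is consumed */
			     :
			     : "cc");		/* condition flags are clobbered */
	}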
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index b5ff87e6f4b7..aee1a77934cf 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -16,6 +16,7 @@
 	;
 	; Now manually save: r12, sp, fp, gp, r25
 
+	PUSH	r30
 	PUSH	r12
 
 	; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
 	POPAX	AUX_USER_SP
 1:
 	POP	r12
+	POP	r30
 
 .endm
 
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 6e91d8b339c3..567590ea8f6c 100644
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -14,13 +14,13 @@
 
 #include <asm-generic/module.h>
 
-#ifdef CONFIG_ARC_DW2_UNWIND
 struct mod_arch_specific {
+#ifdef CONFIG_ARC_DW2_UNWIND
 	void *unw_info;
 	int unw_sec_idx;
+#endif
 	const char *secstr;
 };
-#endif
 
 #define MODULE_PROC_FAMILY "ARC700"
 
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 69095da1fcfd..47111d565a95 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -84,7 +84,7 @@ struct pt_regs {
 	unsigned long fp;
 	unsigned long sp;	/* user/kernel sp depending on where we came from */
 
-	unsigned long r12;
+	unsigned long r12, r30;
 
 	/*------- Below list auto saved by h/w -----------*/
 	unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index cb954cdab070..c568a9df82b1 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -31,6 +31,7 @@ extern int root_mountflags, end_mem;
 
 void setup_processor(void);
 void __init setup_arch_memory(void);
+long __init arc_get_mem_sz(void);
 
 /* Helpers used in arc_*_mumbojumbo routines */
 #define IS_AVAIL1(v, s)		((v) ? s : "")
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 689dd867fdff..8b90d25a15cc 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -71,14 +71,14 @@ ENTRY(stext)
 	GET_CPU_ID  r5
 	cmp	r5, 0
 	mov.nz	r0, r5
-#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
-	; Non-Master can proceed as system would be booted sufficiently
-	jnz	first_lines_of_secondary
-#else
+	bz	.Lmaster_proceed
+
 	; Non-Masters wait for Master to boot enough and bring them up
-	jnz	arc_platform_smp_wait_to_boot
-#endif
-	; Master falls thru
+	; when they resume, tail-call to entry point
+	mov	blink, @first_lines_of_secondary
+	j	arc_platform_smp_wait_to_boot
+
+.Lmaster_proceed:
 #endif
 
 	; Clear BSS before updating any globals
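[Editor's note] The new sequence loads blink (ARC's return-address register) with the secondary entry point before jumping, so returning from arc_platform_smp_wait_to_boot() lands in first_lines_of_secondary. In rough C terms — a control-flow sketch only, not real kernel code:

	/* What a non-master core now does, expressed as ordinary calls */
	void secondary_boot_path(int cpu)
	{
		arc_platform_smp_wait_to_boot(cpu);	/* may spin, may return at once */
		first_lines_of_secondary();		/* reached via blink on return */
	}

Decoupling the wait from the jump is what lets the halt-on-reset and run-on-reset variants share one code path (see the smp.c hunk below).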
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 994dca7014db..ecef0fb0b66c 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -77,20 +77,20 @@ void arc_init_IRQ(void)
 
 static void arcv2_irq_mask(struct irq_data *data)
 {
-	write_aux_reg(AUX_IRQ_SELECT, data->irq);
+	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
 	write_aux_reg(AUX_IRQ_ENABLE, 0);
 }
 
 static void arcv2_irq_unmask(struct irq_data *data)
 {
-	write_aux_reg(AUX_IRQ_SELECT, data->irq);
+	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
 	write_aux_reg(AUX_IRQ_ENABLE, 1);
 }
 
 void arcv2_irq_enable(struct irq_data *data)
 {
 	/* set default priority */
-	write_aux_reg(AUX_IRQ_SELECT, data->irq);
+	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
 	write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
 
 	/*
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index ce9deb953ca9..8c1fd5c00782 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -57,7 +57,7 @@ static void arc_irq_mask(struct irq_data *data)
 	unsigned int ienb;
 
 	ienb = read_aux_reg(AUX_IENABLE);
-	ienb &= ~(1 << data->irq);
+	ienb &= ~(1 << data->hwirq);
 	write_aux_reg(AUX_IENABLE, ienb);
 }
 
@@ -66,7 +66,7 @@ static void arc_irq_unmask(struct irq_data *data)
 	unsigned int ienb;
 
 	ienb = read_aux_reg(AUX_IENABLE);
-	ienb |= (1 << data->irq);
+	ienb |= (1 << data->hwirq);
 	write_aux_reg(AUX_IENABLE, ienb);
 }
 
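[Editor's note] Both interrupt-controller fixes swap data->irq for data->hwirq: the former is the Linux-global virtual number handed out by the irqdomain code, the latter the controller-local line that the AUX registers actually index; the two only coincide on 1:1 legacy mappings. A hedged sketch of the distinction, built from the calls used above:

	/* Suppose the domain mapped hwirq 5 to virq 42 at probe time. */
	static void sketch_irq_mask(struct irq_data *data)
	{
		/* data->irq   == 42: meaningless to the hardware	*/
		/* data->hwirq ==  5: the line the controller knows	*/
		write_aux_reg(AUX_IRQ_SELECT, irqd_to_hwirq(data)); /* == data->hwirq */
		write_aux_reg(AUX_IRQ_ENABLE, 0);
	}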
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 560c4afc2af4..9f6b68fd4f3b 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -10,6 +10,7 @@
 
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/spinlock.h>
 #include <soc/arc/mcip.h>
 #include <asm/irqflags-arcv2.h>
@@ -92,11 +93,10 @@ static void mcip_probe_n_setup(void)
 	READ_BCR(ARC_REG_MCIP_BCR, mp);
 
 	sprintf(smp_cpuinfo_buf,
-		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
+		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
 		mp.ver, mp.num_cores,
 		IS_AVAIL1(mp.ipi, "IPI "),
 		IS_AVAIL1(mp.idu, "IDU "),
-		IS_AVAIL1(mp.llm, "LLM "),
 		IS_AVAIL1(mp.dbg, "DEBUG "),
 		IS_AVAIL1(mp.gfrc, "GFRC"));
 
@@ -174,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
-#ifdef CONFIG_SMP
 static int
 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 		     bool force)
@@ -204,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 
 	return IRQ_SET_MASK_OK;
 }
-#endif
+
+static void idu_irq_enable(struct irq_data *data)
+{
+	/*
+	 * By default send all common interrupts to all available online CPUs.
+	 * The affinity of common interrupts in IDU must be set manually since
+	 * in some cases the kernel will not call irq_set_affinity() by itself:
+	 *   1. When the kernel is not configured with support of SMP.
+	 *   2. When the kernel is configured with support of SMP but upper
+	 *      interrupt controllers do not support setting of the affinity
+	 *      and cannot propagate it to IDU.
+	 */
+	idu_irq_set_affinity(data, cpu_online_mask, false);
+	idu_irq_unmask(data);
+}
 
 static struct irq_chip idu_irq_chip = {
 	.name			= "MCIP IDU Intc",
 	.irq_mask		= idu_irq_mask,
 	.irq_unmask		= idu_irq_unmask,
+	.irq_enable		= idu_irq_enable,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= idu_irq_set_affinity,
 #endif
@@ -221,10 +235,13 @@ static irq_hw_number_t idu_first_hwirq;
 static void idu_cascade_isr(struct irq_desc *desc)
 {
 	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+	struct irq_chip *core_chip = irq_desc_get_chip(desc);
 	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
 	irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
 
+	chained_irq_enter(core_chip, desc);
 	generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
+	chained_irq_exit(core_chip, desc);
 }
 
 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
@@ -239,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
 			const u32 *intspec, unsigned int intsize,
 			irq_hw_number_t *out_hwirq, unsigned int *out_type)
 {
-	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
-	int distri = intspec[1];
-	unsigned long flags;
-
+	/*
+	 * Ignore value of interrupt distribution mode for common interrupts in
+	 * IDU which resides in intspec[1] since setting an affinity using value
+	 * from Device Tree is deprecated in ARC.
+	 */
+	*out_hwirq = intspec[0];
 	*out_type = IRQ_TYPE_NONE;
 
-	/* XXX: validate distribution scheme again online cpu mask */
-	if (distri == 0) {
-		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	} else {
-		/*
-		 * DEST based distribution for Level Triggered intr can only
-		 * have 1 CPU, so generalize it to always contain 1 cpu
-		 */
-		int cpu = ffs(distri);
-
-		if (cpu != fls(distri))
-			pr_warn("IDU irq %lx distri mode set to cpu %x\n",
-				hwirq, cpu);
-
-		raw_spin_lock_irqsave(&mcip_lock, flags);
-		idu_set_dest(hwirq, cpu);
-		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
-		raw_spin_unlock_irqrestore(&mcip_lock, flags);
-	}
-
 	return 0;
 }
 
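[Editor's note] The cascade handler now brackets the demux with the standard chained-IRQ helpers, which ack/mask the parent line per its flow handler and eoi/unmask it afterwards; without them a level-triggered parent can retrigger immediately or be left masked. The generic pattern (names illustrative, not taken from this driver):

	static void sketch_cascade_isr(struct irq_desc *desc)
	{
		struct irq_chip *parent_chip = irq_desc_get_chip(desc);
		struct irq_domain *child_domain = irq_desc_get_handler_data(desc);
		irq_hw_number_t child_hwirq = 0;	/* decoded from hardware */

		chained_irq_enter(parent_chip, desc);	/* ack/mask the parent */
		generic_handle_irq(irq_find_mapping(child_domain, child_hwirq));
		chained_irq_exit(parent_chip, desc);	/* eoi/unmask the parent */
	}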
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index 42e964db2967..3d99a6091332 100644
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 #ifdef CONFIG_ARC_DW2_UNWIND
 	mod->arch.unw_sec_idx = 0;
 	mod->arch.unw_info = NULL;
-	mod->arch.secstr = secstr;
 #endif
+	mod->arch.secstr = secstr;
 	return 0;
 }
 
@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 
 	}
 
+#ifdef CONFIG_ARC_DW2_UNWIND
 	if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
 		module->arch.unw_sec_idx = tgtsec;
+#endif
 
 	return 0;
 
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 88674d972c9d..2afbafadb6ab 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
  */
 static volatile int wake_flag;
 
+#ifdef CONFIG_ISA_ARCOMPACT
+
+#define __boot_read(f)		f
+#define __boot_write(f, v)	f = v
+
+#else
+
+#define __boot_read(f)		arc_read_uncached_32(&f)
+#define __boot_write(f, v)	arc_write_uncached_32(&f, v)
+
+#endif
+
 static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
 {
 	BUG_ON(cpu == 0);
-	wake_flag = cpu;
+
+	__boot_write(wake_flag, cpu);
 }
 
 void arc_platform_smp_wait_to_boot(int cpu)
 {
-	while (wake_flag != cpu)
+	/* for halt-on-reset, we've waited already */
+	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
+		return;
+
+	while (__boot_read(wake_flag) != cpu)
 		;
 
-	wake_flag = 0;
-	__asm__ __volatile__("j @first_lines_of_secondary	\n");
+	__boot_write(wake_flag, 0);
 }
 
-
 const char *arc_platform_smp_cpuinfo(void)
 {
 	return plat_smp_ops.info ? : "";
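[Editor's note] The handshake above is subtle: on ARCv2 a just-released secondary may still be running with its caches disabled, so a plain (even volatile) store by the master could sit in a cache and never be observed. The __boot_read()/__boot_write() wrappers therefore go straight to memory via arc_read_uncached_32()/arc_write_uncached_32() on ARCv2, while ARCompact keeps plain accesses (the helpers are not needed there). Usage, as the patch wires it up:

	/* master: kick cpu N */
	__boot_write(wake_flag, cpu);

	/* secondary: wait to be kicked, then clear the flag */
	while (__boot_read(wake_flag) != cpu)
		;
	__boot_write(wake_flag, 0);

volatile only stops the compiler from caching the value in a register; it says nothing about the hardware caches, which is the failure mode being fixed here.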
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index abd961f3e763..5f69c3bd59bb 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 	if (state.fault)
 		goto fault;
 
+	/* clear any remnants of delay slot */
 	if (delay_mode(regs)) {
-		regs->ret = regs->bta;
+		regs->ret = regs->bta & ~1U;
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index ec86ac0e3321..d408fa21a07c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -23,7 +23,7 @@
 
 static int l2_line_sz;
 static int ioc_exists;
-int slc_enable = 1, ioc_enable = 0;
+int slc_enable = 1, ioc_enable = 1;
 unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
 unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
 
@@ -271,7 +271,11 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
 
 /*
  * For ARC700 MMUv3 I-cache and D-cache flushes
- * Also reused for HS38 aliasing I-cache configuration
+ *  - ARC700 programming model requires paddr and vaddr be passed in separate
+ *    AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
+ *    caches actually alias or not.
+ *  - For HS38, only the aliasing I-cache configuration uses the PTAG reg
+ *    (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
  */
 static inline
 void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
@@ -458,6 +462,21 @@ static inline void __dc_entire_op(const int op)
 	__after_dc_op(op);
 }
 
+static inline void __dc_disable(void)
+{
+	const int r = ARC_REG_DC_CTRL;
+
+	__dc_entire_op(OP_FLUSH_N_INV);
+	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
+}
+
+static void __dc_enable(void)
+{
+	const int r = ARC_REG_DC_CTRL;
+
+	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
+}
+
 /* For kernel mappings cache operation: index is same as paddr */
 #define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
 
@@ -483,6 +502,8 @@ static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
 #else
 
 #define __dc_entire_op(op)
+#define __dc_disable()
+#define __dc_enable()
 #define __dc_line_op(paddr, vaddr, sz, op)
 #define __dc_line_op_k(paddr, sz, op)
 
@@ -597,6 +618,40 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
 #endif
 }
 
+noinline static void slc_entire_op(const int op)
+{
+	unsigned int ctrl, r = ARC_REG_SLC_CTRL;
+
+	ctrl = read_aux_reg(r);
+
+	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
+		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
+	else
+		ctrl |= SLC_CTRL_IM;
+
+	write_aux_reg(r, ctrl);
+
+	write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+
+	/* Important to wait for flush to complete */
+	while (read_aux_reg(r) & SLC_CTRL_BUSY);
+}
+
+static inline void arc_slc_disable(void)
+{
+	const int r = ARC_REG_SLC_CTRL;
+
+	slc_entire_op(OP_FLUSH_N_INV);
+	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
+}
+
+static inline void arc_slc_enable(void)
+{
+	const int r = ARC_REG_SLC_CTRL;
+
+	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
+}
+
 /***********************************************************
  * Exported APIs
  */
@@ -923,21 +978,54 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
 	return 0;
 }
 
-void arc_cache_init(void)
+/*
+ * IO-Coherency (IOC) setup rules:
+ *
+ * 1. Needs to be at system level, so only once by Master core
+ *    Non-Masters need not be accessing caches at that time
+ *    - They are either HALT_ON_RESET and kick started much later or
+ *    - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
+ *      doesn't perturb caches or coherency unit
+ *
+ * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
+ *    otherwise any straggler data might behave strangely post IOC enabling
+ *
+ * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
+ *    Coherency transactions
+ */
+noinline void __init arc_ioc_setup(void)
 {
-	unsigned int __maybe_unused cpu = smp_processor_id();
-	char str[256];
+	unsigned int ap_sz;
 
-	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+	/* Flush + invalidate + disable L1 dcache */
+	__dc_disable();
+
+	/* Flush + invalidate SLC */
+	if (read_aux_reg(ARC_REG_SLC_BCR))
+		slc_entire_op(OP_FLUSH_N_INV);
+
+	/* IOC Aperture start: TBD: handle non default CONFIG_LINUX_LINK_BASE */
+	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
 
 	/*
-	 * Only master CPU needs to execute rest of function:
-	 *  - Assume SMP so all cores will have same cache config so
-	 *    any geomtry checks will be same for all
-	 *  - IOC setup / dma callbacks only need to be setup once
+	 * IOC Aperture size:
+	 *   decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
+	 * TBD: fix for PGU + 1GB of low mem
+	 * TBD: fix for PAE
 	 */
-	if (cpu)
-		return;
+	ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
+	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);
+
+	write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
+	write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+
+	/* Re-enable L1 dcache */
+	__dc_enable();
+}
+
+void __init arc_cache_init_master(void)
+{
+	unsigned int __maybe_unused cpu = smp_processor_id();
 
 	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
 		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
@@ -985,30 +1073,14 @@ void arc_cache_init(void)
 		}
 	}
 
-	if (is_isa_arcv2() && l2_line_sz && !slc_enable) {
-
-		/* IM set : flush before invalidate */
-		write_aux_reg(ARC_REG_SLC_CTRL,
-			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);
+	/* Note that SLC disable not formally supported till HS 3.0 */
+	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
+		arc_slc_disable();
 
-		write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
-
-		/* Important to wait for flush to complete */
-		while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
-		write_aux_reg(ARC_REG_SLC_CTRL,
-			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
-	}
+	if (is_isa_arcv2() && ioc_enable)
+		arc_ioc_setup();
 
 	if (is_isa_arcv2() && ioc_enable) {
-		/* IO coherency base - 0x8z */
-		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
-		/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
-		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
-		/* Enable partial writes */
-		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
-		/* Enable IO coherency */
-		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
-
 		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
 		__dma_cache_inv = __dma_cache_inv_ioc;
 		__dma_cache_wback = __dma_cache_wback_ioc;
@@ -1022,3 +1094,20 @@ void arc_cache_init(void)
 		__dma_cache_wback = __dma_cache_wback_l1;
 	}
 }
+
+void __ref arc_cache_init(void)
+{
+	unsigned int __maybe_unused cpu = smp_processor_id();
+	char str[256];
+
+	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+	/*
+	 * Only master CPU needs to execute rest of function:
+	 *  - Assume SMP so all cores will have same cache config so
+	 *    any geometry checks will be same for all
+	 *  - IOC setup / dma callbacks only need to be setup once
+	 */
+	if (!cpu)
+		arc_cache_init_master();
+}
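[Editor's note] The aperture-size encoding in arc_ioc_setup() deserves a worked example: the register takes a value n such that the aperture is 2^(n+2) KB, and the code now derives n from the actual low-memory size instead of hard-coding it. A standalone sketch verifying the arithmetic for the common 512 MB case:

	#include <stdio.h>

	static unsigned int ioc_ap_size_field(unsigned long mem_bytes)
	{
		unsigned long kb = mem_bytes / 1024;
		unsigned int order = 0;

		while ((1UL << order) < kb)	/* order_base_2() equivalent */
			order++;
		return order - 2;		/* aperture = 2^(field + 2) KB */
	}

	int main(void)
	{
		/* 512 MB = 2^19 KB -> field = 17 = 0x11, the constant the
		 * old hard-coded setup used to write. */
		printf("0x%x\n", ioc_ap_size_field(512UL * 1024 * 1024));
		return 0;
	}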
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 399e2f223d25..8c9415ed6280 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -40,6 +40,11 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 #endif
 
+long __init arc_get_mem_sz(void)
+{
+	return low_mem_sz;
+}
+
 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
 static int __init setup_mem_sz(char *str)
 {
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5fab553fd03a..186c4c214e0a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1502,8 +1502,7 @@ source kernel/Kconfig.preempt
 
 config HZ_FIXED
 	int
-	default 200 if ARCH_EBSA110 || ARCH_S3C24XX || \
-		ARCH_S5PV210 || ARCH_EXYNOS4
+	default 200 if ARCH_EBSA110
 	default 128 if SOC_AT91RM9200
 	default 0
 
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 1e0bfd1bddfc..9e4cae54df13 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -501,6 +501,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
 	am3517-evm.dtb \
 	am3517_mt_ventoux.dtb \
 	logicpd-torpedo-37xx-devkit.dtb \
+	logicpd-som-lv-37xx-devkit.dtb \
 	omap3430-sdp.dtb \
 	omap3-beagle.dtb \
 	omap3-beagle-xm.dtb \
@@ -619,7 +620,7 @@ dtb-$(CONFIG_ARCH_ORION5X) += \
 	orion5x-lacie-ethernet-disk-mini-v2.dtb \
 	orion5x-linkstation-lsgl.dtb \
 	orion5x-linkstation-lswtgl.dtb \
-	orion5x-lschl.dtb \
+	orion5x-linkstation-lschl.dtb \
 	orion5x-lswsgl.dtb \
 	orion5x-maxtor-shared-storage-2.dtb \
 	orion5x-netgear-wnr854t.dtb \
@@ -848,6 +849,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
 	sun8i-a83t-allwinner-h8homlet-v2.dtb \
 	sun8i-a83t-cubietruck-plus.dtb \
 	sun8i-h3-bananapi-m2-plus.dtb \
+	sun8i-h3-nanopi-m1.dtb \
 	sun8i-h3-nanopi-neo.dtb \
 	sun8i-h3-orangepi-2.dtb \
 	sun8i-h3-orangepi-lite.dtb \
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index cbb4b3f893f4..bf6b26abe35b 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -6,8 +6,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <dt-bindings/mfd/tps65217.h>
-
 / {
 	cpus {
 		cpu@0 {
@@ -321,13 +319,13 @@
 	ti,pmic-shutdown-controller;
 
 	charger {
-		interrupts = <TPS65217_IRQ_AC>, <TPS65217_IRQ_USB>;
-		interrupts-names = "AC", "USB";
+		interrupts = <0>, <1>;
+		interrupt-names = "USB", "AC";
 		status = "okay";
 	};
 
 	pwrbutton {
-		interrupts = <TPS65217_IRQ_PB>;
+		interrupts = <2>;
 		status = "okay";
 	};
 
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index ff9417ce93c0..a2ad076822db 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -174,7 +174,6 @@
 			AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0)	/* (G16) mmc0_dat0.mmc0_dat0 */
 			AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0)	/* (G17) mmc0_clk.mmc0_clk */
 			AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0)	/* (G18) mmc0_cmd.mmc0_cmd */
-			AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE5)	/* (C15) spi0_cs1.mmc0_sdcd */
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 64c8aa9057a3..18d72a245e88 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -16,6 +16,7 @@
 	interrupt-parent = <&intc>;
 	#address-cells = <1>;
 	#size-cells = <1>;
+	chosen { };
 
 	aliases {
 		i2c0 = &i2c0;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index ac55f93fc91e..2df9e6050c2f 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -16,6 +16,7 @@
 	interrupt-parent = <&wakeupgen>;
 	#address-cells = <1>;
 	#size-cells = <1>;
+	chosen { };
 
 	memory@0 {
 		device_type = "memory";
diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
index d6e43e5184c1..ad68d1eb3bc3 100644
--- a/arch/arm/boot/dts/am571x-idk.dts
+++ b/arch/arm/boot/dts/am571x-idk.dts
@@ -62,11 +62,6 @@
 			linux,default-trigger = "mmc0";
 		};
 	};
-
-	extcon_usb2: extcon_usb2 {
-		compatible = "linux,extcon-usb-gpio";
-		id-gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
-	};
 };
 
 &mmc1 {
@@ -79,3 +74,8 @@
 &omap_dwc3_2 {
 	extcon = <&extcon_usb2>;
 };
+
+&extcon_usb2 {
+	id-gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
+	vbus-gpio = <&gpio7 22 GPIO_ACTIVE_HIGH>;
+};
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
index 27d9149cedba..8350b4b34b08 100644
--- a/arch/arm/boot/dts/am572x-idk.dts
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -23,11 +23,6 @@
 		reg = <0x0 0x80000000 0x0 0x80000000>;
 	};
 
-	extcon_usb2: extcon_usb2 {
-		compatible = "linux,extcon-usb-gpio";
-		id-gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>;
-	};
-
 	status-leds {
 		compatible = "gpio-leds";
 		cpu0-led {
@@ -76,6 +71,11 @@
 	extcon = <&extcon_usb2>;
 };
 
+&extcon_usb2 {
+	id-gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+	vbus-gpio = <&gpio3 26 GPIO_ACTIVE_HIGH>;
+};
+
 &mmc1 {
 	status = "okay";
 	vmmc-supply = <&v3_3d>;
@@ -87,3 +87,7 @@
 &sn65hvs882 {
 	load-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
 };
+
+&pcie1 {
+	gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+};
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 17cd9814aa44..e5ac1d81d15c 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -307,6 +307,20 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
+
+		extcon_usb2: tps659038_usb {
+			compatible = "ti,palmas-usb-vid";
+			ti,enable-vbus-detection;
+			ti,enable-id-detection;
+			/* ID & VBUS GPIOs provided in board dts */
+		};
+	};
+
+	tpic2810: tpic2810@60 {
+		compatible = "ti,tpic2810";
+		reg = <0x60>;
+		gpio-controller;
+		#gpio-cells = <2>;
 	};
 };
 
@@ -323,13 +337,6 @@
 		spi-max-frequency = <1000000>;
 		spi-cpol;
 	};
-
-	tpic2810: tpic2810@60 {
-		compatible = "ti,tpic2810";
-		reg = <0x60>;
-		gpio-controller;
-		#gpio-cells = <2>;
-	};
 };
 
 &uart3 {
@@ -373,7 +380,7 @@
 };
 
 &usb2 {
-	dr_mode = "otg";
+	dr_mode = "peripheral";
 };
 
 &mmc2 {
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index b6142bda661e..15f07f9af3b3 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -160,7 +160,7 @@
 
 	axi {
 		compatible = "simple-bus";
-		ranges = <0x00000000 0x18000000 0x0011c40a>;
+		ranges = <0x00000000 0x18000000 0x0011c40c>;
 		#address-cells = <1>;
 		#size-cells = <1>;
 
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 41de15fe15a2..78492a0bbbab 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -99,6 +99,7 @@
 			#size-cells = <1>;
 			compatible = "m25p64";
 			spi-max-frequency = <30000000>;
+			m25p,fast-read;
 			reg = <0>;
 			partition@0 {
 				label = "U-Boot-SPL";
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index 1facc5f12cef..81b8cecb5820 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -12,6 +12,7 @@
 	interrupt-parent = <&intc>;
 	#address-cells = <1>;
 	#size-cells = <1>;
+	chosen { };
 
 	aliases {
 		i2c0 = &i2c1;
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 61dd2f6b02bc..6db652ae9bd5 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -12,6 +12,7 @@
 	interrupt-parent = <&intc>;
 	#address-cells = <1>;
 	#size-cells = <1>;
+	chosen { };
 
 	aliases {
 		i2c0 = &i2c1;
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index addb7530cfbe..5ba161679e01 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -18,6 +18,7 @@
 
 	compatible = "ti,dra7xx";
 	interrupt-parent = <&crossbar_mpu>;
+	chosen { };
 
 	aliases {
 		i2c0 = &i2c1;
@@ -1377,6 +1378,7 @@
 			phy-names = "sata-phy";
 			clocks = <&sata_ref_clk>;
 			ti,hwmods = "sata";
+			ports-implemented = <0x1>;
 		};
 
 		rtc: rtc@48838000 {
diff --git a/arch/arm/boot/dts/dra72-evm-tps65917.dtsi b/arch/arm/boot/dts/dra72-evm-tps65917.dtsi
index ee6dac44edf1..e6df676886c0 100644
--- a/arch/arm/boot/dts/dra72-evm-tps65917.dtsi
+++ b/arch/arm/boot/dts/dra72-evm-tps65917.dtsi
@@ -132,3 +132,19 @@
 		ti,palmas-long-press-seconds = <6>;
 	};
 };
+
+&usb2_phy1 {
+	phy-supply = <&ldo4_reg>;
+};
+
+&usb2_phy2 {
+	phy-supply = <&ldo4_reg>;
+};
+
+&dss {
+	vdda_video-supply = <&ldo5_reg>;
+};
+
+&mmc1 {
+	vmmc_aux-supply = <&ldo1_reg>;
+};
diff --git a/arch/arm/boot/dts/imx1.dtsi b/arch/arm/boot/dts/imx1.dtsi
index b792eee3899b..2ee40bc9ec21 100644
--- a/arch/arm/boot/dts/imx1.dtsi
+++ b/arch/arm/boot/dts/imx1.dtsi
@@ -18,6 +18,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };
 
 	aliases {
 		gpio0 = &gpio1;
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index ac2a9da62b6c..43ccbbf754a3 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -16,6 +16,14 @@
 	#size-cells = <1>;
 
 	interrupt-parent = <&icoll>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };
 
 	aliases {
 		gpio0 = &gpio0;
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 831d09a28155..acd475659156 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -14,6 +14,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };
 
 	aliases {
 		ethernet0 = &fec;
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index 9d8b5969ee3b..b397384248f4 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -19,6 +19,14 @@
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
+	/*
+	 * The decompressor and also some bootloaders rely on a
+	 * pre-existing /chosen node to be available to insert the
+	 * command line and merge other ATAGS info.
+	 * Also for U-Boot there must be a pre-existing /memory node.
+	 */
+	chosen {};
+	memory { device_type = "memory"; reg = <0 0>; };
 
 	aliases {
 		ethernet0 = &fec;
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 3aabf65a6a52..d6a2190b60ef 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -17,6 +17,14 @@
17 #size-cells = <1>; 17 #size-cells = <1>;
18 18
19 interrupt-parent = <&icoll>; 19 interrupt-parent = <&icoll>;
20 /*
21 * The decompressor and also some bootloaders rely on a
22 * pre-existing /chosen node to be available to insert the
23 * command line and merge other ATAGS info.
24 * Also for U-Boot there must be a pre-existing /memory node.
25 */
26 chosen {};
27 memory { device_type = "memory"; reg = <0 0>; };
20 28
21 aliases { 29 aliases {
22 ethernet0 = &mac0; 30 ethernet0 = &mac0;
diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi
index 685916e3d8a1..23b0d2cf9acd 100644
--- a/arch/arm/boot/dts/imx31.dtsi
+++ b/arch/arm/boot/dts/imx31.dtsi
@@ -12,6 +12,14 @@
12/ { 12/ {
13 #address-cells = <1>; 13 #address-cells = <1>;
14 #size-cells = <1>; 14 #size-cells = <1>;
15 /*
16 * The decompressor and also some bootloaders rely on a
17 * pre-existing /chosen node to be available to insert the
18 * command line and merge other ATAGS info.
19 * Also for U-Boot there must be a pre-existing /memory node.
20 */
21 chosen {};
22 memory { device_type = "memory"; reg = <0 0>; };
15 23
16 aliases { 24 aliases {
17 serial0 = &uart1; 25 serial0 = &uart1;
@@ -31,11 +39,11 @@
31 }; 39 };
32 }; 40 };
33 41
34 avic: avic-interrupt-controller@60000000 { 42 avic: interrupt-controller@68000000 {
35 compatible = "fsl,imx31-avic", "fsl,avic"; 43 compatible = "fsl,imx31-avic", "fsl,avic";
36 interrupt-controller; 44 interrupt-controller;
37 #interrupt-cells = <1>; 45 #interrupt-cells = <1>;
38 reg = <0x60000000 0x100000>; 46 reg = <0x68000000 0x100000>;
39 }; 47 };
40 48
41 soc { 49 soc {
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index 9f40e6229189..d0496c65cea2 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -13,6 +13,14 @@
13/ { 13/ {
14 #address-cells = <1>; 14 #address-cells = <1>;
15 #size-cells = <1>; 15 #size-cells = <1>;
16 /*
17 * The decompressor and also some bootloaders rely on a
18 * pre-existing /chosen node to be available to insert the
19 * command line and merge other ATAGS info.
20 * Also for U-Boot there must be a pre-existing /memory node.
21 */
22 chosen {};
23 memory { device_type = "memory"; reg = <0 0>; };
16 24
17 aliases { 25 aliases {
18 ethernet0 = &fec; 26 ethernet0 = &fec;
diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
index fe0221e4cbf7..ceae909e2201 100644
--- a/arch/arm/boot/dts/imx50.dtsi
+++ b/arch/arm/boot/dts/imx50.dtsi
@@ -17,6 +17,14 @@
17/ { 17/ {
18 #address-cells = <1>; 18 #address-cells = <1>;
19 #size-cells = <1>; 19 #size-cells = <1>;
20 /*
21 * The decompressor and also some bootloaders rely on a
22 * pre-existing /chosen node to be available to insert the
23 * command line and merge other ATAGS info.
24 * Also for U-Boot there must be a pre-existing /memory node.
25 */
26 chosen {};
27 memory { device_type = "memory"; reg = <0 0>; };
20 28
21 aliases { 29 aliases {
22 ethernet0 = &fec; 30 ethernet0 = &fec;
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 33526cade735..1ee1d542d9ad 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -19,6 +19,14 @@
19/ { 19/ {
20 #address-cells = <1>; 20 #address-cells = <1>;
21 #size-cells = <1>; 21 #size-cells = <1>;
22 /*
23 * The decompressor and also some bootloaders rely on a
24 * pre-existing /chosen node to be available to insert the
25 * command line and merge other ATAGS info.
26 * Also for U-Boot there must be a pre-existing /memory node.
27 */
28 chosen {};
29 memory { device_type = "memory"; reg = <0 0>; };
22 30
23 aliases { 31 aliases {
24 ethernet0 = &fec; 32 ethernet0 = &fec;
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index ca51dc03e327..2e516f4985e4 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -19,6 +19,14 @@
19/ { 19/ {
20 #address-cells = <1>; 20 #address-cells = <1>;
21 #size-cells = <1>; 21 #size-cells = <1>;
22 /*
23 * The decompressor and also some bootloaders rely on a
24 * pre-existing /chosen node to be available to insert the
25 * command line and merge other ATAGS info.
26 * Also for U-Boot there must be a pre-existing /memory node.
27 */
28 chosen {};
29 memory { device_type = "memory"; reg = <0 0>; };
22 30
23 aliases { 31 aliases {
24 ethernet0 = &fec; 32 ethernet0 = &fec;
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
index 1ade1951e620..7aa120fbdc71 100644
--- a/arch/arm/boot/dts/imx6dl.dtsi
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -137,7 +137,7 @@
137&gpio4 { 137&gpio4 {
138 gpio-ranges = <&iomuxc 5 136 1>, <&iomuxc 6 145 1>, <&iomuxc 7 150 1>, 138 gpio-ranges = <&iomuxc 5 136 1>, <&iomuxc 6 145 1>, <&iomuxc 7 150 1>,
139 <&iomuxc 8 146 1>, <&iomuxc 9 151 1>, <&iomuxc 10 147 1>, 139 <&iomuxc 8 146 1>, <&iomuxc 9 151 1>, <&iomuxc 10 147 1>,
140 <&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>, 140 <&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
141 <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16 39 7>, 141 <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16 39 7>,
142 <&iomuxc 23 56 1>, <&iomuxc 24 61 7>, <&iomuxc 31 46 1>; 142 <&iomuxc 23 56 1>, <&iomuxc 24 61 7>, <&iomuxc 31 46 1>;
143}; 143};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
index 34887a10c5f1..47ba97229a48 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
@@ -319,8 +319,6 @@
319 compatible = "fsl,imx6q-nitrogen6_max-sgtl5000", 319 compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
320 "fsl,imx-audio-sgtl5000"; 320 "fsl,imx-audio-sgtl5000";
321 model = "imx6q-nitrogen6_max-sgtl5000"; 321 model = "imx6q-nitrogen6_max-sgtl5000";
322 pinctrl-names = "default";
323 pinctrl-0 = <&pinctrl_sgtl5000>;
324 ssi-controller = <&ssi1>; 322 ssi-controller = <&ssi1>;
325 audio-codec = <&codec>; 323 audio-codec = <&codec>;
326 audio-routing = 324 audio-routing =
@@ -402,6 +400,8 @@
402 400
403 codec: sgtl5000@0a { 401 codec: sgtl5000@0a {
404 compatible = "fsl,sgtl5000"; 402 compatible = "fsl,sgtl5000";
403 pinctrl-names = "default";
404 pinctrl-0 = <&pinctrl_sgtl5000>;
405 reg = <0x0a>; 405 reg = <0x0a>;
406 clocks = <&clks IMX6QDL_CLK_CKO>; 406 clocks = <&clks IMX6QDL_CLK_CKO>;
407 VDDA-supply = <&reg_2p5v>; 407 VDDA-supply = <&reg_2p5v>;
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
index d80f21abea62..31d4cc62dbc7 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
@@ -250,8 +250,6 @@
250 compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000", 250 compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000",
251 "fsl,imx-audio-sgtl5000"; 251 "fsl,imx-audio-sgtl5000";
252 model = "imx6q-nitrogen6_som2-sgtl5000"; 252 model = "imx6q-nitrogen6_som2-sgtl5000";
253 pinctrl-names = "default";
254 pinctrl-0 = <&pinctrl_sgtl5000>;
255 ssi-controller = <&ssi1>; 253 ssi-controller = <&ssi1>;
256 audio-codec = <&codec>; 254 audio-codec = <&codec>;
257 audio-routing = 255 audio-routing =
@@ -320,6 +318,8 @@
320 318
321 codec: sgtl5000@0a { 319 codec: sgtl5000@0a {
322 compatible = "fsl,sgtl5000"; 320 compatible = "fsl,sgtl5000";
321 pinctrl-names = "default";
322 pinctrl-0 = <&pinctrl_sgtl5000>;
323 reg = <0x0a>; 323 reg = <0x0a>;
324 clocks = <&clks IMX6QDL_CLK_CKO>; 324 clocks = <&clks IMX6QDL_CLK_CKO>;
325 VDDA-supply = <&reg_2p5v>; 325 VDDA-supply = <&reg_2p5v>;
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index e476d01959ea..26d060484728 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -533,7 +533,6 @@
533 MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17071 533 MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17071
534 MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17071 534 MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17071
535 MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17071 535 MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17071
536 MX6QDL_PAD_NANDF_CS2__GPIO6_IO15 0x000b0
537 >; 536 >;
538 }; 537 };
539 538
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 53e6e63cbb02..e7d30f45b161 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -16,6 +16,14 @@
16/ { 16/ {
17 #address-cells = <1>; 17 #address-cells = <1>;
18 #size-cells = <1>; 18 #size-cells = <1>;
19 /*
20 * The decompressor and also some bootloaders rely on a
21 * pre-existing /chosen node to be available to insert the
22 * command line and merge other ATAGS info.
23 * Also for U-Boot there must be a pre-existing /memory node.
24 */
25 chosen {};
26 memory { device_type = "memory"; reg = <0 0>; };
19 27
20 aliases { 28 aliases {
21 ethernet0 = &fec; 29 ethernet0 = &fec;
@@ -1100,6 +1108,7 @@
1100 interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>; 1108 interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
1101 clocks = <&clks IMX6QDL_CLK_EIM_SLOW>; 1109 clocks = <&clks IMX6QDL_CLK_EIM_SLOW>;
1102 fsl,weim-cs-gpr = <&gpr>; 1110 fsl,weim-cs-gpr = <&gpr>;
1111 status = "disabled";
1103 }; 1112 };
1104 1113
1105 ocotp: ocotp@021bc000 { 1114 ocotp: ocotp@021bc000 {
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 4fd6de29f07d..cc9572ea2860 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -14,6 +14,14 @@
14/ { 14/ {
15 #address-cells = <1>; 15 #address-cells = <1>;
16 #size-cells = <1>; 16 #size-cells = <1>;
17 /*
18 * The decompressor and also some bootloaders rely on a
19 * pre-existing /chosen node to be available to insert the
20 * command line and merge other ATAGS info.
21 * Also for U-Boot there must be a pre-existing /memory node.
22 */
23 chosen {};
24 memory { device_type = "memory"; reg = <0 0>; };
17 25
18 aliases { 26 aliases {
19 ethernet0 = &fec; 27 ethernet0 = &fec;
@@ -900,6 +908,7 @@
900 reg = <0x021b8000 0x4000>; 908 reg = <0x021b8000 0x4000>;
901 interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>; 909 interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
902 fsl,weim-cs-gpr = <&gpr>; 910 fsl,weim-cs-gpr = <&gpr>;
911 status = "disabled";
903 }; 912 };
904 913
905 ocotp: ocotp@021bc000 { 914 ocotp: ocotp@021bc000 {
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 076a30f9bcae..dd4ec85ecbaa 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -15,6 +15,14 @@
15/ { 15/ {
16 #address-cells = <1>; 16 #address-cells = <1>;
17 #size-cells = <1>; 17 #size-cells = <1>;
18 /*
19 * The decompressor and also some bootloaders rely on a
20 * pre-existing /chosen node to be available to insert the
21 * command line and merge other ATAGS info.
22 * Also for U-Boot there must be a pre-existing /memory node.
23 */
24 chosen {};
25 memory { device_type = "memory"; reg = <0 0>; };
18 26
19 aliases { 27 aliases {
20 can0 = &flexcan1; 28 can0 = &flexcan1;
@@ -977,6 +985,7 @@
977 interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; 985 interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
978 clocks = <&clks IMX6SX_CLK_EIM_SLOW>; 986 clocks = <&clks IMX6SX_CLK_EIM_SLOW>;
979 fsl,weim-cs-gpr = <&gpr>; 987 fsl,weim-cs-gpr = <&gpr>;
988 status = "disabled";
980 }; 989 };
981 990
982 ocotp: ocotp@021bc000 { 991 ocotp: ocotp@021bc000 {
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index 39845a7e0463..53d3f8e41e9b 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -15,6 +15,14 @@
15/ { 15/ {
16 #address-cells = <1>; 16 #address-cells = <1>;
17 #size-cells = <1>; 17 #size-cells = <1>;
18 /*
19 * The decompressor and also some bootloaders rely on a
20 * pre-existing /chosen node to be available to insert the
21 * command line and merge other ATAGS info.
22 * Also for U-Boot there must be a pre-existing /memory node.
23 */
24 chosen {};
25 memory { device_type = "memory"; reg = <0 0>; };
18 26
19 aliases { 27 aliases {
20 ethernet0 = &fec1; 28 ethernet0 = &fec1;
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 8ff2cbdd8f0d..be33dfc86838 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -50,6 +50,14 @@
50/ { 50/ {
51 #address-cells = <1>; 51 #address-cells = <1>;
52 #size-cells = <1>; 52 #size-cells = <1>;
53 /*
54 * The decompressor and also some bootloaders rely on a
55 * pre-existing /chosen node to be available to insert the
56 * command line and merge other ATAGS info.
57 * Also for U-Boot there must be a pre-existing /memory node.
58 */
59 chosen {};
60 memory { device_type = "memory"; reg = <0 0>; };
53 61
54 aliases { 62 aliases {
55 gpio0 = &gpio1; 63 gpio0 = &gpio1;
diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
index da8598402ab8..38faa90007d7 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
@@ -158,7 +158,7 @@
158&mmc1 { 158&mmc1 {
159 interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>; 159 interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
160 pinctrl-names = "default"; 160 pinctrl-names = "default";
161 pinctrl-0 = <&mmc1_pins &mmc1_cd>; 161 pinctrl-0 = <&mmc1_pins>;
162 wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */ 162 wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
163 cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */ 163 cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */
164 vmmc-supply = <&vmmc1>; 164 vmmc-supply = <&vmmc1>;
@@ -193,7 +193,8 @@
193 OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */ 193 OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
194 OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */ 194 OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
195 OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */ 195 OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
196 OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 sdmmc1_wp*/ 196 OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 */
197 OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
197 >; 198 >;
198 }; 199 };
199 200
@@ -242,12 +243,6 @@
242 OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4) /* sys_boot6.gpio_8 */ 243 OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4) /* sys_boot6.gpio_8 */
243 >; 244 >;
244 }; 245 };
245
246 mmc1_cd: pinmux_mmc1_cd {
247 pinctrl-single,pins = <
248 OMAP3_WKUP_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
249 >;
250 };
251}; 246};
252 247
253 248
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index 4f793a025a72..f1d6de8b3c19 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -17,6 +17,7 @@
17 interrupt-parent = <&intc>; 17 interrupt-parent = <&intc>;
18 #address-cells = <1>; 18 #address-cells = <1>;
19 #size-cells = <1>; 19 #size-cells = <1>;
20 chosen { };
20 21
21 aliases { 22 aliases {
22 serial0 = &uart1; 23 serial0 = &uart1;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index b6ab0e3a3d07..b64cfda8dbb7 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -735,6 +735,8 @@
735 vmmc_aux-supply = <&vsim>; 735 vmmc_aux-supply = <&vsim>;
736 bus-width = <8>; 736 bus-width = <8>;
737 non-removable; 737 non-removable;
738 no-sdio;
739 no-sd;
738}; 740};
739 741
740&mmc3 { 742&mmc3 {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index ecf5eb584c75..a3ff4933dbc1 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -17,6 +17,7 @@
17 interrupt-parent = <&intc>; 17 interrupt-parent = <&intc>;
18 #address-cells = <1>; 18 #address-cells = <1>;
19 #size-cells = <1>; 19 #size-cells = <1>;
20 chosen { };
20 21
21 aliases { 22 aliases {
22 i2c0 = &i2c1; 23 i2c0 = &i2c1;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 8087456b5fbe..578c53f08309 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -15,6 +15,7 @@
15 interrupt-parent = <&wakeupgen>; 15 interrupt-parent = <&wakeupgen>;
16 #address-cells = <1>; 16 #address-cells = <1>;
17 #size-cells = <1>; 17 #size-cells = <1>;
18 chosen { };
18 19
19 aliases { 20 aliases {
20 i2c0 = &i2c1; 21 i2c0 = &i2c1;
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 968c67a49dbd..0844737b72b2 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -17,6 +17,7 @@
17 17
18 compatible = "ti,omap5"; 18 compatible = "ti,omap5";
19 interrupt-parent = <&wakeupgen>; 19 interrupt-parent = <&wakeupgen>;
20 chosen { };
20 21
21 aliases { 22 aliases {
22 i2c0 = &i2c1; 23 i2c0 = &i2c1;
@@ -987,6 +988,7 @@
987 phy-names = "sata-phy"; 988 phy-names = "sata-phy";
988 clocks = <&sata_ref_clk>; 989 clocks = <&sata_ref_clk>;
989 ti,hwmods = "sata"; 990 ti,hwmods = "sata";
991 ports-implemented = <0x1>;
990 }; 992 };
991 993
992 dss: dss@58000000 { 994 dss: dss@58000000 {
diff --git a/arch/arm/boot/dts/orion5x-lschl.dts b/arch/arm/boot/dts/orion5x-linkstation-lschl.dts
index 947409252845..ea6c881634b9 100644
--- a/arch/arm/boot/dts/orion5x-lschl.dts
+++ b/arch/arm/boot/dts/orion5x-linkstation-lschl.dts
@@ -2,7 +2,7 @@
2 * Device Tree file for Buffalo Linkstation LS-CHLv3 2 * Device Tree file for Buffalo Linkstation LS-CHLv3
3 * 3 *
4 * Copyright (C) 2016 Ash Hughes <ashley.hughes@blueyonder.co.uk> 4 * Copyright (C) 2016 Ash Hughes <ashley.hughes@blueyonder.co.uk>
5 * Copyright (C) 2015, 2016 5 * Copyright (C) 2015-2017
6 * Roger Shimizu <rogershimizu@gmail.com> 6 * Roger Shimizu <rogershimizu@gmail.com>
7 * 7 *
8 * This file is dual-licensed: you can use it either under the terms 8 * This file is dual-licensed: you can use it either under the terms
@@ -52,7 +52,7 @@
52#include <dt-bindings/gpio/gpio.h> 52#include <dt-bindings/gpio/gpio.h>
53 53
54/ { 54/ {
55 model = "Buffalo Linkstation Live v3 (LS-CHL)"; 55 model = "Buffalo Linkstation LiveV3 (LS-CHL)";
56 compatible = "buffalo,lschl", "marvell,orion5x-88f5182", "marvell,orion5x"; 56 compatible = "buffalo,lschl", "marvell,orion5x-88f5182", "marvell,orion5x";
57 57
58 memory { /* 128 MB */ 58 memory { /* 128 MB */
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index 268bd470c865..407a4610f4a7 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -4,6 +4,7 @@
4#include <dt-bindings/clock/qcom,gcc-msm8960.h> 4#include <dt-bindings/clock/qcom,gcc-msm8960.h>
5#include <dt-bindings/reset/qcom,gcc-msm8960.h> 5#include <dt-bindings/reset/qcom,gcc-msm8960.h>
6#include <dt-bindings/clock/qcom,mmcc-msm8960.h> 6#include <dt-bindings/clock/qcom,mmcc-msm8960.h>
7#include <dt-bindings/clock/qcom,rpmcc.h>
7#include <dt-bindings/soc/qcom,gsbi.h> 8#include <dt-bindings/soc/qcom,gsbi.h>
8#include <dt-bindings/interrupt-controller/irq.h> 9#include <dt-bindings/interrupt-controller/irq.h>
9#include <dt-bindings/interrupt-controller/arm-gic.h> 10#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -303,6 +304,9 @@
303 firmware { 304 firmware {
304 scm { 305 scm {
305 compatible = "qcom,scm-apq8064"; 306 compatible = "qcom,scm-apq8064";
307
308 clocks = <&rpmcc RPM_DAYTONA_FABRIC_CLK>;
309 clock-names = "core";
306 }; 310 };
307 }; 311 };
308 312
diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi
index 5ae4ec59e6ea..c852b69229c9 100644
--- a/arch/arm/boot/dts/qcom-mdm9615.dtsi
+++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi
@@ -357,7 +357,7 @@
357 }; 357 };
358 358
359 amba { 359 amba {
360 compatible = "arm,amba-bus"; 360 compatible = "simple-bus";
361 #address-cells = <1>; 361 #address-cells = <1>;
362 #size-cells = <1>; 362 #size-cells = <1>;
363 ranges; 363 ranges;
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index c8b2944e304a..ace97e8576db 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -680,6 +680,7 @@
680 phy-names = "usb2-phy", "usb3-phy"; 680 phy-names = "usb2-phy", "usb3-phy";
681 phys = <&usb2_picophy0>, 681 phys = <&usb2_picophy0>,
682 <&phy_port2 PHY_TYPE_USB3>; 682 <&phy_port2 PHY_TYPE_USB3>;
683 snps,dis_u3_susphy_quirk;
683 }; 684 };
684 }; 685 };
685 686
diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
index 735914f6ae44..7cae328398b1 100644
--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
@@ -140,6 +140,10 @@
140 cpu-supply = <&reg_dcdc3>; 140 cpu-supply = <&reg_dcdc3>;
141}; 141};
142 142
143&de {
144 status = "okay";
145};
146
143&ehci0 { 147&ehci0 {
144 status = "okay"; 148 status = "okay";
145}; 149};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 2b26175d55d1..e78faaf9243c 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -234,6 +234,7 @@
234 de: display-engine { 234 de: display-engine {
235 compatible = "allwinner,sun6i-a31-display-engine"; 235 compatible = "allwinner,sun6i-a31-display-engine";
236 allwinner,pipelines = <&fe0>; 236 allwinner,pipelines = <&fe0>;
237 status = "disabled";
237 }; 238 };
238 239
239 soc@01c00000 { 240 soc@01c00000 {
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
index 5ea4915f6d75..10d307408f23 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
@@ -56,7 +56,7 @@
56}; 56};
57 57
58&pio { 58&pio {
59 mmc2_pins_nrst: mmc2@0 { 59 mmc2_pins_nrst: mmc2-rst-pin {
60 allwinner,pins = "PC16"; 60 allwinner,pins = "PC16";
61 allwinner,function = "gpio_out"; 61 allwinner,function = "gpio_out";
62 allwinner,drive = <SUN4I_PINCTRL_10_MA>; 62 allwinner,drive = <SUN4I_PINCTRL_10_MA>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index 102838fcc588..15f4fd3f4695 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -81,7 +81,7 @@
81 #address-cells = <0>; 81 #address-cells = <0>;
82 interrupt-controller; 82 interrupt-controller;
83 reg = <0 0x2c001000 0 0x1000>, 83 reg = <0 0x2c001000 0 0x1000>,
84 <0 0x2c002000 0 0x1000>, 84 <0 0x2c002000 0 0x2000>,
85 <0 0x2c004000 0 0x2000>, 85 <0 0x2c004000 0 0x2000>,
86 <0 0x2c006000 0 0x2000>; 86 <0 0x2c006000 0 0x2000>;
87 interrupts = <1 9 0xf04>; 87 interrupts = <1 9 0xf04>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 45d08cc37b01..bd107c5a0226 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -131,7 +131,7 @@
131 #address-cells = <0>; 131 #address-cells = <0>;
132 interrupt-controller; 132 interrupt-controller;
133 reg = <0 0x2c001000 0 0x1000>, 133 reg = <0 0x2c001000 0 0x1000>,
134 <0 0x2c002000 0 0x1000>, 134 <0 0x2c002000 0 0x2000>,
135 <0 0x2c004000 0 0x2000>, 135 <0 0x2c004000 0 0x2000>,
136 <0 0x2c006000 0 0x2000>; 136 <0 0x2c006000 0 0x2000>;
137 interrupts = <1 9 0xf04>; 137 interrupts = <1 9 0xf04>;
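
Both Versatile Express fixes grow the GIC CPU interface region from 4KB to 8KB. On GICv2 with virtualization extensions the CPU interface spans two 4K pages, with the deactivate register in the second page, so a 0x1000-sized mapping truncates it. An illustrative subset of the offsets involved (values per the GICv2 architecture spec):

    /* GICv2 CPU interface register offsets (illustrative subset). */
    #define GICC_CTLR       0x0000  /* control */
    #define GICC_IAR        0x000c  /* interrupt acknowledge */
    #define GICC_EOIR       0x0010  /* end of interrupt */
    #define GICC_DIR        0x1000  /* deactivate: lives in the second 4K page */
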
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
index 7ea617e47fe4..958b4c42d320 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
@@ -153,7 +153,8 @@
153 switch0phy1: switch1phy0@1 { 153 switch0phy1: switch1phy0@1 {
154 reg = <1>; 154 reg = <1>;
155 interrupt-parent = <&switch0>; 155 interrupt-parent = <&switch0>;
156 interrupts = <1 IRQ_TYPE_LEVEL_HIGH>; }; 156 interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
157 };
157 switch0phy2: switch1phy0@2 { 158 switch0phy2: switch1phy0@2 {
158 reg = <2>; 159 reg = <2>;
159 interrupt-parent = <&switch0>; 160 interrupt-parent = <&switch0>;
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index ea316c4b890e..d3f1768840e2 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -64,8 +64,8 @@ CONFIG_NETFILTER=y
64CONFIG_NETFILTER_NETLINK_QUEUE=m 64CONFIG_NETFILTER_NETLINK_QUEUE=m
65CONFIG_NF_CONNTRACK=m 65CONFIG_NF_CONNTRACK=m
66CONFIG_NF_CONNTRACK_EVENTS=y 66CONFIG_NF_CONNTRACK_EVENTS=y
67CONFIG_NF_CT_PROTO_SCTP=m 67CONFIG_NF_CT_PROTO_SCTP=y
68CONFIG_NF_CT_PROTO_UDPLITE=m 68CONFIG_NF_CT_PROTO_UDPLITE=y
69CONFIG_NF_CONNTRACK_AMANDA=m 69CONFIG_NF_CONNTRACK_AMANDA=m
70CONFIG_NF_CONNTRACK_FTP=m 70CONFIG_NF_CONNTRACK_FTP=m
71CONFIG_NF_CONNTRACK_H323=m 71CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 18e59feaa307..7f479cdb3479 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -56,8 +56,8 @@ CONFIG_NETFILTER=y
56CONFIG_NETFILTER_NETLINK_QUEUE=m 56CONFIG_NETFILTER_NETLINK_QUEUE=m
57CONFIG_NF_CONNTRACK=m 57CONFIG_NF_CONNTRACK=m
58CONFIG_NF_CONNTRACK_EVENTS=y 58CONFIG_NF_CONNTRACK_EVENTS=y
59CONFIG_NF_CT_PROTO_SCTP=m 59CONFIG_NF_CT_PROTO_SCTP=y
60CONFIG_NF_CT_PROTO_UDPLITE=m 60CONFIG_NF_CT_PROTO_UDPLITE=y
61CONFIG_NF_CONNTRACK_AMANDA=m 61CONFIG_NF_CONNTRACK_AMANDA=m
62CONFIG_NF_CONNTRACK_FTP=m 62CONFIG_NF_CONNTRACK_FTP=m
63CONFIG_NF_CONNTRACK_H323=m 63CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index b01a43851294..f57ec511e7ae 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -471,7 +471,7 @@ CONFIG_MESON_WATCHDOG=y
471CONFIG_DW_WATCHDOG=y 471CONFIG_DW_WATCHDOG=y
472CONFIG_DIGICOLOR_WATCHDOG=y 472CONFIG_DIGICOLOR_WATCHDOG=y
473CONFIG_BCM2835_WDT=y 473CONFIG_BCM2835_WDT=y
474CONFIG_BCM47XX_WATCHDOG=y 474CONFIG_BCM47XX_WDT=y
475CONFIG_BCM7038_WDT=m 475CONFIG_BCM7038_WDT=m
476CONFIG_BCM_KONA_WDT=y 476CONFIG_BCM_KONA_WDT=y
477CONFIG_MFD_ACT8945A=y 477CONFIG_MFD_ACT8945A=y
@@ -824,6 +824,7 @@ CONFIG_QCOM_SMSM=y
824CONFIG_QCOM_WCNSS_CTRL=m 824CONFIG_QCOM_WCNSS_CTRL=m
825CONFIG_ROCKCHIP_PM_DOMAINS=y 825CONFIG_ROCKCHIP_PM_DOMAINS=y
826CONFIG_COMMON_CLK_QCOM=y 826CONFIG_COMMON_CLK_QCOM=y
827CONFIG_QCOM_CLK_RPM=y
827CONFIG_CHROME_PLATFORMS=y 828CONFIG_CHROME_PLATFORMS=y
828CONFIG_STAGING_BOARD=y 829CONFIG_STAGING_BOARD=y
829CONFIG_CROS_EC_CHARDEV=m 830CONFIG_CROS_EC_CHARDEV=m
@@ -893,7 +894,7 @@ CONFIG_BCM2835_MBOX=y
893CONFIG_RASPBERRYPI_FIRMWARE=y 894CONFIG_RASPBERRYPI_FIRMWARE=y
894CONFIG_EFI_VARS=m 895CONFIG_EFI_VARS=m
895CONFIG_EFI_CAPSULE_LOADER=m 896CONFIG_EFI_CAPSULE_LOADER=m
896CONFIG_CONFIG_BCM47XX_NVRAM=y 897CONFIG_BCM47XX_NVRAM=y
897CONFIG_BCM47XX_SPROM=y 898CONFIG_BCM47XX_SPROM=y
898CONFIG_EXT4_FS=y 899CONFIG_EXT4_FS=y
899CONFIG_AUTOFS4_FS=y 900CONFIG_AUTOFS4_FS=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 195c98b85568..77ffccfd0c3f 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -188,6 +188,7 @@ CONFIG_WL12XX=m
188CONFIG_WL18XX=m 188CONFIG_WL18XX=m
189CONFIG_WLCORE_SPI=m 189CONFIG_WLCORE_SPI=m
190CONFIG_WLCORE_SDIO=m 190CONFIG_WLCORE_SDIO=m
191CONFIG_INPUT_MOUSEDEV=m
191CONFIG_INPUT_JOYDEV=m 192CONFIG_INPUT_JOYDEV=m
192CONFIG_INPUT_EVDEV=m 193CONFIG_INPUT_EVDEV=m
193CONFIG_KEYBOARD_ATKBD=m 194CONFIG_KEYBOARD_ATKBD=m
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 4364040ed696..1e6c48dd7b11 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -86,9 +86,9 @@ CONFIG_IPV6_TUNNEL=m
86CONFIG_NETFILTER=y 86CONFIG_NETFILTER=y
87CONFIG_NF_CONNTRACK=m 87CONFIG_NF_CONNTRACK=m
88CONFIG_NF_CONNTRACK_EVENTS=y 88CONFIG_NF_CONNTRACK_EVENTS=y
89CONFIG_NF_CT_PROTO_DCCP=m 89CONFIG_NF_CT_PROTO_DCCP=y
90CONFIG_NF_CT_PROTO_SCTP=m 90CONFIG_NF_CT_PROTO_SCTP=y
91CONFIG_NF_CT_PROTO_UDPLITE=m 91CONFIG_NF_CT_PROTO_UDPLITE=y
92CONFIG_NF_CONNTRACK_AMANDA=m 92CONFIG_NF_CONNTRACK_AMANDA=m
93CONFIG_NF_CONNTRACK_FTP=m 93CONFIG_NF_CONNTRACK_FTP=m
94CONFIG_NF_CONNTRACK_H323=m 94CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 522b5feb4eaa..b62eaeb147aa 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -94,6 +94,9 @@
94#define ARM_CPU_XSCALE_ARCH_V2 0x4000 94#define ARM_CPU_XSCALE_ARCH_V2 0x4000
95#define ARM_CPU_XSCALE_ARCH_V3 0x6000 95#define ARM_CPU_XSCALE_ARCH_V3 0x6000
96 96
97/* Qualcomm implemented cores */
98#define ARM_CPU_PART_SCORPION 0x510002d0
99
97extern unsigned int processor_id; 100extern unsigned int processor_id;
98 101
99#ifdef CONFIG_CPU_CP15 102#ifdef CONFIG_CPU_CP15
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index bfe2a2f5a644..22b73112b75f 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
54 54
55#define ftrace_return_address(n) return_address(n) 55#define ftrace_return_address(n) return_address(n)
56 56
57#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
58
59static inline bool arch_syscall_match_sym_name(const char *sym,
60 const char *name)
61{
62 if (!strcmp(sym, "sys_mmap2"))
63 sym = "sys_mmap_pgoff";
64 else if (!strcmp(sym, "sys_statfs64_wrapper"))
65 sym = "sys_statfs64";
66 else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
67 sym = "sys_fstatfs64";
68 else if (!strcmp(sym, "sys_arm_fadvise64_64"))
69 sym = "sys_fadvise64_64";
70
71 /* Ignore case since sym may start with "SyS" instead of "sys" */
72 return !strcasecmp(sym, name);
73}
74
57#endif /* ifndef __ASSEMBLY__ */ 75#endif /* ifndef __ASSEMBLY__ */
58 76
59#endif /* _ASM_ARM_FTRACE */ 77#endif /* _ASM_ARM_FTRACE */
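
The new arch_syscall_match_sym_name() lets the syscall tracer match ARM's wrapped syscall symbols against the generic names the tracing core looks up. A small, self-contained illustration of the mapping (plain userspace string handling, not kernel code; only one wrapper shown):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    static bool match(const char *sym, const char *name)
    {
            if (!strcmp(sym, "sys_mmap2"))
                    sym = "sys_mmap_pgoff";
            /* ...other wrappers elided, see the hunk above... */
            return !strcasecmp(sym, name);  /* tolerate the "SyS" spelling */
    }

    int main(void)
    {
            /* ARM's sys_mmap2 wrapper matches the generic sys_mmap_pgoff. */
            printf("%d\n", match("sys_mmap2", "SyS_mmap_pgoff"));   /* prints 1 */
            return 0;
    }
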
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 1f59ea051bab..b7e0125c0bbf 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -478,11 +478,10 @@ extern unsigned long __must_check
478arm_copy_from_user(void *to, const void __user *from, unsigned long n); 478arm_copy_from_user(void *to, const void __user *from, unsigned long n);
479 479
480static inline unsigned long __must_check 480static inline unsigned long __must_check
481__copy_from_user(void *to, const void __user *from, unsigned long n) 481__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
482{ 482{
483 unsigned int __ua_flags; 483 unsigned int __ua_flags;
484 484
485 check_object_size(to, n, false);
486 __ua_flags = uaccess_save_and_enable(); 485 __ua_flags = uaccess_save_and_enable();
487 n = arm_copy_from_user(to, from, n); 486 n = arm_copy_from_user(to, from, n);
488 uaccess_restore(__ua_flags); 487 uaccess_restore(__ua_flags);
@@ -495,18 +494,15 @@ extern unsigned long __must_check
495__copy_to_user_std(void __user *to, const void *from, unsigned long n); 494__copy_to_user_std(void __user *to, const void *from, unsigned long n);
496 495
497static inline unsigned long __must_check 496static inline unsigned long __must_check
498__copy_to_user(void __user *to, const void *from, unsigned long n) 497__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
499{ 498{
500#ifndef CONFIG_UACCESS_WITH_MEMCPY 499#ifndef CONFIG_UACCESS_WITH_MEMCPY
501 unsigned int __ua_flags; 500 unsigned int __ua_flags;
502
503 check_object_size(from, n, true);
504 __ua_flags = uaccess_save_and_enable(); 501 __ua_flags = uaccess_save_and_enable();
505 n = arm_copy_to_user(to, from, n); 502 n = arm_copy_to_user(to, from, n);
506 uaccess_restore(__ua_flags); 503 uaccess_restore(__ua_flags);
507 return n; 504 return n;
508#else 505#else
509 check_object_size(from, n, true);
510 return arm_copy_to_user(to, from, n); 506 return arm_copy_to_user(to, from, n);
511#endif 507#endif
512} 508}
@@ -526,25 +522,49 @@ __clear_user(void __user *addr, unsigned long n)
526} 522}
527 523
528#else 524#else
529#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0) 525#define __arch_copy_from_user(to, from, n) \
530#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0) 526 (memcpy(to, (void __force *)from, n), 0)
527#define __arch_copy_to_user(to, from, n) \
528 (memcpy((void __force *)to, from, n), 0)
531#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0) 529#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
532#endif 530#endif
533 531
534static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) 532static inline unsigned long __must_check
533__copy_from_user(void *to, const void __user *from, unsigned long n)
534{
535 check_object_size(to, n, false);
536 return __arch_copy_from_user(to, from, n);
537}
538
539static inline unsigned long __must_check
540copy_from_user(void *to, const void __user *from, unsigned long n)
535{ 541{
536 unsigned long res = n; 542 unsigned long res = n;
543
544 check_object_size(to, n, false);
545
537 if (likely(access_ok(VERIFY_READ, from, n))) 546 if (likely(access_ok(VERIFY_READ, from, n)))
538 res = __copy_from_user(to, from, n); 547 res = __arch_copy_from_user(to, from, n);
539 if (unlikely(res)) 548 if (unlikely(res))
540 memset(to + (n - res), 0, res); 549 memset(to + (n - res), 0, res);
541 return res; 550 return res;
542} 551}
543 552
544static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) 553static inline unsigned long __must_check
554__copy_to_user(void __user *to, const void *from, unsigned long n)
545{ 555{
556 check_object_size(from, n, true);
557
558 return __arch_copy_to_user(to, from, n);
559}
560
561static inline unsigned long __must_check
562copy_to_user(void __user *to, const void *from, unsigned long n)
563{
564 check_object_size(from, n, true);
565
546 if (access_ok(VERIFY_WRITE, to, n)) 566 if (access_ok(VERIFY_WRITE, to, n))
547 n = __copy_to_user(to, from, n); 567 n = __arch_copy_to_user(to, from, n);
548 return n; 568 return n;
549} 569}
550 570
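
The uaccess refactor renames the raw copy routines to __arch_copy_{from,to}_user() and layers check_object_size() into thin wrappers, so the hardened-usercopy check now fires on both the checked and the __-prefixed entry points. Reduced to its pattern (a sketch only; the real copy_from_user() additionally zero-fills the uncopied tail, as the hunk shows):

    /* One raw mover, checks layered on in a thin wrapper. */
    static inline unsigned long
    checked_copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            check_object_size(to, n, false);        /* hardened usercopy */
            if (!access_ok(VERIFY_READ, from, n))
                    return n;                       /* nothing copied */
            return __arch_copy_from_user(to, from, n);
    }
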
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index a2e75b84e2ae..6dae1956c74d 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -80,6 +80,11 @@ static inline bool is_kernel_in_hyp_mode(void)
80 return false; 80 return false;
81} 81}
82 82
83static inline bool has_vhe(void)
84{
85 return false;
86}
87
83/* The section containing the hypervisor idmap text */ 88/* The section containing the hypervisor idmap text */
84extern char __hyp_idmap_text_start[]; 89extern char __hyp_idmap_text_start[];
85extern char __hyp_idmap_text_end[]; 90extern char __hyp_idmap_text_end[];
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/uapi/asm/types.h
index a53cdb8f068c..9435a42f575e 100644
--- a/arch/arm/include/asm/types.h
+++ b/arch/arm/include/uapi/asm/types.h
@@ -1,5 +1,5 @@
1#ifndef _ASM_TYPES_H 1#ifndef _UAPI_ASM_TYPES_H
2#define _ASM_TYPES_H 2#define _UAPI_ASM_TYPES_H
3 3
4#include <asm-generic/int-ll64.h> 4#include <asm-generic/int-ll64.h>
5 5
@@ -37,4 +37,4 @@
37#define __UINTPTR_TYPE__ unsigned long 37#define __UINTPTR_TYPE__ unsigned long
38#endif 38#endif
39 39
40#endif /* _ASM_TYPES_H */ 40#endif /* _UAPI_ASM_TYPES_H */
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 188180b5523d..be3b3fbd382f 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1063,6 +1063,22 @@ static int __init arch_hw_breakpoint_init(void)
1063 return 0; 1063 return 0;
1064 } 1064 }
1065 1065
1066 /*
1067 * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
1068 * whenever a WFI is issued, even if the core is not powered down, in
1069 * violation of the architecture. When DBGPRSR.SPD is set, accesses to
1070 * breakpoint and watchpoint registers are treated as undefined, so
1071 * this results in boot time and runtime failures when these are
1072 * accessed and we unexpectedly take a trap.
1073 *
1074 * It's not clear if/how this can be worked around, so we blacklist
1075 * Scorpion CPUs to avoid these issues.
1076 */
1077 if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
1078 pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
1079 return 0;
1080 }
1081
1066 has_ossr = core_has_os_save_restore(); 1082 has_ossr = core_has_os_save_restore();
1067 1083
1068 /* Determine how many BRPs/WRPs are available. */ 1084 /* Determine how many BRPs/WRPs are available. */
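
The blacklist keys off read_cpuid_part(), which masks the MIDR down to the implementer and primary part number; ARM_CPU_PART_SCORPION (0x510002d0) decodes as implementer 0x51 (Qualcomm) with part number 0x2d. A sketch of the decode, assuming the mask used by read_cpuid_part():

    #define ARM_CPU_PART_MASK       0xff00fff0

    /* Strip the variant and revision fields, keeping implementer + part,
     * so one compare covers every revision of the core. */
    static unsigned int midr_to_part(unsigned int midr)
    {
            return midr & ARM_CPU_PART_MASK;
    }
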
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index ce131ed5939d..ae738a6319f6 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
600 const void *kbuf, const void __user *ubuf) 600 const void *kbuf, const void __user *ubuf)
601{ 601{
602 int ret; 602 int ret;
603 struct pt_regs newregs; 603 struct pt_regs newregs = *task_pt_regs(target);
604 604
605 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 605 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
606 &newregs, 606 &newregs,
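
The gpr_set() change seeds newregs from the task's live registers before user_regset_copyin() runs. A regset write may cover only part of struct pt_regs, and without the seed the uncovered fields would hold uninitialized kernel stack bytes that then get validated and installed. The hazard in miniature (hypothetical types, bounds checks elided):

    #include <string.h>

    struct regs_sketch { unsigned long r[18]; };

    /* A partial write must leave uncovered registers at their current
     * values; copying into an uninitialized temporary would not. */
    static void partial_regset_write(struct regs_sketch *task_regs,
                                     const unsigned long *buf,
                                     unsigned int pos, unsigned int count)
    {
            struct regs_sketch tmp = *task_regs;    /* the added seed */

            memcpy(&tmp.r[pos], buf, count * sizeof(unsigned long));
            *task_regs = tmp;                       /* fully defined result */
    }
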
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 22313cb53362..9af0701f7094 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/preempt.h> 10#include <linux/preempt.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/uaccess.h>
12 13
13#include <asm/smp_plat.h> 14#include <asm/smp_plat.h>
14#include <asm/tlbflush.h> 15#include <asm/tlbflush.h>
@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
40static inline void ipi_flush_tlb_page(void *arg) 41static inline void ipi_flush_tlb_page(void *arg)
41{ 42{
42 struct tlb_args *ta = (struct tlb_args *)arg; 43 struct tlb_args *ta = (struct tlb_args *)arg;
44 unsigned int __ua_flags = uaccess_save_and_enable();
43 45
44 local_flush_tlb_page(ta->ta_vma, ta->ta_start); 46 local_flush_tlb_page(ta->ta_vma, ta->ta_start);
47
48 uaccess_restore(__ua_flags);
45} 49}
46 50
47static inline void ipi_flush_tlb_kernel_page(void *arg) 51static inline void ipi_flush_tlb_kernel_page(void *arg)
@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
54static inline void ipi_flush_tlb_range(void *arg) 58static inline void ipi_flush_tlb_range(void *arg)
55{ 59{
56 struct tlb_args *ta = (struct tlb_args *)arg; 60 struct tlb_args *ta = (struct tlb_args *)arg;
61 unsigned int __ua_flags = uaccess_save_and_enable();
57 62
58 local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); 63 local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
64
65 uaccess_restore(__ua_flags);
59} 66}
60 67
61static inline void ipi_flush_tlb_kernel_range(void *arg) 68static inline void ipi_flush_tlb_kernel_range(void *arg)
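
The TLB IPI handlers for user addresses now run inside the same uaccess_save_and_enable()/uaccess_restore() bracket that the uaccess routines above place around arm_copy_{from,to}_user(). The bracket, reduced to a reusable shape (the helpers are the kernel's; the wrapper is illustrative only):

    /* Enable user access around an operation on user addresses, then
     * restore whatever state was in effect before. */
    static inline void with_uaccess_enabled(void (*op)(void *), void *arg)
    {
            unsigned int __ua_flags = uaccess_save_and_enable();

            op(arg);
            uaccess_restore(__ua_flags);
    }
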
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 11676787ad49..9d7446456e0c 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1099,6 +1099,9 @@ static void cpu_init_hyp_mode(void *dummy)
1099 __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr); 1099 __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
1100 __cpu_init_stage2(); 1100 __cpu_init_stage2();
1101 1101
1102 if (is_kernel_in_hyp_mode())
1103 kvm_timer_init_vhe();
1104
1102 kvm_arm_init_debug(); 1105 kvm_arm_init_debug();
1103} 1106}
1104 1107
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 8ecfd15c3a02..df73914e81c8 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -67,7 +67,7 @@ ENTRY(__get_user_4)
67ENDPROC(__get_user_4) 67ENDPROC(__get_user_4)
68 68
69ENTRY(__get_user_8) 69ENTRY(__get_user_8)
70 check_uaccess r0, 8, r1, r2, __get_user_bad 70 check_uaccess r0, 8, r1, r2, __get_user_bad8
71#ifdef CONFIG_THUMB2_KERNEL 71#ifdef CONFIG_THUMB2_KERNEL
725: TUSER(ldr) r2, [r0] 725: TUSER(ldr) r2, [r0]
736: TUSER(ldr) r3, [r0, #4] 736: TUSER(ldr) r3, [r0, #4]
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index df42c93a93d6..f5dce9b4e617 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -31,10 +31,10 @@ static LIST_HEAD(clocks);
31static DEFINE_MUTEX(clocks_mutex); 31static DEFINE_MUTEX(clocks_mutex);
32static DEFINE_SPINLOCK(clockfw_lock); 32static DEFINE_SPINLOCK(clockfw_lock);
33 33
34static void __clk_enable(struct clk *clk) 34void davinci_clk_enable(struct clk *clk)
35{ 35{
36 if (clk->parent) 36 if (clk->parent)
37 __clk_enable(clk->parent); 37 davinci_clk_enable(clk->parent);
38 if (clk->usecount++ == 0) { 38 if (clk->usecount++ == 0) {
39 if (clk->flags & CLK_PSC) 39 if (clk->flags & CLK_PSC)
40 davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc, 40 davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
@@ -44,7 +44,7 @@ static void __clk_enable(struct clk *clk)
44 } 44 }
45} 45}
46 46
47static void __clk_disable(struct clk *clk) 47void davinci_clk_disable(struct clk *clk)
48{ 48{
49 if (WARN_ON(clk->usecount == 0)) 49 if (WARN_ON(clk->usecount == 0))
50 return; 50 return;
@@ -56,7 +56,7 @@ static void __clk_disable(struct clk *clk)
56 clk->clk_disable(clk); 56 clk->clk_disable(clk);
57 } 57 }
58 if (clk->parent) 58 if (clk->parent)
59 __clk_disable(clk->parent); 59 davinci_clk_disable(clk->parent);
60} 60}
61 61
62int davinci_clk_reset(struct clk *clk, bool reset) 62int davinci_clk_reset(struct clk *clk, bool reset)
@@ -103,7 +103,7 @@ int clk_enable(struct clk *clk)
103 return -EINVAL; 103 return -EINVAL;
104 104
105 spin_lock_irqsave(&clockfw_lock, flags); 105 spin_lock_irqsave(&clockfw_lock, flags);
106 __clk_enable(clk); 106 davinci_clk_enable(clk);
107 spin_unlock_irqrestore(&clockfw_lock, flags); 107 spin_unlock_irqrestore(&clockfw_lock, flags);
108 108
109 return 0; 109 return 0;
@@ -118,7 +118,7 @@ void clk_disable(struct clk *clk)
118 return; 118 return;
119 119
120 spin_lock_irqsave(&clockfw_lock, flags); 120 spin_lock_irqsave(&clockfw_lock, flags);
121 __clk_disable(clk); 121 davinci_clk_disable(clk);
122 spin_unlock_irqrestore(&clockfw_lock, flags); 122 spin_unlock_irqrestore(&clockfw_lock, flags);
123} 123}
124EXPORT_SYMBOL(clk_disable); 124EXPORT_SYMBOL(clk_disable);
diff --git a/arch/arm/mach-davinci/clock.h b/arch/arm/mach-davinci/clock.h
index e2a5437a1aee..fa2b83752e03 100644
--- a/arch/arm/mach-davinci/clock.h
+++ b/arch/arm/mach-davinci/clock.h
@@ -132,6 +132,8 @@ int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate);
132int davinci_set_refclk_rate(unsigned long rate); 132int davinci_set_refclk_rate(unsigned long rate);
133int davinci_simple_set_rate(struct clk *clk, unsigned long rate); 133int davinci_simple_set_rate(struct clk *clk, unsigned long rate);
134int davinci_clk_reset(struct clk *clk, bool reset); 134int davinci_clk_reset(struct clk *clk, bool reset);
135void davinci_clk_enable(struct clk *clk);
136void davinci_clk_disable(struct clk *clk);
135 137
136extern struct platform_device davinci_wdt_device; 138extern struct platform_device davinci_wdt_device;
137extern void davinci_watchdog_reset(struct platform_device *); 139extern void davinci_watchdog_reset(struct platform_device *);
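
Renaming __clk_enable()/__clk_disable() to davinci_clk_enable()/davinci_clk_disable() and exporting them through clock.h lets callers inside mach-davinci (the USB PHY code below) take clock references without going through clk_enable(), which grabs clockfw_lock first. The recursive refcount scheme, in miniature:

    struct clk_sketch {
            struct clk_sketch *parent;
            unsigned int usecount;
    };

    /* Enable parents first; touch hardware only on the 0 -> 1 edge. */
    static void clk_enable_sketch(struct clk_sketch *clk)
    {
            if (clk->parent)
                    clk_enable_sketch(clk->parent);
            if (clk->usecount++ == 0) {
                    /* program the PSC / ungate the clock here */
            }
    }
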
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index e770c97ea45c..1d873d15b545 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -319,6 +319,16 @@ static struct clk emac_clk = {
319 .gpsc = 1, 319 .gpsc = 1,
320}; 320};
321 321
322/*
323 * In order to avoid adding the emac_clk to the clock lookup table twice (and
324 * screwing up the linked list in the process) create a separate clock for
325 * mdio inheriting the rate from emac_clk.
326 */
327static struct clk mdio_clk = {
328 .name = "mdio",
329 .parent = &emac_clk,
330};
331
322static struct clk mcasp_clk = { 332static struct clk mcasp_clk = {
323 .name = "mcasp", 333 .name = "mcasp",
324 .parent = &async3_clk, 334 .parent = &async3_clk,
@@ -367,6 +377,16 @@ static struct clk aemif_clk = {
367 .flags = ALWAYS_ENABLED, 377 .flags = ALWAYS_ENABLED,
368}; 378};
369 379
380/*
381 * In order to avoid adding the aemif_clk to the clock lookup table twice (and
382 * screwing up the linked list in the process) create a separate clock for
383 * nand inheriting the rate from aemif_clk.
384 */
385static struct clk aemif_nand_clk = {
386 .name = "nand",
387 .parent = &aemif_clk,
388};
389
370static struct clk usb11_clk = { 390static struct clk usb11_clk = {
371 .name = "usb11", 391 .name = "usb11",
372 .parent = &pll0_sysclk4, 392 .parent = &pll0_sysclk4,
@@ -529,7 +549,7 @@ static struct clk_lookup da850_clks[] = {
529 CLK(NULL, "arm", &arm_clk), 549 CLK(NULL, "arm", &arm_clk),
530 CLK(NULL, "rmii", &rmii_clk), 550 CLK(NULL, "rmii", &rmii_clk),
531 CLK("davinci_emac.1", NULL, &emac_clk), 551 CLK("davinci_emac.1", NULL, &emac_clk),
532 CLK("davinci_mdio.0", "fck", &emac_clk), 552 CLK("davinci_mdio.0", "fck", &mdio_clk),
533 CLK("davinci-mcasp.0", NULL, &mcasp_clk), 553 CLK("davinci-mcasp.0", NULL, &mcasp_clk),
534 CLK("davinci-mcbsp.0", NULL, &mcbsp0_clk), 554 CLK("davinci-mcbsp.0", NULL, &mcbsp0_clk),
535 CLK("davinci-mcbsp.1", NULL, &mcbsp1_clk), 555 CLK("davinci-mcbsp.1", NULL, &mcbsp1_clk),
@@ -537,7 +557,15 @@ static struct clk_lookup da850_clks[] = {
537 CLK("da830-mmc.0", NULL, &mmcsd0_clk), 557 CLK("da830-mmc.0", NULL, &mmcsd0_clk),
538 CLK("da830-mmc.1", NULL, &mmcsd1_clk), 558 CLK("da830-mmc.1", NULL, &mmcsd1_clk),
539 CLK("ti-aemif", NULL, &aemif_clk), 559 CLK("ti-aemif", NULL, &aemif_clk),
540 CLK(NULL, "aemif", &aemif_clk), 560 /*
561 * The only user of this clock is davinci_nand and it get's it through
562 * con_id. The nand node itself is created from within the aemif
563 * driver to guarantee that it's probed after the aemif timing
564 * parameters are configured. of_dev_auxdata is not accessible from
565 * the aemif driver and can't be passed to of_platform_populate(). For
566 * that reason we're leaving the dev_id here as NULL.
567 */
568 CLK(NULL, "aemif", &aemif_nand_clk),
541 CLK("ohci-da8xx", "usb11", &usb11_clk), 569 CLK("ohci-da8xx", "usb11", &usb11_clk),
542 CLK("musb-da8xx", "usb20", &usb20_clk), 570 CLK("musb-da8xx", "usb20", &usb20_clk),
543 CLK("spi_davinci.0", NULL, &spi0_clk), 571 CLK("spi_davinci.0", NULL, &spi0_clk),
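
The new mdio_clk and aemif_nand_clk exist only so the same struct clk never lands in the lookup table twice; each child simply inherits its parent's rate. The corruption the comments warn about comes from the list node being embedded in the clock itself (illustrative miniature, not the davinci code):

    #include <linux/list.h>

    struct clk_sketch {
            struct list_head node;  /* one embedded node per clock */
    };

    /* list_add() on the same node a second time relinks it, silently
     * unthreading whatever the first list held, so each lookup entry
     * needs its own struct clk. */
    static void register_clk_sketch(struct list_head *clocks,
                                    struct clk_sketch *clk)
    {
            list_add(&clk->node, clocks);   /* must happen exactly once */
    }
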
diff --git a/arch/arm/mach-davinci/usb-da8xx.c b/arch/arm/mach-davinci/usb-da8xx.c
index c6feecf7ae24..9a6af0bd5dc3 100644
--- a/arch/arm/mach-davinci/usb-da8xx.c
+++ b/arch/arm/mach-davinci/usb-da8xx.c
@@ -22,6 +22,8 @@
22#define DA8XX_USB0_BASE 0x01e00000 22#define DA8XX_USB0_BASE 0x01e00000
23#define DA8XX_USB1_BASE 0x01e25000 23#define DA8XX_USB1_BASE 0x01e25000
24 24
25static struct clk *usb20_clk;
26
25static struct platform_device da8xx_usb_phy = { 27static struct platform_device da8xx_usb_phy = {
26 .name = "da8xx-usb-phy", 28 .name = "da8xx-usb-phy",
27 .id = -1, 29 .id = -1,
@@ -158,26 +160,13 @@ int __init da8xx_register_usb_refclkin(int rate)
158 160
159static void usb20_phy_clk_enable(struct clk *clk) 161static void usb20_phy_clk_enable(struct clk *clk)
160{ 162{
161 struct clk *usb20_clk;
162 int err;
163 u32 val; 163 u32 val;
164 u32 timeout = 500000; /* 500 msec */ 164 u32 timeout = 500000; /* 500 msec */
165 165
166 val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG)); 166 val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
167 167
168 usb20_clk = clk_get(&da8xx_usb20_dev.dev, "usb20");
169 if (IS_ERR(usb20_clk)) {
170 pr_err("could not get usb20 clk: %ld\n", PTR_ERR(usb20_clk));
171 return;
172 }
173
 174 /* The USB 2.0 PLL requires that the USB 2.0 PSC is enabled as well. */ 168 /* The USB 2.0 PLL requires that the USB 2.0 PSC is enabled as well. */
175 err = clk_prepare_enable(usb20_clk); 169 davinci_clk_enable(usb20_clk);
176 if (err) {
177 pr_err("failed to enable usb20 clk: %d\n", err);
178 clk_put(usb20_clk);
179 return;
180 }
181 170
182 /* 171 /*
183 * Turn on the USB 2.0 PHY, but just the PLL, and not OTG. The USB 1.1 172 * Turn on the USB 2.0 PHY, but just the PLL, and not OTG. The USB 1.1
@@ -197,8 +186,7 @@ static void usb20_phy_clk_enable(struct clk *clk)
197 186
198 pr_err("Timeout waiting for USB 2.0 PHY clock good\n"); 187 pr_err("Timeout waiting for USB 2.0 PHY clock good\n");
199done: 188done:
200 clk_disable_unprepare(usb20_clk); 189 davinci_clk_disable(usb20_clk);
201 clk_put(usb20_clk);
202} 190}
203 191
204static void usb20_phy_clk_disable(struct clk *clk) 192static void usb20_phy_clk_disable(struct clk *clk)
@@ -285,11 +273,19 @@ static struct clk_lookup usb20_phy_clk_lookup =
285int __init da8xx_register_usb20_phy_clk(bool use_usb_refclkin) 273int __init da8xx_register_usb20_phy_clk(bool use_usb_refclkin)
286{ 274{
287 struct clk *parent; 275 struct clk *parent;
288 int ret = 0; 276 int ret;
277
278 usb20_clk = clk_get(&da8xx_usb20_dev.dev, "usb20");
279 ret = PTR_ERR_OR_ZERO(usb20_clk);
280 if (ret)
281 return ret;
289 282
290 parent = clk_get(NULL, use_usb_refclkin ? "usb_refclkin" : "pll0_aux"); 283 parent = clk_get(NULL, use_usb_refclkin ? "usb_refclkin" : "pll0_aux");
291 if (IS_ERR(parent)) 284 ret = PTR_ERR_OR_ZERO(parent);
292 return PTR_ERR(parent); 285 if (ret) {
286 clk_put(usb20_clk);
287 return ret;
288 }
293 289
294 usb20_phy_clk.parent = parent; 290 usb20_phy_clk.parent = parent;
295 ret = clk_register(&usb20_phy_clk); 291 ret = clk_register(&usb20_phy_clk);
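
Moving clk_get() out of usb20_phy_clk_enable() matters because that callback is invoked with clockfw_lock held (a spinlock, per the clock.c hunk above), where the potentially sleeping clk_get()/clk_prepare_enable() pair does not belong; the lookup now happens once at registration, and the non-sleeping davinci_clk_enable() does the work at enable time. The error idiom the hunk adopts is the kernel's PTR_ERR_OR_ZERO(); its equivalent, spelled out:

    #include <linux/err.h>

    /* Fold the IS_ERR()/PTR_ERR() pair into one call, with 0 meaning
     * "valid pointer". */
    static inline int ptr_err_or_zero_sketch(const void *ptr)
    {
            return IS_ERR(ptr) ? (int)PTR_ERR(ptr) : 0;
    }
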
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 98ffe1e62ad5..a5d68411a037 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -385,36 +385,6 @@ fail:
385 return pen_release != -1 ? ret : 0; 385 return pen_release != -1 ? ret : 0;
386} 386}
387 387
388/*
389 * Initialise the CPU possible map early - this describes the CPUs
390 * which may be present or become present in the system.
391 */
392
393static void __init exynos_smp_init_cpus(void)
394{
395 void __iomem *scu_base = scu_base_addr();
396 unsigned int i, ncores;
397
398 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
399 ncores = scu_base ? scu_get_core_count(scu_base) : 1;
400 else
401 /*
402 * CPU Nodes are passed thru DT and set_cpu_possible
403 * is set by "arm_dt_init_cpu_maps".
404 */
405 return;
406
407 /* sanity check */
408 if (ncores > nr_cpu_ids) {
409 pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
410 ncores, nr_cpu_ids);
411 ncores = nr_cpu_ids;
412 }
413
414 for (i = 0; i < ncores; i++)
415 set_cpu_possible(i, true);
416}
417
418static void __init exynos_smp_prepare_cpus(unsigned int max_cpus) 388static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
419{ 389{
420 int i; 390 int i;
@@ -479,7 +449,6 @@ static void exynos_cpu_die(unsigned int cpu)
479#endif /* CONFIG_HOTPLUG_CPU */ 449#endif /* CONFIG_HOTPLUG_CPU */
480 450
481const struct smp_operations exynos_smp_ops __initconst = { 451const struct smp_operations exynos_smp_ops __initconst = {
482 .smp_init_cpus = exynos_smp_init_cpus,
483 .smp_prepare_cpus = exynos_smp_prepare_cpus, 452 .smp_prepare_cpus = exynos_smp_prepare_cpus,
484 .smp_secondary_init = exynos_secondary_init, 453 .smp_secondary_init = exynos_secondary_init,
485 .smp_boot_secondary = exynos_boot_secondary, 454 .smp_boot_secondary = exynos_boot_secondary,
diff --git a/arch/arm/mach-imx/mach-imx1.c b/arch/arm/mach-imx/mach-imx1.c
index de5ab8d88549..3a8406e45b65 100644
--- a/arch/arm/mach-imx/mach-imx1.c
+++ b/arch/arm/mach-imx/mach-imx1.c
@@ -37,7 +37,6 @@ static const char * const imx1_dt_board_compat[] __initconst = {
37}; 37};
38 38
39DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)") 39DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)")
40 .map_io = debug_ll_io_init,
41 .init_early = imx1_init_early, 40 .init_early = imx1_init_early,
42 .init_irq = imx1_init_irq, 41 .init_irq = imx1_init_irq,
43 .dt_compat = imx1_dt_board_compat, 42 .dt_compat = imx1_dt_board_compat,
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index 699157759120..c03bf28d8bbc 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -60,7 +60,6 @@
60 60
61#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu) 61#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
62 62
63static enum cpuhp_state cpuhp_mmdc_state;
64static int ddr_type; 63static int ddr_type;
65 64
66struct fsl_mmdc_devtype_data { 65struct fsl_mmdc_devtype_data {
@@ -82,6 +81,7 @@ static const struct of_device_id imx_mmdc_dt_ids[] = {
82 81
83#ifdef CONFIG_PERF_EVENTS 82#ifdef CONFIG_PERF_EVENTS
84 83
84static enum cpuhp_state cpuhp_mmdc_state;
85static DEFINE_IDA(mmdc_ida); 85static DEFINE_IDA(mmdc_ida);
86 86
87PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00") 87PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index f6ba589cd312..c821c1d5610e 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -32,7 +32,6 @@
32#include "soc.h" 32#include "soc.h"
33 33
34#define OMAP1_DMA_BASE (0xfffed800) 34#define OMAP1_DMA_BASE (0xfffed800)
35#define OMAP1_LOGICAL_DMA_CH_COUNT 17
36 35
37static u32 enable_1510_mode; 36static u32 enable_1510_mode;
38 37
@@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void)
348 goto exit_iounmap; 347 goto exit_iounmap;
349 } 348 }
350 349
351 d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
352
353 /* Valid attributes for omap1 plus processors */ 350 /* Valid attributes for omap1 plus processors */
354 if (cpu_is_omap15xx()) 351 if (cpu_is_omap15xx())
355 d->dev_caps = ENABLE_1510_MODE; 352 d->dev_caps = ENABLE_1510_MODE;
@@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void)
366 d->dev_caps |= CLEAR_CSR_ON_READ; 363 d->dev_caps |= CLEAR_CSR_ON_READ;
367 d->dev_caps |= IS_WORD_16; 364 d->dev_caps |= IS_WORD_16;
368 365
369 if (cpu_is_omap15xx()) 366 /* available logical channels */
370 d->chan_count = 9; 367 if (cpu_is_omap15xx()) {
371 else if (cpu_is_omap16xx() || cpu_is_omap7xx()) { 368 d->lch_count = 9;
372 if (!(d->dev_caps & ENABLE_1510_MODE)) 369 } else {
373 d->chan_count = 16; 370 if (d->dev_caps & ENABLE_1510_MODE)
371 d->lch_count = 9;
374 else 372 else
375 d->chan_count = 9; 373 d->lch_count = 16;
376 } 374 }
377 375
378 p = dma_plat_info; 376 p = dma_plat_info;
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 469894082fea..093458b62c8d 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -7,7 +7,7 @@ ccflags-y := -I$(srctree)/$(src)/include \
7 7
8# Common support 8# Common support
9obj-y := id.o io.o control.o devices.o fb.o timer.o pm.o \ 9obj-y := id.o io.o control.o devices.o fb.o timer.o pm.o \
10 common.o gpio.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \ 10 common.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
11 omap_device.o omap-headsmp.o sram.o drm.o 11 omap_device.o omap-headsmp.o sram.o drm.o
12 12
13hwmod-common = omap_hwmod.o omap_hwmod_reset.o \ 13hwmod-common = omap_hwmod.o omap_hwmod_reset.o \
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 36d9943205ca..dc9e34e670a2 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -304,7 +304,7 @@ DT_MACHINE_START(AM43_DT, "Generic AM43 (Flattened Device Tree)")
304 .init_late = am43xx_init_late, 304 .init_late = am43xx_init_late,
305 .init_irq = omap_gic_of_init, 305 .init_irq = omap_gic_of_init,
306 .init_machine = omap_generic_init, 306 .init_machine = omap_generic_init,
307 .init_time = omap4_local_timer_init, 307 .init_time = omap3_gptimer_timer_init,
308 .dt_compat = am43_boards_compat, 308 .dt_compat = am43_boards_compat,
309 .restart = omap44xx_restart, 309 .restart = omap44xx_restart,
310MACHINE_END 310MACHINE_END
diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
deleted file mode 100644
index 7a577145b68b..000000000000
--- a/arch/arm/mach-omap2/gpio.c
+++ /dev/null
@@ -1,160 +0,0 @@
1/*
2 * OMAP2+ specific gpio initialization
3 *
4 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
5 *
6 * Author:
7 * Charulatha V <charu@ti.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation version 2.
12 *
13 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
14 * kind, whether express or implied; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#include <linux/gpio.h>
20#include <linux/err.h>
21#include <linux/slab.h>
22#include <linux/interrupt.h>
23#include <linux/of.h>
24#include <linux/platform_data/gpio-omap.h>
25
26#include "soc.h"
27#include "omap_hwmod.h"
28#include "omap_device.h"
29#include "omap-pm.h"
30
31#include "powerdomain.h"
32
33static int __init omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
34{
35 struct platform_device *pdev;
36 struct omap_gpio_platform_data *pdata;
37 struct omap_gpio_dev_attr *dev_attr;
38 char *name = "omap_gpio";
39 int id;
40 struct powerdomain *pwrdm;
41
42 /*
43 * extract the device id from name field available in the
44 * hwmod database and use the same for constructing ids for
45 * gpio devices.
46 * CAUTION: Make sure the name in the hwmod database does
47 * not change. If changed, make corresponding change here
48 * or make use of static variable mechanism to handle this.
49 */
50 sscanf(oh->name, "gpio%d", &id);
51
52 pdata = kzalloc(sizeof(struct omap_gpio_platform_data), GFP_KERNEL);
53 if (!pdata) {
54 pr_err("gpio%d: Memory allocation failed\n", id);
55 return -ENOMEM;
56 }
57
58 dev_attr = (struct omap_gpio_dev_attr *)oh->dev_attr;
59 pdata->bank_width = dev_attr->bank_width;
60 pdata->dbck_flag = dev_attr->dbck_flag;
61 pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count;
62 pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL);
63 if (!pdata->regs) {
64 pr_err("gpio%d: Memory allocation failed\n", id);
65 kfree(pdata);
66 return -ENOMEM;
67 }
68
69 switch (oh->class->rev) {
70 case 0:
71 if (id == 1)
72 /* non-wakeup GPIO pins for OMAP2 Bank1 */
73 pdata->non_wakeup_gpios = 0xe203ffc0;
74 else if (id == 2)
75 /* non-wakeup GPIO pins for OMAP2 Bank2 */
76 pdata->non_wakeup_gpios = 0x08700040;
77 /* fall through */
78
79 case 1:
80 pdata->regs->revision = OMAP24XX_GPIO_REVISION;
81 pdata->regs->direction = OMAP24XX_GPIO_OE;
82 pdata->regs->datain = OMAP24XX_GPIO_DATAIN;
83 pdata->regs->dataout = OMAP24XX_GPIO_DATAOUT;
84 pdata->regs->set_dataout = OMAP24XX_GPIO_SETDATAOUT;
85 pdata->regs->clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT;
86 pdata->regs->irqstatus = OMAP24XX_GPIO_IRQSTATUS1;
87 pdata->regs->irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2;
88 pdata->regs->irqenable = OMAP24XX_GPIO_IRQENABLE1;
89 pdata->regs->irqenable2 = OMAP24XX_GPIO_IRQENABLE2;
90 pdata->regs->set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1;
91 pdata->regs->clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1;
92 pdata->regs->debounce = OMAP24XX_GPIO_DEBOUNCE_VAL;
93 pdata->regs->debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN;
94 pdata->regs->ctrl = OMAP24XX_GPIO_CTRL;
95 pdata->regs->wkup_en = OMAP24XX_GPIO_WAKE_EN;
96 pdata->regs->leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0;
97 pdata->regs->leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1;
98 pdata->regs->risingdetect = OMAP24XX_GPIO_RISINGDETECT;
99 pdata->regs->fallingdetect = OMAP24XX_GPIO_FALLINGDETECT;
100 break;
101 case 2:
102 pdata->regs->revision = OMAP4_GPIO_REVISION;
103 pdata->regs->direction = OMAP4_GPIO_OE;
104 pdata->regs->datain = OMAP4_GPIO_DATAIN;
105 pdata->regs->dataout = OMAP4_GPIO_DATAOUT;
106 pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT;
107 pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT;
108 pdata->regs->irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0;
109 pdata->regs->irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1;
110 pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0;
111 pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1;
112 pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0;
113 pdata->regs->irqenable2 = OMAP4_GPIO_IRQSTATUSSET1;
114 pdata->regs->set_irqenable = OMAP4_GPIO_IRQSTATUSSET0;
115 pdata->regs->clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0;
116 pdata->regs->debounce = OMAP4_GPIO_DEBOUNCINGTIME;
117 pdata->regs->debounce_en = OMAP4_GPIO_DEBOUNCENABLE;
118 pdata->regs->ctrl = OMAP4_GPIO_CTRL;
119 pdata->regs->wkup_en = OMAP4_GPIO_IRQWAKEN0;
120 pdata->regs->leveldetect0 = OMAP4_GPIO_LEVELDETECT0;
121 pdata->regs->leveldetect1 = OMAP4_GPIO_LEVELDETECT1;
122 pdata->regs->risingdetect = OMAP4_GPIO_RISINGDETECT;
123 pdata->regs->fallingdetect = OMAP4_GPIO_FALLINGDETECT;
124 break;
125 default:
126 WARN(1, "Invalid gpio bank_type\n");
127 kfree(pdata->regs);
128 kfree(pdata);
129 return -EINVAL;
130 }
131
132 pwrdm = omap_hwmod_get_pwrdm(oh);
133 pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm);
134
135 pdev = omap_device_build(name, id - 1, oh, pdata, sizeof(*pdata));
136 kfree(pdata);
137
138 if (IS_ERR(pdev)) {
139 WARN(1, "Can't build omap_device for %s:%s.\n",
140 name, oh->name);
141 return PTR_ERR(pdev);
142 }
143
144 return 0;
145}
146
147/*
148 * gpio_init needs to be done before
149 * machine_init functions access gpio APIs.
150 * Hence gpio_init is a omap_postcore_initcall.
151 */
152static int __init omap2_gpio_init(void)
153{
154 /* If dtb is there, the devices will be created dynamically */
155 if (of_have_populated_dt())
156 return -ENODEV;
157
158 return omap_hwmod_for_each_by_class("gpio", omap2_gpio_dev_init, NULL);
159}
160omap_postcore_initcall(omap2_gpio_init);
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index fe36ce2734d4..4c6f14cf92a8 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -17,6 +17,7 @@
17 17
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <asm/assembler.h>
20 21
21#include "omap44xx.h" 22#include "omap44xx.h"
22 23
@@ -66,7 +67,7 @@ wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
66 cmp r0, r4 67 cmp r0, r4
67 bne wait_2 68 bne wait_2
68 ldr r12, =API_HYP_ENTRY 69 ldr r12, =API_HYP_ENTRY
69 adr r0, hyp_boot 70 badr r0, hyp_boot
70 smc #0 71 smc #0
71hyp_boot: 72hyp_boot:
72 b omap_secondary_startup 73 b omap_secondary_startup
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 759e1d45ba25..e8b988714a09 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -741,14 +741,14 @@ static int _init_main_clk(struct omap_hwmod *oh)
741 int ret = 0; 741 int ret = 0;
742 char name[MOD_CLK_MAX_NAME_LEN]; 742 char name[MOD_CLK_MAX_NAME_LEN];
743 struct clk *clk; 743 struct clk *clk;
744 static const char modck[] = "_mod_ck";
744 745
745 /* +7 magic comes from '_mod_ck' suffix */ 746 if (strlen(oh->name) >= MOD_CLK_MAX_NAME_LEN - strlen(modck))
746 if (strlen(oh->name) + 7 > MOD_CLK_MAX_NAME_LEN)
747 pr_warn("%s: warning: cropping name for %s\n", __func__, 747 pr_warn("%s: warning: cropping name for %s\n", __func__,
748 oh->name); 748 oh->name);
749 749
750 strncpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - 7); 750 strlcpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - strlen(modck));
751 strcat(name, "_mod_ck"); 751 strlcat(name, modck, MOD_CLK_MAX_NAME_LEN);
752 752
753 clk = clk_get(NULL, name); 753 clk = clk_get(NULL, name);
754 if (!IS_ERR(clk)) { 754 if (!IS_ERR(clk)) {
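The change above replaces a strncpy()/strcat() pair, whose "+7" bound check left no room for the terminating NUL and whose strncpy() could leave name unterminated before the concat, with strlcpy()/strlcat() bounded by the suffix length. A portable sketch of the same invariant in standard C, with an assumed illustrative buffer size:

#include <stdio.h>
#include <string.h>

#define MOD_CLK_MAX_NAME_LEN	32	/* illustrative value, not the kernel's */

/* Reserve room for the "_mod_ck" suffix plus the NUL up front, then do
 * one bounded write. The merged patch uses the kernel's strlcpy()/
 * strlcat(); snprintf() expresses the same bound in standard C. */
static void build_main_clk_name(char *name, size_t len, const char *oh_name)
{
	static const char modck[] = "_mod_ck";

	if (strlen(oh_name) >= len - strlen(modck))
		fprintf(stderr, "warning: cropping name for %s\n", oh_name);

	snprintf(name, len, "%.*s%s",
		 (int)(len - strlen(modck) - 1), oh_name, modck);
}

int main(void)
{
	char name[MOD_CLK_MAX_NAME_LEN];

	build_main_clk_name(name, sizeof(name), "l4_wkup");
	puts(name);	/* prints "l4_wkup_mod_ck" */
	return 0;
}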
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 56f917ec8621..507ff0795a8e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
2112}; 2112};
2113 2113
2114/* L4 CORE -> SR1 interface */ 2114/* L4 CORE -> SR1 interface */
2115static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
2116 {
2117 .pa_start = OMAP34XX_SR1_BASE,
2118 .pa_end = OMAP34XX_SR1_BASE + SZ_1K - 1,
2119 .flags = ADDR_TYPE_RT,
2120 },
2121 { },
2122};
2115 2123
2116static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = { 2124static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
2117 .master = &omap3xxx_l4_core_hwmod, 2125 .master = &omap3xxx_l4_core_hwmod,
2118 .slave = &omap34xx_sr1_hwmod, 2126 .slave = &omap34xx_sr1_hwmod,
2119 .clk = "sr_l4_ick", 2127 .clk = "sr_l4_ick",
2128 .addr = omap3_sr1_addr_space,
2120 .user = OCP_USER_MPU, 2129 .user = OCP_USER_MPU,
2121}; 2130};
2122 2131
@@ -2124,15 +2133,25 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = {
2124 .master = &omap3xxx_l4_core_hwmod, 2133 .master = &omap3xxx_l4_core_hwmod,
2125 .slave = &omap36xx_sr1_hwmod, 2134 .slave = &omap36xx_sr1_hwmod,
2126 .clk = "sr_l4_ick", 2135 .clk = "sr_l4_ick",
2136 .addr = omap3_sr1_addr_space,
2127 .user = OCP_USER_MPU, 2137 .user = OCP_USER_MPU,
2128}; 2138};
2129 2139
2130/* L4 CORE -> SR1 interface */ 2140/* L4 CORE -> SR1 interface */
2141static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
2142 {
2143 .pa_start = OMAP34XX_SR2_BASE,
2144 .pa_end = OMAP34XX_SR2_BASE + SZ_1K - 1,
2145 .flags = ADDR_TYPE_RT,
2146 },
2147 { },
2148};
2131 2149
2132static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = { 2150static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
2133 .master = &omap3xxx_l4_core_hwmod, 2151 .master = &omap3xxx_l4_core_hwmod,
2134 .slave = &omap34xx_sr2_hwmod, 2152 .slave = &omap34xx_sr2_hwmod,
2135 .clk = "sr_l4_ick", 2153 .clk = "sr_l4_ick",
2154 .addr = omap3_sr2_addr_space,
2136 .user = OCP_USER_MPU, 2155 .user = OCP_USER_MPU,
2137}; 2156};
2138 2157
@@ -2140,6 +2159,7 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = {
2140 .master = &omap3xxx_l4_core_hwmod, 2159 .master = &omap3xxx_l4_core_hwmod,
2141 .slave = &omap36xx_sr2_hwmod, 2160 .slave = &omap36xx_sr2_hwmod,
2142 .clk = "sr_l4_ick", 2161 .clk = "sr_l4_ick",
2162 .addr = omap3_sr2_addr_space,
2143 .user = OCP_USER_MPU, 2163 .user = OCP_USER_MPU,
2144}; 2164};
2145 2165
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.h b/arch/arm/mach-omap2/omap_hwmod_common_data.h
index cdfbb44ceb0c..f22e9cb39f4a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.h
@@ -121,10 +121,6 @@ extern struct omap_hwmod_irq_info omap2_uart3_mpu_irqs[];
121extern struct omap_hwmod_irq_info omap2_dispc_irqs[]; 121extern struct omap_hwmod_irq_info omap2_dispc_irqs[];
122extern struct omap_hwmod_irq_info omap2_i2c1_mpu_irqs[]; 122extern struct omap_hwmod_irq_info omap2_i2c1_mpu_irqs[];
123extern struct omap_hwmod_irq_info omap2_i2c2_mpu_irqs[]; 123extern struct omap_hwmod_irq_info omap2_i2c2_mpu_irqs[];
124extern struct omap_hwmod_irq_info omap2_gpio1_irqs[];
125extern struct omap_hwmod_irq_info omap2_gpio2_irqs[];
126extern struct omap_hwmod_irq_info omap2_gpio3_irqs[];
127extern struct omap_hwmod_irq_info omap2_gpio4_irqs[];
128extern struct omap_hwmod_irq_info omap2_dma_system_irqs[]; 124extern struct omap_hwmod_irq_info omap2_dma_system_irqs[];
129extern struct omap_hwmod_irq_info omap2_mcspi1_mpu_irqs[]; 125extern struct omap_hwmod_irq_info omap2_mcspi1_mpu_irqs[];
130extern struct omap_hwmod_irq_info omap2_mcspi2_mpu_irqs[]; 126extern struct omap_hwmod_irq_info omap2_mcspi2_mpu_irqs[];
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 477910a48448..70c004794880 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -161,7 +161,7 @@ static struct ti_st_plat_data wilink7_pdata = {
161 .nshutdown_gpio = 162, 161 .nshutdown_gpio = 162,
162 .dev_name = "/dev/ttyO1", 162 .dev_name = "/dev/ttyO1",
163 .flow_cntrl = 1, 163 .flow_cntrl = 1,
164 .baud_rate = 300000, 164 .baud_rate = 3000000,
165}; 165};
166 166
167static struct platform_device wl128x_device = { 167static struct platform_device wl128x_device = {
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 5b2f5138d938..2b138b65129a 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -295,10 +295,8 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
295 GFP_KERNEL); 295 GFP_KERNEL);
296 296
297 if (!prcm_irq_chips || !prcm_irq_setup->saved_mask || 297 if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
298 !prcm_irq_setup->priority_mask) { 298 !prcm_irq_setup->priority_mask)
299 pr_err("PRCM: kzalloc failed\n");
300 goto err; 299 goto err;
301 }
302 300
303 memset(mask, 0, sizeof(mask)); 301 memset(mask, 0, sizeof(mask));
304 302
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 56128da23c3a..07dd692c4737 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -510,18 +510,19 @@ void __init omap3_secure_sync32k_timer_init(void)
510} 510}
511#endif /* CONFIG_ARCH_OMAP3 */ 511#endif /* CONFIG_ARCH_OMAP3 */
512 512
513#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX) 513#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX) || \
514 defined(CONFIG_SOC_AM43XX)
514void __init omap3_gptimer_timer_init(void) 515void __init omap3_gptimer_timer_init(void)
515{ 516{
516 __omap_sync32k_timer_init(2, "timer_sys_ck", NULL, 517 __omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
517 1, "timer_sys_ck", "ti,timer-alwon", true); 518 1, "timer_sys_ck", "ti,timer-alwon", true);
518 519 if (of_have_populated_dt())
519 clocksource_probe(); 520 clocksource_probe();
520} 521}
521#endif 522#endif
522 523
523#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ 524#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
524 defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX) 525 defined(CONFIG_SOC_DRA7XX)
525static void __init omap4_sync32k_timer_init(void) 526static void __init omap4_sync32k_timer_init(void)
526{ 527{
527 __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon", 528 __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
diff --git a/arch/arm/mach-s3c24xx/common.c b/arch/arm/mach-s3c24xx/common.c
index f6c3f151d0d4..b59f4f4f256f 100644
--- a/arch/arm/mach-s3c24xx/common.c
+++ b/arch/arm/mach-s3c24xx/common.c
@@ -345,10 +345,40 @@ static struct s3c24xx_dma_channel s3c2410_dma_channels[DMACH_MAX] = {
345 [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), }, 345 [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), },
346}; 346};
347 347
348static const struct dma_slave_map s3c2410_dma_slave_map[] = {
349 { "s3c2410-sdi", "rx-tx", (void *)DMACH_SDI },
350 { "s3c2410-spi.0", "rx", (void *)DMACH_SPI0_RX },
351 { "s3c2410-spi.0", "tx", (void *)DMACH_SPI0_TX },
352 { "s3c2410-spi.1", "rx", (void *)DMACH_SPI1_RX },
353 { "s3c2410-spi.1", "tx", (void *)DMACH_SPI1_TX },
354 /*
355 * The DMA request source[1] (DMACH_UARTx_SRC2) is
356 * not used in the UART driver.
357 */
358 { "s3c2410-uart.0", "rx", (void *)DMACH_UART0 },
359 { "s3c2410-uart.0", "tx", (void *)DMACH_UART0 },
360 { "s3c2410-uart.1", "rx", (void *)DMACH_UART1 },
361 { "s3c2410-uart.1", "tx", (void *)DMACH_UART1 },
362 { "s3c2410-uart.2", "rx", (void *)DMACH_UART2 },
363 { "s3c2410-uart.2", "tx", (void *)DMACH_UART2 },
364 { "s3c24xx-iis", "rx", (void *)DMACH_I2S_IN },
365 { "s3c24xx-iis", "tx", (void *)DMACH_I2S_OUT },
366 { "s3c-hsudc", "rx0", (void *)DMACH_USB_EP1 },
367 { "s3c-hsudc", "tx0", (void *)DMACH_USB_EP1 },
368 { "s3c-hsudc", "rx1", (void *)DMACH_USB_EP2 },
369 { "s3c-hsudc", "tx1", (void *)DMACH_USB_EP2 },
370 { "s3c-hsudc", "rx2", (void *)DMACH_USB_EP3 },
371 { "s3c-hsudc", "tx2", (void *)DMACH_USB_EP3 },
372 { "s3c-hsudc", "rx3", (void *)DMACH_USB_EP4 },
373 { "s3c-hsudc", "tx3", (void *)DMACH_USB_EP4 }
374};
375
348static struct s3c24xx_dma_platdata s3c2410_dma_platdata = { 376static struct s3c24xx_dma_platdata s3c2410_dma_platdata = {
349 .num_phy_channels = 4, 377 .num_phy_channels = 4,
350 .channels = s3c2410_dma_channels, 378 .channels = s3c2410_dma_channels,
351 .num_channels = DMACH_MAX, 379 .num_channels = DMACH_MAX,
380 .slave_map = s3c2410_dma_slave_map,
381 .slavecnt = ARRAY_SIZE(s3c2410_dma_slave_map),
352}; 382};
353 383
354struct platform_device s3c2410_device_dma = { 384struct platform_device s3c2410_device_dma = {
@@ -388,10 +418,36 @@ static struct s3c24xx_dma_channel s3c2412_dma_channels[DMACH_MAX] = {
388 [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, 16 }, 418 [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, 16 },
389}; 419};
390 420
421static const struct dma_slave_map s3c2412_dma_slave_map[] = {
422 { "s3c2412-sdi", "rx-tx", (void *)DMACH_SDI },
423 { "s3c2412-spi.0", "rx", (void *)DMACH_SPI0_RX },
424 { "s3c2412-spi.0", "tx", (void *)DMACH_SPI0_TX },
425 { "s3c2412-spi.1", "rx", (void *)DMACH_SPI1_RX },
426 { "s3c2412-spi.1", "tx", (void *)DMACH_SPI1_TX },
427 { "s3c2440-uart.0", "rx", (void *)DMACH_UART0 },
428 { "s3c2440-uart.0", "tx", (void *)DMACH_UART0 },
429 { "s3c2440-uart.1", "rx", (void *)DMACH_UART1 },
430 { "s3c2440-uart.1", "tx", (void *)DMACH_UART1 },
431 { "s3c2440-uart.2", "rx", (void *)DMACH_UART2 },
432 { "s3c2440-uart.2", "tx", (void *)DMACH_UART2 },
433 { "s3c2412-iis", "rx", (void *)DMACH_I2S_IN },
434 { "s3c2412-iis", "tx", (void *)DMACH_I2S_OUT },
435 { "s3c-hsudc", "rx0", (void *)DMACH_USB_EP1 },
436 { "s3c-hsudc", "tx0", (void *)DMACH_USB_EP1 },
437 { "s3c-hsudc", "rx1", (void *)DMACH_USB_EP2 },
438 { "s3c-hsudc", "tx1", (void *)DMACH_USB_EP2 },
439 { "s3c-hsudc", "rx2", (void *)DMACH_USB_EP3 },
440 { "s3c-hsudc", "tx2", (void *)DMACH_USB_EP3 },
441 { "s3c-hsudc", "rx3", (void *)DMACH_USB_EP4 },
442 { "s3c-hsudc", "tx3", (void *)DMACH_USB_EP4 }
443};
444
391static struct s3c24xx_dma_platdata s3c2412_dma_platdata = { 445static struct s3c24xx_dma_platdata s3c2412_dma_platdata = {
392 .num_phy_channels = 4, 446 .num_phy_channels = 4,
393 .channels = s3c2412_dma_channels, 447 .channels = s3c2412_dma_channels,
394 .num_channels = DMACH_MAX, 448 .num_channels = DMACH_MAX,
449 .slave_map = s3c2412_dma_slave_map,
450 .slavecnt = ARRAY_SIZE(s3c2412_dma_slave_map),
395}; 451};
396 452
397struct platform_device s3c2412_device_dma = { 453struct platform_device s3c2412_device_dma = {
@@ -534,10 +590,30 @@ static struct s3c24xx_dma_channel s3c2443_dma_channels[DMACH_MAX] = {
534 [DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, 29 }, 590 [DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, 29 },
535}; 591};
536 592
593static const struct dma_slave_map s3c2443_dma_slave_map[] = {
594 { "s3c2440-sdi", "rx-tx", (void *)DMACH_SDI },
595 { "s3c2443-spi.0", "rx", (void *)DMACH_SPI0_RX },
596 { "s3c2443-spi.0", "tx", (void *)DMACH_SPI0_TX },
597 { "s3c2443-spi.1", "rx", (void *)DMACH_SPI1_RX },
598 { "s3c2443-spi.1", "tx", (void *)DMACH_SPI1_TX },
599 { "s3c2440-uart.0", "rx", (void *)DMACH_UART0 },
600 { "s3c2440-uart.0", "tx", (void *)DMACH_UART0 },
601 { "s3c2440-uart.1", "rx", (void *)DMACH_UART1 },
602 { "s3c2440-uart.1", "tx", (void *)DMACH_UART1 },
603 { "s3c2440-uart.2", "rx", (void *)DMACH_UART2 },
604 { "s3c2440-uart.2", "tx", (void *)DMACH_UART2 },
605 { "s3c2440-uart.3", "rx", (void *)DMACH_UART3 },
606 { "s3c2440-uart.3", "tx", (void *)DMACH_UART3 },
607 { "s3c24xx-iis", "rx", (void *)DMACH_I2S_IN },
608 { "s3c24xx-iis", "tx", (void *)DMACH_I2S_OUT },
609};
610
537static struct s3c24xx_dma_platdata s3c2443_dma_platdata = { 611static struct s3c24xx_dma_platdata s3c2443_dma_platdata = {
538 .num_phy_channels = 6, 612 .num_phy_channels = 6,
539 .channels = s3c2443_dma_channels, 613 .channels = s3c2443_dma_channels,
540 .num_channels = DMACH_MAX, 614 .num_channels = DMACH_MAX,
615 .slave_map = s3c2443_dma_slave_map,
616 .slavecnt = ARRAY_SIZE(s3c2443_dma_slave_map),
541}; 617};
542 618
543struct platform_device s3c2443_device_dma = { 619struct platform_device s3c2443_device_dma = {
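The three dma_slave_map tables added above let client drivers request channels by (device name, channel name) pair instead of hard-coding DMACH_* ids. A self-contained sketch of the lookup the dmaengine core performs before handing param to the controller driver's filter, using hypothetical table contents:

#include <stdio.h>
#include <string.h>

struct dma_slave_map {
	const char *devname;
	const char *slave;
	void *param;	/* driver-private, e.g. (void *)DMACH_UART0 */
};

static const struct dma_slave_map map[] = {
	{ "s3c2410-uart.0", "rx",    (void *)1 },	/* hypothetical ids */
	{ "s3c2410-uart.0", "tx",    (void *)1 },
	{ "s3c2410-sdi",    "rx-tx", (void *)2 },
};

/* Match (devname, slave) the way dma_request_chan() does internally. */
static void *slave_map_lookup(const char *devname, const char *slave)
{
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (!strcmp(map[i].devname, devname) &&
		    !strcmp(map[i].slave, slave))
			return map[i].param;
	return NULL;
}

int main(void)
{
	printf("%p\n", slave_map_lookup("s3c2410-uart.0", "tx"));
	return 0;
}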
diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
index 8538910db202..a970e7fcba9e 100644
--- a/arch/arm/mach-ux500/pm.c
+++ b/arch/arm/mach-ux500/pm.c
@@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
134 */ 134 */
135bool prcmu_is_cpu_in_wfi(int cpu) 135bool prcmu_is_cpu_in_wfi(int cpu)
136{ 136{
137 return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : 137 return readl(PRCM_ARM_WFI_STANDBY) &
138 PRCM_ARM_WFI_STANDBY_WFI0; 138 (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
139} 139}
140 140
141/* 141/*
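The one-line fix above is a classic precedence bug: '?:' binds more loosely than '&', so the old expression evaluated (readl(...) & cpu) as the condition and returned a bare mask constant instead of masking the register with the per-CPU bit. A standalone demonstration of the divergence:

#include <stdio.h>

int main(void)
{
	unsigned int status = 0x1;	/* pretend only CPU0 is in WFI */
	int cpu = 1;			/* but we ask about CPU1 */
	unsigned int WFI0 = 0x1, WFI1 = 0x2;

	unsigned int buggy = status & cpu ? WFI1 : WFI0;	/* parses as (status & cpu) ? ... */
	unsigned int fixed = status & (cpu ? WFI1 : WFI0);	/* intended masking */

	/* buggy=0x2 (truthy, wrong answer); fixed=0 (correct) */
	printf("buggy=%#x fixed=%#x\n", buggy, fixed);
	return 0;
}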
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 3a2e678b8d30..0122ad1a6027 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
610 610
611void __init early_abt_enable(void) 611void __init early_abt_enable(void)
612{ 612{
613 fsr_info[22].fn = early_abort_handler; 613 fsr_info[FSR_FS_AEA].fn = early_abort_handler;
614 local_abt_enable(); 614 local_abt_enable();
615 fsr_info[22].fn = do_bad; 615 fsr_info[FSR_FS_AEA].fn = do_bad;
616} 616}
617 617
618#ifndef CONFIG_ARM_LPAE 618#ifndef CONFIG_ARM_LPAE
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 67532f242271..afc1f84e763b 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -11,11 +11,15 @@
11#define FSR_FS5_0 (0x3f) 11#define FSR_FS5_0 (0x3f)
12 12
13#ifdef CONFIG_ARM_LPAE 13#ifdef CONFIG_ARM_LPAE
14#define FSR_FS_AEA 17
15
14static inline int fsr_fs(unsigned int fsr) 16static inline int fsr_fs(unsigned int fsr)
15{ 17{
16 return fsr & FSR_FS5_0; 18 return fsr & FSR_FS5_0;
17} 19}
18#else 20#else
21#define FSR_FS_AEA 22
22
19static inline int fsr_fs(unsigned int fsr) 23static inline int fsr_fs(unsigned int fsr)
20{ 24{
21 return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6; 25 return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
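The fault.c and fault.h hunks pair up: the asynchronous external abort's fault-status index is 22 only in the short-descriptor format, so the magic number becomes a named constant with an LPAE-specific value and early_abt_enable() indexes fsr_info[] by name. Taken together, the shape is:

#ifdef CONFIG_ARM_LPAE
#define FSR_FS_AEA	17	/* long-descriptor encoding */
#else
#define FSR_FS_AEA	22	/* short-descriptor encoding */
#endif

/* usage: fsr_info[FSR_FS_AEA].fn = early_abort_handler; */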
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index fc033c0d2a0f..0cbe24b49710 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -55,6 +55,24 @@
55 #address-cells = <2>; 55 #address-cells = <2>;
56 #size-cells = <2>; 56 #size-cells = <2>;
57 57
58 reserved-memory {
59 #address-cells = <2>;
60 #size-cells = <2>;
61 ranges;
62
63 /* 16 MiB reserved for Hardware ROM Firmware */
64 hwrom_reserved: hwrom@0 {
65 reg = <0x0 0x0 0x0 0x1000000>;
66 no-map;
67 };
68
69 /* 2 MiB reserved for ARM Trusted Firmware (BL31) */
70 secmon_reserved: secmon@10000000 {
71 reg = <0x0 0x10000000 0x0 0x200000>;
72 no-map;
73 };
74 };
75
58 cpus { 76 cpus {
59 #address-cells = <0x2>; 77 #address-cells = <0x2>;
60 #size-cells = <0x0>; 78 #size-cells = <0x0>;
@@ -356,5 +374,21 @@
356 status = "disabled"; 374 status = "disabled";
357 }; 375 };
358 }; 376 };
377
378 vpu: vpu@d0100000 {
379 compatible = "amlogic,meson-gx-vpu";
380 reg = <0x0 0xd0100000 0x0 0x100000>,
381 <0x0 0xc883c000 0x0 0x1000>,
382 <0x0 0xc8838000 0x0 0x1000>;
383 reg-names = "vpu", "hhi", "dmc";
384 interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
385 #address-cells = <1>;
386 #size-cells = <0>;
387
388 /* CVBS VDAC output port */
389 cvbs_vdac_port: port@0 {
390 reg = <0>;
391 };
392 };
359 }; 393 };
360}; 394};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index 969682092e0f..4cbd626a9e88 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -142,6 +142,16 @@
142 clocks = <&wifi32k>; 142 clocks = <&wifi32k>;
143 clock-names = "ext_clock"; 143 clock-names = "ext_clock";
144 }; 144 };
145
146 cvbs-connector {
147 compatible = "composite-video-connector";
148
149 port {
150 cvbs_connector_in: endpoint {
151 remote-endpoint = <&cvbs_vdac_out>;
152 };
153 };
154 };
145}; 155};
146 156
147&uart_AO { 157&uart_AO {
@@ -229,3 +239,9 @@
229 clocks = <&clkc CLKID_FCLK_DIV4>; 239 clocks = <&clkc CLKID_FCLK_DIV4>;
230 clock-names = "clkin0"; 240 clock-names = "clkin0";
231}; 241};
242
243&cvbs_vdac_port {
244 cvbs_vdac_out: endpoint {
245 remote-endpoint = <&cvbs_connector_in>;
246 };
247};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index 238fbeacd330..c59403adb387 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -137,6 +137,10 @@
137 }; 137 };
138}; 138};
139 139
140&scpi_clocks {
141 status = "disabled";
142};
143
140&uart_AO { 144&uart_AO {
141 status = "okay"; 145 status = "okay";
142 pinctrl-0 = <&uart_ao_a_pins>; 146 pinctrl-0 = <&uart_ao_a_pins>;
@@ -147,6 +151,18 @@
147 status = "okay"; 151 status = "okay";
148 pinctrl-0 = <&eth_rgmii_pins>; 152 pinctrl-0 = <&eth_rgmii_pins>;
149 pinctrl-names = "default"; 153 pinctrl-names = "default";
154 phy-handle = <&eth_phy0>;
155
156 mdio {
157 compatible = "snps,dwmac-mdio";
158 #address-cells = <1>;
159 #size-cells = <0>;
160
161 eth_phy0: ethernet-phy@0 {
162 reg = <0>;
163 eee-broken-1000t;
164 };
165 };
150}; 166};
151 167
152&ir { 168&ir {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index 203be28978d5..4a96e0f6f926 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -125,6 +125,16 @@
125 clocks = <&wifi32k>; 125 clocks = <&wifi32k>;
126 clock-names = "ext_clock"; 126 clock-names = "ext_clock";
127 }; 127 };
128
129 cvbs-connector {
130 compatible = "composite-video-connector";
131
132 port {
133 cvbs_connector_in: endpoint {
134 remote-endpoint = <&cvbs_vdac_out>;
135 };
136 };
137 };
128}; 138};
129 139
130/* This UART is brought out to the DB9 connector */ 140/* This UART is brought out to the DB9 connector */
@@ -234,3 +244,9 @@
234 clocks = <&clkc CLKID_FCLK_DIV4>; 244 clocks = <&clkc CLKID_FCLK_DIV4>;
235 clock-names = "clkin0"; 245 clock-names = "clkin0";
236}; 246};
247
248&cvbs_vdac_port {
249 cvbs_vdac_out: endpoint {
250 remote-endpoint = <&cvbs_connector_in>;
251 };
252};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 51edd5b5c460..b35307321b63 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -55,7 +55,7 @@
55 mboxes = <&mailbox 1 &mailbox 2>; 55 mboxes = <&mailbox 1 &mailbox 2>;
56 shmem = <&cpu_scp_lpri &cpu_scp_hpri>; 56 shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
57 57
58 clocks { 58 scpi_clocks: clocks {
59 compatible = "arm,scpi-clocks"; 59 compatible = "arm,scpi-clocks";
60 60
61 scpi_dvfs: scpi_clocks@0 { 61 scpi_dvfs: scpi_clocks@0 {
@@ -506,3 +506,7 @@
506 <&clkc CLKID_FCLK_DIV2>; 506 <&clkc CLKID_FCLK_DIV2>;
507 clock-names = "core", "clkin0", "clkin1"; 507 clock-names = "core", "clkin0", "clkin1";
508}; 508};
509
510&vpu {
511 compatible = "amlogic,meson-gxbb-vpu", "amlogic,meson-gx-vpu";
512};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts
index e99101ae9664..cea4a3eded9b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts
@@ -117,6 +117,16 @@
117 clocks = <&wifi32k>; 117 clocks = <&wifi32k>;
118 clock-names = "ext_clock"; 118 clock-names = "ext_clock";
119 }; 119 };
120
121 cvbs-connector {
122 compatible = "composite-video-connector";
123
124 port {
125 cvbs_connector_in: endpoint {
126 remote-endpoint = <&cvbs_vdac_out>;
127 };
128 };
129 };
120}; 130};
121 131
122&uart_AO { 132&uart_AO {
@@ -203,3 +213,9 @@
203 clocks = <&clkc CLKID_FCLK_DIV4>; 213 clocks = <&clkc CLKID_FCLK_DIV4>;
204 clock-names = "clkin0"; 214 clock-names = "clkin0";
205}; 215};
216
217&cvbs_vdac_port {
218 cvbs_vdac_out: endpoint {
219 remote-endpoint = <&cvbs_connector_in>;
220 };
221};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index 9f89b99c4806..69216246275d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -43,7 +43,7 @@
43 43
44#include "meson-gx.dtsi" 44#include "meson-gx.dtsi"
45#include <dt-bindings/clock/gxbb-clkc.h> 45#include <dt-bindings/clock/gxbb-clkc.h>
46#include <dt-bindings/gpio/meson-gxbb-gpio.h> 46#include <dt-bindings/gpio/meson-gxl-gpio.h>
47 47
48/ { 48/ {
49 compatible = "amlogic,meson-gxl"; 49 compatible = "amlogic,meson-gxl";
@@ -299,3 +299,7 @@
299 <&clkc CLKID_FCLK_DIV2>; 299 <&clkc CLKID_FCLK_DIV2>;
300 clock-names = "core", "clkin0", "clkin1"; 300 clock-names = "core", "clkin0", "clkin1";
301}; 301};
302
303&vpu {
304 compatible = "amlogic,meson-gxl-vpu", "amlogic,meson-gx-vpu";
305};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
index f859d75db8bd..5a337d339df1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
@@ -90,6 +90,16 @@
90 compatible = "mmc-pwrseq-emmc"; 90 compatible = "mmc-pwrseq-emmc";
91 reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>; 91 reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
92 }; 92 };
93
94 cvbs-connector {
95 compatible = "composite-video-connector";
96
97 port {
98 cvbs_connector_in: endpoint {
99 remote-endpoint = <&cvbs_vdac_out>;
100 };
101 };
102 };
93}; 103};
94 104
95/* This UART is brought out to the DB9 connector */ 105/* This UART is brought out to the DB9 connector */
@@ -167,3 +177,9 @@
167 max-speed = <1000>; 177 max-speed = <1000>;
168 }; 178 };
169}; 179};
180
181&cvbs_vdac_port {
182 cvbs_vdac_out: endpoint {
183 remote-endpoint = <&cvbs_connector_in>;
184 };
185};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
index c1974bbbddea..eb2f0c3e5e53 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
@@ -112,3 +112,7 @@
112 }; 112 };
113 }; 113 };
114}; 114};
115
116&vpu {
117 compatible = "amlogic,meson-gxm-vpu", "amlogic,meson-gx-vpu";
118};
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
index a852e28a40e1..a83ed2c6bbf7 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
@@ -81,7 +81,7 @@
81 #address-cells = <0>; 81 #address-cells = <0>;
82 interrupt-controller; 82 interrupt-controller;
83 reg = <0x0 0x2c001000 0 0x1000>, 83 reg = <0x0 0x2c001000 0 0x1000>,
84 <0x0 0x2c002000 0 0x1000>, 84 <0x0 0x2c002000 0 0x2000>,
85 <0x0 0x2c004000 0 0x2000>, 85 <0x0 0x2c004000 0 0x2000>,
86 <0x0 0x2c006000 0 0x2000>; 86 <0x0 0x2c006000 0 0x2000>;
87 interrupts = <1 9 0xf04>; 87 interrupts = <1 9 0xf04>;
diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
index 64226d5ae471..135890cd8a85 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
@@ -1367,7 +1367,7 @@
1367 }; 1367 };
1368 1368
1369 amba { 1369 amba {
1370 compatible = "arm,amba-bus"; 1370 compatible = "simple-bus";
1371 #address-cells = <1>; 1371 #address-cells = <1>;
1372 #size-cells = <1>; 1372 #size-cells = <1>;
1373 ranges; 1373 ranges;
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 9d1d7ad9b075..29ed6b61c737 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -64,6 +64,16 @@
64 reg = <0x0 0x86000000 0x0 0x200000>; 64 reg = <0x0 0x86000000 0x0 0x200000>;
65 no-map; 65 no-map;
66 }; 66 };
67
68 memory@85800000 {
69 reg = <0x0 0x85800000 0x0 0x800000>;
70 no-map;
71 };
72
73 memory@86200000 {
74 reg = <0x0 0x86200000 0x0 0x2600000>;
75 no-map;
76 };
67 }; 77 };
68 78
69 cpus { 79 cpus {
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
index 6ffb0517421a..dbea2c3d8f0c 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
@@ -169,7 +169,7 @@
169 power-source = <3300>; 169 power-source = <3300>;
170 }; 170 };
171 171
172 sdhi0_pins_uhs: sd0 { 172 sdhi0_pins_uhs: sd0_uhs {
173 groups = "sdhi0_data4", "sdhi0_ctrl"; 173 groups = "sdhi0_data4", "sdhi0_ctrl";
174 function = "sdhi0"; 174 function = "sdhi0";
175 power-source = <1800>; 175 power-source = <1800>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
index 358089687a69..ef1b9e573af0 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
@@ -27,7 +27,7 @@
27 stdout-path = "serial0:115200n8"; 27 stdout-path = "serial0:115200n8";
28 }; 28 };
29 29
30 memory { 30 memory@0 {
31 device_type = "memory"; 31 device_type = "memory";
32 reg = <0x0 0x0 0x0 0x40000000>; 32 reg = <0x0 0x0 0x0 0x40000000>;
33 }; 33 };
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 68a908334c7b..54dc28351c8c 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -72,7 +72,7 @@
72 <1 10 0xf08>; 72 <1 10 0xf08>;
73 }; 73 };
74 74
75 amba_apu { 75 amba_apu: amba_apu@0 {
76 compatible = "simple-bus"; 76 compatible = "simple-bus";
77 #address-cells = <2>; 77 #address-cells = <2>;
78 #size-cells = <1>; 78 #size-cells = <1>;
@@ -175,7 +175,7 @@
175 }; 175 };
176 176
177 i2c0: i2c@ff020000 { 177 i2c0: i2c@ff020000 {
178 compatible = "cdns,i2c-r1p10"; 178 compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
179 status = "disabled"; 179 status = "disabled";
180 interrupt-parent = <&gic>; 180 interrupt-parent = <&gic>;
181 interrupts = <0 17 4>; 181 interrupts = <0 17 4>;
@@ -185,7 +185,7 @@
185 }; 185 };
186 186
187 i2c1: i2c@ff030000 { 187 i2c1: i2c@ff030000 {
188 compatible = "cdns,i2c-r1p10"; 188 compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
189 status = "disabled"; 189 status = "disabled";
190 interrupt-parent = <&gic>; 190 interrupt-parent = <&gic>;
191 interrupts = <0 18 4>; 191 interrupts = <0 18 4>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 869dded0f09f..33b744d54739 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -331,6 +331,7 @@ CONFIG_DRM_VC4=m
331CONFIG_DRM_PANEL_SIMPLE=m 331CONFIG_DRM_PANEL_SIMPLE=m
332CONFIG_DRM_I2C_ADV7511=m 332CONFIG_DRM_I2C_ADV7511=m
333CONFIG_DRM_HISI_KIRIN=m 333CONFIG_DRM_HISI_KIRIN=m
334CONFIG_DRM_MESON=m
334CONFIG_FB=y 335CONFIG_FB=y
335CONFIG_FB_ARMCLCD=y 336CONFIG_FB_ARMCLCD=y
336CONFIG_BACKLIGHT_GENERIC=m 337CONFIG_BACKLIGHT_GENERIC=m
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index c53dbeae79f2..838dad5c209f 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
193 cbz w6, .Lcbcencloop 193 cbz w6, .Lcbcencloop
194 194
195 ld1 {v0.16b}, [x5] /* get iv */ 195 ld1 {v0.16b}, [x5] /* get iv */
196 enc_prepare w3, x2, x5 196 enc_prepare w3, x2, x6
197 197
198.Lcbcencloop: 198.Lcbcencloop:
199 ld1 {v1.16b}, [x1], #16 /* get next pt block */ 199 ld1 {v1.16b}, [x1], #16 /* get next pt block */
200 eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */ 200 eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
201 encrypt_block v0, w3, x2, x5, w6 201 encrypt_block v0, w3, x2, x6, w7
202 st1 {v0.16b}, [x0], #16 202 st1 {v0.16b}, [x0], #16
203 subs w4, w4, #1 203 subs w4, w4, #1
204 bne .Lcbcencloop 204 bne .Lcbcencloop
205 st1 {v0.16b}, [x5] /* return iv */
205 ret 206 ret
206AES_ENDPROC(aes_cbc_encrypt) 207AES_ENDPROC(aes_cbc_encrypt)
207 208
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
211 cbz w6, .LcbcdecloopNx 212 cbz w6, .LcbcdecloopNx
212 213
213 ld1 {v7.16b}, [x5] /* get iv */ 214 ld1 {v7.16b}, [x5] /* get iv */
214 dec_prepare w3, x2, x5 215 dec_prepare w3, x2, x6
215 216
216.LcbcdecloopNx: 217.LcbcdecloopNx:
217#if INTERLEAVE >= 2 218#if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
248.Lcbcdecloop: 249.Lcbcdecloop:
249 ld1 {v1.16b}, [x1], #16 /* get next ct block */ 250 ld1 {v1.16b}, [x1], #16 /* get next ct block */
250 mov v0.16b, v1.16b /* ...and copy to v0 */ 251 mov v0.16b, v1.16b /* ...and copy to v0 */
251 decrypt_block v0, w3, x2, x5, w6 252 decrypt_block v0, w3, x2, x6, w7
252 eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */ 253 eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
253 mov v7.16b, v1.16b /* ct is next iv */ 254 mov v7.16b, v1.16b /* ct is next iv */
254 st1 {v0.16b}, [x0], #16 255 st1 {v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
256 bne .Lcbcdecloop 257 bne .Lcbcdecloop
257.Lcbcdecout: 258.Lcbcdecout:
258 FRAME_POP 259 FRAME_POP
260 st1 {v7.16b}, [x5] /* return iv */
259 ret 261 ret
260AES_ENDPROC(aes_cbc_decrypt) 262AES_ENDPROC(aes_cbc_decrypt)
261 263
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
267 269
268AES_ENTRY(aes_ctr_encrypt) 270AES_ENTRY(aes_ctr_encrypt)
269 FRAME_PUSH 271 FRAME_PUSH
270 cbnz w6, .Lctrfirst /* 1st time around? */ 272 cbz w6, .Lctrnotfirst /* 1st time around? */
271 umov x5, v4.d[1] /* keep swabbed ctr in reg */
272 rev x5, x5
273#if INTERLEAVE >= 2
274 cmn w5, w4 /* 32 bit overflow? */
275 bcs .Lctrinc
276 add x5, x5, #1 /* increment BE ctr */
277 b .LctrincNx
278#else
279 b .Lctrinc
280#endif
281.Lctrfirst:
282 enc_prepare w3, x2, x6 273 enc_prepare w3, x2, x6
283 ld1 {v4.16b}, [x5] 274 ld1 {v4.16b}, [x5]
284 umov x5, v4.d[1] /* keep swabbed ctr in reg */ 275
285 rev x5, x5 276.Lctrnotfirst:
277 umov x8, v4.d[1] /* keep swabbed ctr in reg */
278 rev x8, x8
286#if INTERLEAVE >= 2 279#if INTERLEAVE >= 2
287 cmn w5, w4 /* 32 bit overflow? */ 280 cmn w8, w4 /* 32 bit overflow? */
288 bcs .Lctrloop 281 bcs .Lctrloop
289.LctrloopNx: 282.LctrloopNx:
290 subs w4, w4, #INTERLEAVE 283 subs w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
292#if INTERLEAVE == 2 285#if INTERLEAVE == 2
293 mov v0.8b, v4.8b 286 mov v0.8b, v4.8b
294 mov v1.8b, v4.8b 287 mov v1.8b, v4.8b
295 rev x7, x5 288 rev x7, x8
296 add x5, x5, #1 289 add x8, x8, #1
297 ins v0.d[1], x7 290 ins v0.d[1], x7
298 rev x7, x5 291 rev x7, x8
299 add x5, x5, #1 292 add x8, x8, #1
300 ins v1.d[1], x7 293 ins v1.d[1], x7
301 ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */ 294 ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
302 do_encrypt_block2x 295 do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
305 st1 {v0.16b-v1.16b}, [x0], #32 298 st1 {v0.16b-v1.16b}, [x0], #32
306#else 299#else
307 ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */ 300 ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
308 dup v7.4s, w5 301 dup v7.4s, w8
309 mov v0.16b, v4.16b 302 mov v0.16b, v4.16b
310 add v7.4s, v7.4s, v8.4s 303 add v7.4s, v7.4s, v8.4s
311 mov v1.16b, v4.16b 304 mov v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
323 eor v2.16b, v7.16b, v2.16b 316 eor v2.16b, v7.16b, v2.16b
324 eor v3.16b, v5.16b, v3.16b 317 eor v3.16b, v5.16b, v3.16b
325 st1 {v0.16b-v3.16b}, [x0], #64 318 st1 {v0.16b-v3.16b}, [x0], #64
326 add x5, x5, #INTERLEAVE 319 add x8, x8, #INTERLEAVE
327#endif 320#endif
328 cbz w4, .LctroutNx 321 rev x7, x8
329.LctrincNx:
330 rev x7, x5
331 ins v4.d[1], x7 322 ins v4.d[1], x7
323 cbz w4, .Lctrout
332 b .LctrloopNx 324 b .LctrloopNx
333.LctroutNx:
334 sub x5, x5, #1
335 rev x7, x5
336 ins v4.d[1], x7
337 b .Lctrout
338.Lctr1x: 325.Lctr1x:
339 adds w4, w4, #INTERLEAVE 326 adds w4, w4, #INTERLEAVE
340 beq .Lctrout 327 beq .Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
342.Lctrloop: 329.Lctrloop:
343 mov v0.16b, v4.16b 330 mov v0.16b, v4.16b
344 encrypt_block v0, w3, x2, x6, w7 331 encrypt_block v0, w3, x2, x6, w7
332
333 adds x8, x8, #1 /* increment BE ctr */
334 rev x7, x8
335 ins v4.d[1], x7
336 bcs .Lctrcarry /* overflow? */
337
338.Lctrcarrydone:
345 subs w4, w4, #1 339 subs w4, w4, #1
346 bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */ 340 bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
347 ld1 {v3.16b}, [x1], #16 341 ld1 {v3.16b}, [x1], #16
348 eor v3.16b, v0.16b, v3.16b 342 eor v3.16b, v0.16b, v3.16b
349 st1 {v3.16b}, [x0], #16 343 st1 {v3.16b}, [x0], #16
350 beq .Lctrout 344 bne .Lctrloop
351.Lctrinc: 345
352 adds x5, x5, #1 /* increment BE ctr */ 346.Lctrout:
353 rev x7, x5 347 st1 {v4.16b}, [x5] /* return next CTR value */
354 ins v4.d[1], x7 348 FRAME_POP
355 bcc .Lctrloop /* no overflow? */ 349 ret
356 umov x7, v4.d[0] /* load upper word of ctr */ 350
357 rev x7, x7 /* ... to handle the carry */
358 add x7, x7, #1
359 rev x7, x7
360 ins v4.d[0], x7
361 b .Lctrloop
362.Lctrhalfblock: 351.Lctrhalfblock:
363 ld1 {v3.8b}, [x1] 352 ld1 {v3.8b}, [x1]
364 eor v3.8b, v0.8b, v3.8b 353 eor v3.8b, v0.8b, v3.8b
365 st1 {v3.8b}, [x0] 354 st1 {v3.8b}, [x0]
366.Lctrout:
367 FRAME_POP 355 FRAME_POP
368 ret 356 ret
357
358.Lctrcarry:
359 umov x7, v4.d[0] /* load upper word of ctr */
360 rev x7, x7 /* ... to handle the carry */
361 add x7, x7, #1
362 rev x7, x7
363 ins v4.d[0], x7
364 b .Lctrcarrydone
369AES_ENDPROC(aes_ctr_encrypt) 365AES_ENDPROC(aes_ctr_encrypt)
370 .ltorg 366 .ltorg
371 367
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
new file mode 100644
index 000000000000..df411f3e083c
--- /dev/null
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -0,0 +1,65 @@
1#ifndef __ASM_ASM_UACCESS_H
2#define __ASM_ASM_UACCESS_H
3
4#include <asm/alternative.h>
5#include <asm/kernel-pgtable.h>
6#include <asm/sysreg.h>
7#include <asm/assembler.h>
8
9/*
10 * User access enabling/disabling macros.
11 */
12#ifdef CONFIG_ARM64_SW_TTBR0_PAN
13 .macro __uaccess_ttbr0_disable, tmp1
14 mrs \tmp1, ttbr1_el1 // swapper_pg_dir
15 add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
16 msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
17 isb
18 .endm
19
20 .macro __uaccess_ttbr0_enable, tmp1
21 get_thread_info \tmp1
22 ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
23 msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
24 isb
25 .endm
26
27 .macro uaccess_ttbr0_disable, tmp1
28alternative_if_not ARM64_HAS_PAN
29 __uaccess_ttbr0_disable \tmp1
30alternative_else_nop_endif
31 .endm
32
33 .macro uaccess_ttbr0_enable, tmp1, tmp2
34alternative_if_not ARM64_HAS_PAN
35 save_and_disable_irq \tmp2 // avoid preemption
36 __uaccess_ttbr0_enable \tmp1
37 restore_irq \tmp2
38alternative_else_nop_endif
39 .endm
40#else
41 .macro uaccess_ttbr0_disable, tmp1
42 .endm
43
44 .macro uaccess_ttbr0_enable, tmp1, tmp2
45 .endm
46#endif
47
48/*
49 * These macros are no-ops when UAO is present.
50 */
51 .macro uaccess_disable_not_uao, tmp1
52 uaccess_ttbr0_disable \tmp1
53alternative_if ARM64_ALT_PAN_NOT_UAO
54 SET_PSTATE_PAN(1)
55alternative_else_nop_endif
56 .endm
57
58 .macro uaccess_enable_not_uao, tmp1, tmp2
59 uaccess_ttbr0_enable \tmp1, \tmp2
60alternative_if ARM64_ALT_PAN_NOT_UAO
61 SET_PSTATE_PAN(0)
62alternative_else_nop_endif
63 .endm
64
65#endif
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 446f6c46d4b1..3a4301163e04 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -164,22 +164,25 @@ lr .req x30 // link register
164 164
165/* 165/*
166 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where 166 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
167 * <symbol> is within the range +/- 4 GB of the PC. 167 * <symbol> is within the range +/- 4 GB of the PC when running
168 * in core kernel context. In module context, a movz/movk sequence
169 * is used, since modules may be loaded far away from the kernel
170 * when KASLR is in effect.
168 */ 171 */
169 /* 172 /*
170 * @dst: destination register (64 bit wide) 173 * @dst: destination register (64 bit wide)
171 * @sym: name of the symbol 174 * @sym: name of the symbol
172 * @tmp: optional scratch register to be used if <dst> == sp, which
173 * is not allowed in an adrp instruction
174 */ 175 */
175 .macro adr_l, dst, sym, tmp= 176 .macro adr_l, dst, sym
176 .ifb \tmp 177#ifndef MODULE
177 adrp \dst, \sym 178 adrp \dst, \sym
178 add \dst, \dst, :lo12:\sym 179 add \dst, \dst, :lo12:\sym
179 .else 180#else
180 adrp \tmp, \sym 181 movz \dst, #:abs_g3:\sym
181 add \dst, \tmp, :lo12:\sym 182 movk \dst, #:abs_g2_nc:\sym
182 .endif 183 movk \dst, #:abs_g1_nc:\sym
184 movk \dst, #:abs_g0_nc:\sym
185#endif
183 .endm 186 .endm
184 187
185 /* 188 /*
@@ -190,6 +193,7 @@ lr .req x30 // link register
190 * the address 193 * the address
191 */ 194 */
192 .macro ldr_l, dst, sym, tmp= 195 .macro ldr_l, dst, sym, tmp=
196#ifndef MODULE
193 .ifb \tmp 197 .ifb \tmp
194 adrp \dst, \sym 198 adrp \dst, \sym
195 ldr \dst, [\dst, :lo12:\sym] 199 ldr \dst, [\dst, :lo12:\sym]
@@ -197,6 +201,15 @@ lr .req x30 // link register
197 adrp \tmp, \sym 201 adrp \tmp, \sym
198 ldr \dst, [\tmp, :lo12:\sym] 202 ldr \dst, [\tmp, :lo12:\sym]
199 .endif 203 .endif
204#else
205 .ifb \tmp
206 adr_l \dst, \sym
207 ldr \dst, [\dst]
208 .else
209 adr_l \tmp, \sym
210 ldr \dst, [\tmp]
211 .endif
212#endif
200 .endm 213 .endm
201 214
202 /* 215 /*
@@ -206,8 +219,13 @@ lr .req x30 // link register
206 * while <src> needs to be preserved. 219 * while <src> needs to be preserved.
207 */ 220 */
208 .macro str_l, src, sym, tmp 221 .macro str_l, src, sym, tmp
222#ifndef MODULE
209 adrp \tmp, \sym 223 adrp \tmp, \sym
210 str \src, [\tmp, :lo12:\sym] 224 str \src, [\tmp, :lo12:\sym]
225#else
226 adr_l \tmp, \sym
227 str \src, [\tmp]
228#endif
211 .endm 229 .endm
212 230
213 /* 231 /*
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
index f2bcbe2d9889..86c404171305 100644
--- a/arch/arm64/include/asm/current.h
+++ b/arch/arm64/include/asm/current.h
@@ -9,9 +9,17 @@
9 9
10struct task_struct; 10struct task_struct;
11 11
12/*
13 * We don't use read_sysreg() as we want the compiler to cache the value where
14 * possible.
15 */
12static __always_inline struct task_struct *get_current(void) 16static __always_inline struct task_struct *get_current(void)
13{ 17{
14 return (struct task_struct *)read_sysreg(sp_el0); 18 unsigned long sp_el0;
19
20 asm ("mrs %0, sp_el0" : "=r" (sp_el0));
21
22 return (struct task_struct *)sp_el0;
15} 23}
16 24
17#define current get_current() 25#define current get_current()
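The get_current() change above trades read_sysreg() for a hand-written asm without the volatile qualifier, so the compiler may common-subexpression-eliminate repeated reads of sp_el0, exactly as the new comment says. A sketch of the distinction, assuming an AArch64 compiler:

/* Without 'volatile', an asm with outputs and no side effects is
 * treated as a pure function of its inputs, so repeated calls can
 * fold into a single mrs. */
static inline unsigned long sp_el0_cacheable(void)
{
	unsigned long v;

	asm ("mrs %0, sp_el0" : "=r" (v));
	return v;
}

/* With 'volatile', every use re-executes the instruction: */
static inline unsigned long sp_el0_fresh(void)
{
	unsigned long v;

	asm volatile ("mrs %0, sp_el0" : "=r" (v));
	return v;
}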
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index bfe632808d77..90c39a662379 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -222,7 +222,7 @@ static inline void *phys_to_virt(phys_addr_t x)
222#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 222#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
223#else 223#else
224#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) 224#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
225#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) 225#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
226 226
227#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET)) 227#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
228#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) 228#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index d26750ca6e06..46da3ea638bb 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -22,8 +22,6 @@
22#include <asm/kernel-pgtable.h> 22#include <asm/kernel-pgtable.h>
23#include <asm/sysreg.h> 23#include <asm/sysreg.h>
24 24
25#ifndef __ASSEMBLY__
26
27/* 25/*
28 * User space memory access functions 26 * User space memory access functions
29 */ 27 */
@@ -424,66 +422,4 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
424extern __must_check long strlen_user(const char __user *str); 422extern __must_check long strlen_user(const char __user *str);
425extern __must_check long strnlen_user(const char __user *str, long n); 423extern __must_check long strnlen_user(const char __user *str, long n);
426 424
427#else /* __ASSEMBLY__ */
428
429#include <asm/assembler.h>
430
431/*
432 * User access enabling/disabling macros.
433 */
434#ifdef CONFIG_ARM64_SW_TTBR0_PAN
435 .macro __uaccess_ttbr0_disable, tmp1
436 mrs \tmp1, ttbr1_el1 // swapper_pg_dir
437 add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
438 msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
439 isb
440 .endm
441
442 .macro __uaccess_ttbr0_enable, tmp1
443 get_thread_info \tmp1
444 ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
445 msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
446 isb
447 .endm
448
449 .macro uaccess_ttbr0_disable, tmp1
450alternative_if_not ARM64_HAS_PAN
451 __uaccess_ttbr0_disable \tmp1
452alternative_else_nop_endif
453 .endm
454
455 .macro uaccess_ttbr0_enable, tmp1, tmp2
456alternative_if_not ARM64_HAS_PAN
457 save_and_disable_irq \tmp2 // avoid preemption
458 __uaccess_ttbr0_enable \tmp1
459 restore_irq \tmp2
460alternative_else_nop_endif
461 .endm
462#else
463 .macro uaccess_ttbr0_disable, tmp1
464 .endm
465
466 .macro uaccess_ttbr0_enable, tmp1, tmp2
467 .endm
468#endif
469
470/*
471 * These macros are no-ops when UAO is present.
472 */
473 .macro uaccess_disable_not_uao, tmp1
474 uaccess_ttbr0_disable \tmp1
475alternative_if ARM64_ALT_PAN_NOT_UAO
476 SET_PSTATE_PAN(1)
477alternative_else_nop_endif
478 .endm
479
480 .macro uaccess_enable_not_uao, tmp1, tmp2
481 uaccess_ttbr0_enable \tmp1, \tmp2
482alternative_if ARM64_ALT_PAN_NOT_UAO
483 SET_PSTATE_PAN(0)
484alternative_else_nop_endif
485 .endm
486
487#endif /* __ASSEMBLY__ */
488
489#endif /* __ASM_UACCESS_H */ 425#endif /* __ASM_UACCESS_H */
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index fea10736b11f..439f6b5d31f6 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -47,6 +47,7 @@
47#include <asm/ptrace.h> 47#include <asm/ptrace.h>
48#include <asm/sections.h> 48#include <asm/sections.h>
49#include <asm/sysreg.h> 49#include <asm/sysreg.h>
50#include <asm/cpufeature.h>
50 51
51/* 52/*
52 * __boot_cpu_mode records what mode CPUs were booted in. 53 * __boot_cpu_mode records what mode CPUs were booted in.
@@ -80,6 +81,14 @@ static inline bool is_kernel_in_hyp_mode(void)
80 return read_sysreg(CurrentEL) == CurrentEL_EL2; 81 return read_sysreg(CurrentEL) == CurrentEL_EL2;
81} 82}
82 83
84static inline bool has_vhe(void)
85{
86 if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
87 return true;
88
89 return false;
90}
91
83#ifdef CONFIG_ARM64_VHE 92#ifdef CONFIG_ARM64_VHE
84extern void verify_cpu_run_el(void); 93extern void verify_cpu_run_el(void);
85#else 94#else
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index b5c3933ed441..d1ff83dfe5de 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -77,6 +77,7 @@ struct user_fpsimd_state {
77 __uint128_t vregs[32]; 77 __uint128_t vregs[32];
78 __u32 fpsr; 78 __u32 fpsr;
79 __u32 fpcr; 79 __u32 fpcr;
80 __u32 __reserved[2];
80}; 81};
81 82
82struct user_hwdebug_state { 83struct user_hwdebug_state {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a7504f40d7ee..43512d4d7df2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -31,7 +31,7 @@
31#include <asm/memory.h> 31#include <asm/memory.h>
32#include <asm/ptrace.h> 32#include <asm/ptrace.h>
33#include <asm/thread_info.h> 33#include <asm/thread_info.h>
34#include <linux/uaccess.h> 34#include <asm/asm-uaccess.h>
35#include <asm/unistd.h> 35#include <asm/unistd.h>
36 36
37/* 37/*
@@ -683,7 +683,7 @@ el0_inv:
683 mov x0, sp 683 mov x0, sp
684 mov x1, #BAD_SYNC 684 mov x1, #BAD_SYNC
685 mov x2, x25 685 mov x2, x25
686 bl bad_mode 686 bl bad_el0_sync
687 b ret_to_user 687 b ret_to_user
688ENDPROC(el0_sync) 688ENDPROC(el0_sync)
689 689
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fc35e06ccaac..a22161ccf447 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -551,6 +551,8 @@ static int hw_break_set(struct task_struct *target,
551 /* (address, ctrl) registers */ 551 /* (address, ctrl) registers */
552 limit = regset->n * regset->size; 552 limit = regset->n * regset->size;
553 while (count && offset < limit) { 553 while (count && offset < limit) {
554 if (count < PTRACE_HBP_ADDR_SZ)
555 return -EINVAL;
554 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, 556 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
555 offset, offset + PTRACE_HBP_ADDR_SZ); 557 offset, offset + PTRACE_HBP_ADDR_SZ);
556 if (ret) 558 if (ret)
@@ -560,6 +562,8 @@ static int hw_break_set(struct task_struct *target,
560 return ret; 562 return ret;
561 offset += PTRACE_HBP_ADDR_SZ; 563 offset += PTRACE_HBP_ADDR_SZ;
562 564
565 if (!count)
566 break;
563 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 567 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
564 offset, offset + PTRACE_HBP_CTRL_SZ); 568 offset, offset + PTRACE_HBP_CTRL_SZ);
565 if (ret) 569 if (ret)
@@ -596,7 +600,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
596 const void *kbuf, const void __user *ubuf) 600 const void *kbuf, const void __user *ubuf)
597{ 601{
598 int ret; 602 int ret;
599 struct user_pt_regs newregs; 603 struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
600 604
601 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); 605 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
602 if (ret) 606 if (ret)
@@ -626,7 +630,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
626 const void *kbuf, const void __user *ubuf) 630 const void *kbuf, const void __user *ubuf)
627{ 631{
628 int ret; 632 int ret;
629 struct user_fpsimd_state newstate; 633 struct user_fpsimd_state newstate =
634 target->thread.fpsimd_state.user_fpsimd;
630 635
631 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1); 636 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
632 if (ret) 637 if (ret)
@@ -650,7 +655,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
650 const void *kbuf, const void __user *ubuf) 655 const void *kbuf, const void __user *ubuf)
651{ 656{
652 int ret; 657 int ret;
653 unsigned long tls; 658 unsigned long tls = target->thread.tp_value;
654 659
655 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 660 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
656 if (ret) 661 if (ret)
@@ -676,7 +681,8 @@ static int system_call_set(struct task_struct *target,
676 unsigned int pos, unsigned int count, 681 unsigned int pos, unsigned int count,
677 const void *kbuf, const void __user *ubuf) 682 const void *kbuf, const void __user *ubuf)
678{ 683{
679 int syscallno, ret; 684 int syscallno = task_pt_regs(target)->syscallno;
685 int ret;
680 686
681 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1); 687 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
682 if (ret) 688 if (ret)
@@ -948,7 +954,7 @@ static int compat_tls_set(struct task_struct *target,
948 const void __user *ubuf) 954 const void __user *ubuf)
949{ 955{
950 int ret; 956 int ret;
951 compat_ulong_t tls; 957 compat_ulong_t tls = target->thread.tp_value;
952 958
953 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); 959 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
954 if (ret) 960 if (ret)
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 23e9e13bd2aa..655e65f38f31 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -11,6 +11,7 @@
11 * for more details. 11 * for more details.
12 */ 12 */
13 13
14#include <linux/acpi.h>
14#include <linux/cpu.h> 15#include <linux/cpu.h>
15#include <linux/cpumask.h> 16#include <linux/cpumask.h>
16#include <linux/init.h> 17#include <linux/init.h>
@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = {
209 210
210static int __init register_cpufreq_notifier(void) 211static int __init register_cpufreq_notifier(void)
211{ 212{
212 if (cap_parsing_failed) 213 /*
214 * on ACPI-based systems we need to use the default cpu capacity
215 * until we have the necessary code to parse the cpu capacity, so
216 * skip registering cpufreq notifier.
217 */
218 if (!acpi_disabled || cap_parsing_failed)
213 return -EINVAL; 219 return -EINVAL;
214 220
215 if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) { 221 if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5b830be79c01..659b2e6b6cf7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -604,17 +604,34 @@ const char *esr_get_class_string(u32 esr)
604} 604}
605 605
606/* 606/*
607 * bad_mode handles the impossible case in the exception vector. 607 * bad_mode handles the impossible case in the exception vector. This is always
608 * fatal.
608 */ 609 */
609asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) 610asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
610{ 611{
611 siginfo_t info;
612 void __user *pc = (void __user *)instruction_pointer(regs);
613 console_verbose(); 612 console_verbose();
614 613
615 pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n", 614 pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
616 handler[reason], smp_processor_id(), esr, 615 handler[reason], smp_processor_id(), esr,
617 esr_get_class_string(esr)); 616 esr_get_class_string(esr));
617
618 die("Oops - bad mode", regs, 0);
619 local_irq_disable();
620 panic("bad mode");
621}
622
623/*
624 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
625 * exceptions taken from EL0. Unlike bad_mode, this returns.
626 */
627asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
628{
629 siginfo_t info;
630 void __user *pc = (void __user *)instruction_pointer(regs);
631 console_verbose();
632
633 pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
634 smp_processor_id(), esr, esr_get_class_string(esr));
618 __show_regs(regs); 635 __show_regs(regs);
619 636
620 info.si_signo = SIGILL; 637 info.si_signo = SIGILL;
@@ -622,7 +639,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
622 info.si_code = ILL_ILLOPC; 639 info.si_code = ILL_ILLOPC;
623 info.si_addr = pc; 640 info.si_addr = pc;
624 641
625 arm64_notify_die("Oops - bad mode", regs, &info, 0); 642 current->thread.fault_address = 0;
643 current->thread.fault_code = 0;
644
645 force_sig_info(info.si_signo, &info, current);
626} 646}
627 647
628void __pte_error(const char *file, int line, unsigned long val) 648void __pte_error(const char *file, int line, unsigned long val)
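
After the split, bad_mode is unconditionally fatal while bad_el0_sync reports the failure to the offending task as a SIGILL whose si_addr is the faulting PC. A sketch of the userspace side of that contract; raise() is only a stand-in trigger, so si_addr carries no meaningful PC here the way a real EL0 exception would:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void on_sigill(int sig, siginfo_t *info, void *ctx)
{
	/* For a real bad EL0 exception, si_addr holds the faulting PC.
	 * printf is not async-signal-safe; fine for a demo. */
	printf("SIGILL, si_addr=%p\n", info->si_addr);
	exit(0);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = on_sigill;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGILL, &sa, NULL);

	raise(SIGILL);	/* stand-in for executing an undefined instruction */
	return 1;
}
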
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index add4a1334085..e88fb99c1561 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -17,7 +17,7 @@
17 */ 17 */
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19 19
20#include <linux/uaccess.h> 20#include <asm/asm-uaccess.h>
21 21
22 .text 22 .text
23 23
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index fd6cd05593f9..4b5d826895ff 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -17,7 +17,7 @@
17#include <linux/linkage.h> 17#include <linux/linkage.h>
18 18
19#include <asm/cache.h> 19#include <asm/cache.h>
20#include <linux/uaccess.h> 20#include <asm/asm-uaccess.h>
21 21
22/* 22/*
23 * Copy from user space to a kernel buffer (alignment handled by the hardware) 23 * Copy from user space to a kernel buffer (alignment handled by the hardware)
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index d828540ded6f..47184c3a97da 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -19,7 +19,7 @@
19#include <linux/linkage.h> 19#include <linux/linkage.h>
20 20
21#include <asm/cache.h> 21#include <asm/cache.h>
22#include <linux/uaccess.h> 22#include <asm/asm-uaccess.h>
23 23
24/* 24/*
25 * Copy from user space to user space (alignment handled by the hardware) 25 * Copy from user space to user space (alignment handled by the hardware)
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 3e6ae2663b82..351f0766f7a6 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -17,7 +17,7 @@
17#include <linux/linkage.h> 17#include <linux/linkage.h>
18 18
19#include <asm/cache.h> 19#include <asm/cache.h>
20#include <linux/uaccess.h> 20#include <asm/asm-uaccess.h>
21 21
22/* 22/*
23 * Copy to user space from a kernel buffer (alignment handled by the hardware) 23 * Copy to user space from a kernel buffer (alignment handled by the hardware)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 17f422a4dc55..83c27b6e6dca 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -23,7 +23,7 @@
23#include <asm/assembler.h> 23#include <asm/assembler.h>
24#include <asm/cpufeature.h> 24#include <asm/cpufeature.h>
25#include <asm/alternative.h> 25#include <asm/alternative.h>
26#include <linux/uaccess.h> 26#include <asm/asm-uaccess.h>
27 27
28/* 28/*
29 * flush_icache_range(start,end) 29 * flush_icache_range(start,end)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 290a84f3351f..e04082700bb1 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -524,7 +524,8 @@ EXPORT_SYMBOL(dummy_dma_ops);
524 524
525static int __init arm64_dma_init(void) 525static int __init arm64_dma_init(void)
526{ 526{
527 if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT)) 527 if (swiotlb_force == SWIOTLB_FORCE ||
528 max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
528 swiotlb = 1; 529 swiotlb = 1;
529 530
530 return atomic_pool_init(); 531 return atomic_pool_init();
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index a78a5c401806..156169c6981b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -88,21 +88,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
88 break; 88 break;
89 89
90 pud = pud_offset(pgd, addr); 90 pud = pud_offset(pgd, addr);
91 printk(", *pud=%016llx", pud_val(*pud)); 91 pr_cont(", *pud=%016llx", pud_val(*pud));
92 if (pud_none(*pud) || pud_bad(*pud)) 92 if (pud_none(*pud) || pud_bad(*pud))
93 break; 93 break;
94 94
95 pmd = pmd_offset(pud, addr); 95 pmd = pmd_offset(pud, addr);
96 printk(", *pmd=%016llx", pmd_val(*pmd)); 96 pr_cont(", *pmd=%016llx", pmd_val(*pmd));
97 if (pmd_none(*pmd) || pmd_bad(*pmd)) 97 if (pmd_none(*pmd) || pmd_bad(*pmd))
98 break; 98 break;
99 99
100 pte = pte_offset_map(pmd, addr); 100 pte = pte_offset_map(pmd, addr);
101 printk(", *pte=%016llx", pte_val(*pte)); 101 pr_cont(", *pte=%016llx", pte_val(*pte));
102 pte_unmap(pte); 102 pte_unmap(pte);
103 } while(0); 103 } while(0);
104 104
105 printk("\n"); 105 pr_cont("\n");
106} 106}
107 107
108#ifdef CONFIG_ARM64_HW_AFDBM 108#ifdef CONFIG_ARM64_HW_AFDBM
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 964b7549af5c..e25584d72396 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -239,7 +239,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
239 ncontig = find_num_contig(vma->vm_mm, addr, cpte, 239 ncontig = find_num_contig(vma->vm_mm, addr, cpte,
240 *cpte, &pgsize); 240 *cpte, &pgsize);
241 for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) { 241 for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
242 changed = ptep_set_access_flags(vma, addr, cpte, 242 changed |= ptep_set_access_flags(vma, addr, cpte,
243 pfn_pte(pfn, 243 pfn_pte(pfn,
244 hugeprot), 244 hugeprot),
245 dirty); 245 dirty);
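
The one-character hugetlb fix is the classic accumulate-versus-overwrite bug: with plain '=', only the last contiguous PTE's result survives the loop, so an earlier "changed" answer could be reported as "unchanged". Reduced to its shape, with an invented helper in place of ptep_set_access_flags():

#include <stdbool.h>
#include <stdio.h>

/* Pretend only entry 0 actually changes. */
static bool set_flags(int i) { return i == 0; }

int main(void)
{
	bool lost = false, kept = false;
	int i;

	for (i = 0; i < 3; i++) {
		lost = set_flags(i);	/* bug: overwritten each pass */
		kept |= set_flags(i);	/* fix: sticky once true */
	}
	printf("%d %d\n", lost, kept);	/* 0 1 */
	return 0;
}
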
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 212c4d1e2f26..380ebe705093 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -401,8 +401,11 @@ static void __init free_unused_memmap(void)
401 */ 401 */
402void __init mem_init(void) 402void __init mem_init(void)
403{ 403{
404 if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT)) 404 if (swiotlb_force == SWIOTLB_FORCE ||
405 max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
405 swiotlb_init(1); 406 swiotlb_init(1);
407 else
408 swiotlb_force = SWIOTLB_NO_FORCE;
406 409
407 set_max_mapnr(pfn_to_page(max_pfn) - mem_map); 410 set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
408 411
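
Both swiotlb hunks follow from swiotlb_force becoming a tristate rather than a boolean, and the init.c change additionally records SWIOTLB_NO_FORCE when no bounce buffer is set up, so later code can tell "not forced" from "unavailable". A simplified sketch, with the enum names mirroring the kernel's but the surrounding logic invented:

#include <stdio.h>

enum swiotlb_force { SWIOTLB_NORMAL, SWIOTLB_FORCE, SWIOTLB_NO_FORCE };

int main(void)
{
	enum swiotlb_force swiotlb_force = SWIOTLB_NORMAL;
	unsigned long max_pfn = 0x80000, dma_limit_pfn = 0x100000;

	if (swiotlb_force == SWIOTLB_FORCE || max_pfn > dma_limit_pfn)
		printf("init swiotlb\n");
	else
		swiotlb_force = SWIOTLB_NO_FORCE;	/* record: never needed */

	printf("%d\n", swiotlb_force);	/* 2 */
	return 0;
}
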
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 47cf3f9d89ff..947830a459d2 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -49,7 +49,7 @@
49 49
50#include <linux/linkage.h> 50#include <linux/linkage.h>
51#include <asm/assembler.h> 51#include <asm/assembler.h>
52#include <linux/uaccess.h> 52#include <asm/asm-uaccess.h>
53#include <xen/interface/xen.h> 53#include <xen/interface/xen.h>
54 54
55 55
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 1c2a5e264fc7..e93c9494503a 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
139#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) 139#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
140#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) 140#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
141#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) 141#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
142 142#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
143 143
144#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) 144#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
145#define atomic_xchg(v, new) (xchg(&(v)->counter, new)) 145#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
161 return c; 161 return c;
162} 162}
163 163
164static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
165{
166 long long c, old;
167
168 c = atomic64_read(v);
169 for (;;) {
170 if (unlikely(c == u))
171 break;
172 old = atomic64_cmpxchg(v, c, c + i);
173 if (likely(old == c))
174 break;
175 c = old;
176 }
177 return c != u;
178}
179
180static inline long long atomic64_dec_if_positive(atomic64_t *v)
181{
182 long long c, old, dec;
183
184 c = atomic64_read(v);
185 for (;;) {
186 dec = c - 1;
187 if (unlikely(dec < 0))
188 break;
189 old = atomic64_cmpxchg((v), c, dec);
190 if (likely(old == c))
191 break;
192 c = old;
193 }
194 return dec;
195}
196
164#define ATOMIC_OP(op) \ 197#define ATOMIC_OP(op) \
165static inline int atomic_fetch_##op(int i, atomic_t *v) \ 198static inline int atomic_fetch_##op(int i, atomic_t *v) \
166{ \ 199{ \
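
The frv additions are the standard cmpxchg retry loop: read the counter, stop if the forbidden value u is seen, otherwise attempt the update and retry on contention. atomic64_inc_not_zero(v) then falls out as atomic64_add_unless(v, 1, 0). The same loop written with C11 atomics in place of the arch cmpxchg:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool add_unless(_Atomic long long *v, long long i, long long u)
{
	long long c = atomic_load(v);

	while (c != u) {
		/* On failure compare_exchange refreshes c with the current
		 * value, so the loop re-tests it against u each time. */
		if (atomic_compare_exchange_weak(v, &c, c + i))
			return true;
	}
	return false;	/* value was u, nothing added */
}

int main(void)
{
	_Atomic long long refcount = 1;

	/* inc_not_zero(v) == add_unless(v, 1, 0) */
	printf("%d\n", add_unless(&refcount, 1, 0));	/* 1: took a ref */
	return 0;
}
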
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 6a02b3a3fa65..e92fb190e2d6 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -521,6 +521,9 @@ void *kvm_mips_build_exit(void *addr)
521 uasm_i_and(&p, V0, V0, AT); 521 uasm_i_and(&p, V0, V0, AT);
522 uasm_i_lui(&p, AT, ST0_CU0 >> 16); 522 uasm_i_lui(&p, AT, ST0_CU0 >> 16);
523 uasm_i_or(&p, V0, V0, AT); 523 uasm_i_or(&p, V0, V0, AT);
524#ifdef CONFIG_64BIT
525 uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
526#endif
524 uasm_i_mtc0(&p, V0, C0_STATUS); 527 uasm_i_mtc0(&p, V0, C0_STATUS);
525 uasm_i_ehb(&p); 528 uasm_i_ehb(&p);
526 529
@@ -643,7 +646,7 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
643 646
644 /* Setup status register for running guest in UM */ 647 /* Setup status register for running guest in UM */
645 uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE); 648 uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
646 UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX)); 649 UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
647 uasm_i_and(&p, V1, V1, AT); 650 uasm_i_and(&p, V1, V1, AT);
648 uasm_i_mtc0(&p, V1, C0_STATUS); 651 uasm_i_mtc0(&p, V1, C0_STATUS);
649 uasm_i_ehb(&p); 652 uasm_i_ehb(&p);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 06a60b19acfb..29ec9ab3fd55 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -360,8 +360,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
360 dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run); 360 dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
361 361
362 /* Invalidate the icache for these ranges */ 362 /* Invalidate the icache for these ranges */
363 local_flush_icache_range((unsigned long)gebase, 363 flush_icache_range((unsigned long)gebase,
364 (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); 364 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
365 365
366 /* 366 /*
367 * Allocate comm page for guest kernel, a TLB will be reserved for 367 * Allocate comm page for guest kernel, a TLB will be reserved for
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
index 393d311735c8..67e333aa7629 100644
--- a/arch/mn10300/include/asm/switch_to.h
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -16,7 +16,7 @@
16struct task_struct; 16struct task_struct;
17struct thread_struct; 17struct thread_struct;
18 18
19#if !defined(CONFIG_LAZY_SAVE_FPU) 19#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
20struct fpu_state_struct; 20struct fpu_state_struct;
21extern asmlinkage void fpu_save(struct fpu_state_struct *); 21extern asmlinkage void fpu_save(struct fpu_state_struct *);
22#define switch_fpu(prev, next) \ 22#define switch_fpu(prev, next) \
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index ef31fc24344e..552544616b9d 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -44,6 +44,8 @@ SECTIONS
44 /* Read-only sections, merged into text segment: */ 44 /* Read-only sections, merged into text segment: */
45 . = LOAD_BASE ; 45 . = LOAD_BASE ;
46 46
47 _text = .;
48
47 /* _s_kernel_ro must be page aligned */ 49 /* _s_kernel_ro must be page aligned */
48 . = ALIGN(PAGE_SIZE); 50 . = ALIGN(PAGE_SIZE);
49 _s_kernel_ro = .; 51 _s_kernel_ro = .;
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index 3f9406d9b9d6..da87943328a5 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -6,7 +6,7 @@
6#endif 6#endif
7 7
8#include <linux/compiler.h> 8#include <linux/compiler.h>
9#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */ 9#include <asm/types.h>
10#include <asm/byteorder.h> 10#include <asm/byteorder.h>
11#include <asm/barrier.h> 11#include <asm/barrier.h>
12#include <linux/atomic.h> 12#include <linux/atomic.h>
@@ -17,6 +17,12 @@
17 * to include/asm-i386/bitops.h or kerneldoc 17 * to include/asm-i386/bitops.h or kerneldoc
18 */ 18 */
19 19
20#if __BITS_PER_LONG == 64
21#define SHIFT_PER_LONG 6
22#else
23#define SHIFT_PER_LONG 5
24#endif
25
20#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1)) 26#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
21 27
22 28
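
SHIFT_PER_LONG is just log2(BITS_PER_LONG), 6 on 64-bit and 5 on 32-bit, and only the kernel's own bitops need it, which is why it can move out of the exported uapi header into asm/bitops.h. Its usual job is splitting a bit number into word index and in-word offset without a division; a small sketch assuming 64-bit longs:

#include <stdio.h>

#define BITS_PER_LONG	64
#define SHIFT_PER_LONG	6	/* log2(BITS_PER_LONG) */

int main(void)
{
	unsigned long bit = 200;

	printf("word %lu, offset %lu\n",
	       bit >> SHIFT_PER_LONG,		/* 200 / 64 = 3 */
	       bit & (BITS_PER_LONG - 1));	/* 200 % 64 = 8 */
	return 0;
}
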
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 7581330ea35b..88fe0aad4390 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -49,7 +49,6 @@ struct thread_info {
49#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 49#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling TIF_NEED_RESCHED */
50#define TIF_32BIT 4 /* 32 bit binary */ 50#define TIF_32BIT 4 /* 32 bit binary */
51#define TIF_MEMDIE 5 /* is terminating due to OOM killer */ 51#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
52#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
53#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 52#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
54#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ 53#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
55#define TIF_SINGLESTEP 9 /* single stepping? */ 54#define TIF_SINGLESTEP 9 /* single stepping? */
diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h
index e0a23c7bdd43..07fa7e50bdc0 100644
--- a/arch/parisc/include/uapi/asm/bitsperlong.h
+++ b/arch/parisc/include/uapi/asm/bitsperlong.h
@@ -3,10 +3,8 @@
3 3
4#if defined(__LP64__) 4#if defined(__LP64__)
5#define __BITS_PER_LONG 64 5#define __BITS_PER_LONG 64
6#define SHIFT_PER_LONG 6
7#else 6#else
8#define __BITS_PER_LONG 32 7#define __BITS_PER_LONG 32
9#define SHIFT_PER_LONG 5
10#endif 8#endif
11 9
12#include <asm-generic/bitsperlong.h> 10#include <asm-generic/bitsperlong.h>
diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h
index e78403b129ef..928e1bbac98f 100644
--- a/arch/parisc/include/uapi/asm/swab.h
+++ b/arch/parisc/include/uapi/asm/swab.h
@@ -1,6 +1,7 @@
1#ifndef _PARISC_SWAB_H 1#ifndef _PARISC_SWAB_H
2#define _PARISC_SWAB_H 2#define _PARISC_SWAB_H
3 3
4#include <asm/bitsperlong.h>
4#include <linux/types.h> 5#include <linux/types.h>
5#include <linux/compiler.h> 6#include <linux/compiler.h>
6 7
@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
38} 39}
39#define __arch_swab32 __arch_swab32 40#define __arch_swab32 __arch_swab32
40 41
41#if BITS_PER_LONG > 32 42#if __BITS_PER_LONG > 32
42/* 43/*
43** From "PA-RISC 2.0 Architecture", HP Professional Books. 44** From "PA-RISC 2.0 Architecture", HP Professional Books.
44** See Appendix I page 8 , "Endian Byte Swapping". 45** See Appendix I page 8 , "Endian Byte Swapping".
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
61 return x; 62 return x;
62} 63}
63#define __arch_swab64 __arch_swab64 64#define __arch_swab64 __arch_swab64
64#endif /* BITS_PER_LONG > 32 */ 65#endif /* __BITS_PER_LONG > 32 */
65 66
66#endif /* _PARISC_SWAB_H */ 67#endif /* _PARISC_SWAB_H */
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index da0d9cb63403..1e22f981cd81 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -235,9 +235,26 @@ void __init time_init(void)
235 235
236 cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */ 236 cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */
237 237
238 /* register at clocksource framework */
239 clocksource_register_hz(&clocksource_cr16, cr16_hz);
240
241 /* register as sched_clock source */ 238 /* register as sched_clock source */
242 sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz); 239 sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
243} 240}
241
242static int __init init_cr16_clocksource(void)
243{
244 /*
 245 * The cr16 interval timers are not synchronized across CPUs, so mark
 246 * them unstable and lower their rating on SMP systems.
247 */
248 if (num_online_cpus() > 1) {
249 clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
250 clocksource_cr16.rating = 0;
251 }
252
253 /* register at clocksource framework */
254 clocksource_register_hz(&clocksource_cr16,
255 100 * PAGE0->mem_10msec);
256
257 return 0;
258}
259
260device_initcall(init_cr16_clocksource);
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 8ff9253930af..1a0b4f63f0e9 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -234,7 +234,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long code,
234 tsk->comm, code, address); 234 tsk->comm, code, address);
235 print_vma_addr(KERN_CONT " in ", regs->iaoq[0]); 235 print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
236 236
237 pr_cont(" trap #%lu: %s%c", code, trap_name(code), 237 pr_cont("\ntrap #%lu: %s%c", code, trap_name(code),
238 vma ? ',':'\n'); 238 vma ? ',':'\n');
239 239
240 if (vma) 240 if (vma)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a8ee573fe610..281f4f1fcd1f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -164,7 +164,6 @@ config PPC
164 select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE 164 select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
165 select HAVE_ARCH_HARDENED_USERCOPY 165 select HAVE_ARCH_HARDENED_USERCOPY
166 select HAVE_KERNEL_GZIP 166 select HAVE_KERNEL_GZIP
167 select HAVE_CC_STACKPROTECTOR
168 167
169config GENERIC_CSUM 168config GENERIC_CSUM
170 def_bool CPU_LITTLE_ENDIAN 169 def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
484 bool "Build a relocatable kernel" 483 bool "Build a relocatable kernel"
485 depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE)) 484 depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
486 select NONSTATIC_KERNEL 485 select NONSTATIC_KERNEL
486 select MODULE_REL_CRCS if MODVERSIONS
487 help 487 help
488 This builds a kernel image that is capable of running at the 488 This builds a kernel image that is capable of running at the
 489 location the kernel is loaded at. For ppc32, there are no 489 location the kernel is loaded at. For ppc32, there are no
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 1c64bc6330bc..0c4e470571ca 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -36,12 +36,13 @@
36#ifdef CONFIG_HUGETLB_PAGE 36#ifdef CONFIG_HUGETLB_PAGE
37static inline int hash__hugepd_ok(hugepd_t hpd) 37static inline int hash__hugepd_ok(hugepd_t hpd)
38{ 38{
39 unsigned long hpdval = hpd_val(hpd);
39 /* 40 /*
40 * if it is not a pte and have hugepd shift mask 41 * if it is not a pte and have hugepd shift mask
41 * set, then it is a hugepd directory pointer 42 * set, then it is a hugepd directory pointer
42 */ 43 */
43 if (!(hpd.pd & _PAGE_PTE) && 44 if (!(hpdval & _PAGE_PTE) &&
44 ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)) 45 ((hpdval & HUGEPD_SHIFT_MASK) != 0))
45 return true; 46 return true;
46 return false; 47 return false;
47} 48}
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index f61cad3de4e6..4c935f7504f7 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
201 unsigned long phys); 201 unsigned long phys);
202extern void hash__vmemmap_remove_mapping(unsigned long start, 202extern void hash__vmemmap_remove_mapping(unsigned long start,
203 unsigned long page_size); 203 unsigned long page_size);
204
205int hash__create_section_mapping(unsigned long start, unsigned long end);
206int hash__remove_section_mapping(unsigned long start, unsigned long end);
207
204#endif /* !__ASSEMBLY__ */ 208#endif /* !__ASSEMBLY__ */
205#endif /* __KERNEL__ */ 209#endif /* __KERNEL__ */
206#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */ 210#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index b312b152461b..6e834caa3720 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
23{ 23{
24 int i; 24 int i;
25 25
26#ifndef __clang__ /* clang can't cope with this */
26 BUILD_BUG_ON(!__builtin_constant_p(feature)); 27 BUILD_BUG_ON(!__builtin_constant_p(feature));
28#endif
27 29
28#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG 30#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
29 if (!static_key_initialized) { 31 if (!static_key_initialized) {
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index ede215167d1a..7f4025a6c69e 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -21,12 +21,12 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
21 * We have only four bits to encode, MMU page size 21 * We have only four bits to encode, MMU page size
22 */ 22 */
23 BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf); 23 BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
24 return __va(hpd.pd & HUGEPD_ADDR_MASK); 24 return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
25} 25}
26 26
27static inline unsigned int hugepd_mmu_psize(hugepd_t hpd) 27static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
28{ 28{
29 return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2; 29 return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
30} 30}
31 31
32static inline unsigned int hugepd_shift(hugepd_t hpd) 32static inline unsigned int hugepd_shift(hugepd_t hpd)
@@ -52,18 +52,20 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
52{ 52{
53 BUG_ON(!hugepd_ok(hpd)); 53 BUG_ON(!hugepd_ok(hpd));
54#ifdef CONFIG_PPC_8xx 54#ifdef CONFIG_PPC_8xx
55 return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK)); 55 return (pte_t *)__va(hpd_val(hpd) &
56 ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
56#else 57#else
57 return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE); 58 return (pte_t *)((hpd_val(hpd) &
59 ~HUGEPD_SHIFT_MASK) | PD_HUGE);
58#endif 60#endif
59} 61}
60 62
61static inline unsigned int hugepd_shift(hugepd_t hpd) 63static inline unsigned int hugepd_shift(hugepd_t hpd)
62{ 64{
63#ifdef CONFIG_PPC_8xx 65#ifdef CONFIG_PPC_8xx
64 return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17; 66 return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
65#else 67#else
66 return hpd.pd & HUGEPD_SHIFT_MASK; 68 return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
67#endif 69#endif
68} 70}
69 71
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index a34c764ca8dd..233a7e8cc8e3 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
160{ 160{
161 int i; 161 int i;
162 162
163#ifndef __clang__ /* clang can't cope with this */
163 BUILD_BUG_ON(!__builtin_constant_p(feature)); 164 BUILD_BUG_ON(!__builtin_constant_p(feature));
165#endif
164 166
165#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG 167#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
166 if (!static_key_initialized) { 168 if (!static_key_initialized) {
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index cc12c61ef315..53885512b8d3 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
90} 90}
91#endif 91#endif
92 92
93#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
94#define ARCH_RELOCATES_KCRCTAB
95#define reloc_start PHYSICAL_START
96#endif
97#endif /* __KERNEL__ */ 93#endif /* __KERNEL__ */
98#endif /* _ASM_POWERPC_MODULE_H */ 94#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 172849727054..0cd8a3852763 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -227,9 +227,10 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
227static inline int hugepd_ok(hugepd_t hpd) 227static inline int hugepd_ok(hugepd_t hpd)
228{ 228{
229#ifdef CONFIG_PPC_8xx 229#ifdef CONFIG_PPC_8xx
230 return ((hpd.pd & 0x4) != 0); 230 return ((hpd_val(hpd) & 0x4) != 0);
231#else 231#else
232 return (hpd.pd > 0); 232 /* We clear the top bit to indicate hugepd */
233 return ((hpd_val(hpd) & PD_HUGE) == 0);
233#endif 234#endif
234} 235}
235 236
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 56398e7e6100..47120bf2670c 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -294,15 +294,12 @@ extern long long virt_phys_offset;
294#include <asm/pgtable-types.h> 294#include <asm/pgtable-types.h>
295#endif 295#endif
296 296
297typedef struct { signed long pd; } hugepd_t;
298 297
299#ifndef CONFIG_HUGETLB_PAGE 298#ifndef CONFIG_HUGETLB_PAGE
300#define is_hugepd(pdep) (0) 299#define is_hugepd(pdep) (0)
301#define pgd_huge(pgd) (0) 300#define pgd_huge(pgd) (0)
302#endif /* CONFIG_HUGETLB_PAGE */ 301#endif /* CONFIG_HUGETLB_PAGE */
303 302
304#define __hugepd(x) ((hugepd_t) { (x) })
305
306struct page; 303struct page;
307extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); 304extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
308extern void copy_user_page(void *to, void *from, unsigned long vaddr, 305extern void copy_user_page(void *to, void *from, unsigned long vaddr,
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index e157489ee7a1..ae0a23091a9b 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -65,6 +65,7 @@ struct power_pmu {
65#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */ 65#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
66#define PPMU_HAS_SIER 0x00000040 /* Has SIER */ 66#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
67#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */ 67#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */
68#define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */
68 69
69/* 70/*
70 * Values for flags to get_alternatives() 71 * Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index 49c0a5a80efa..9c0f5db5cf46 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -104,4 +104,12 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
104 return pmd_raw(old) == prev; 104 return pmd_raw(old) == prev;
105} 105}
106 106
107typedef struct { __be64 pdbe; } hugepd_t;
108#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
109
110static inline unsigned long hpd_val(hugepd_t x)
111{
112 return be64_to_cpu(x.pdbe);
113}
114
107#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */ 115#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index e7f4f3e0fcde..8bd3b13fe2fb 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -66,4 +66,11 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
66} 66}
67#endif 67#endif
68 68
69typedef struct { unsigned long pd; } hugepd_t;
70#define __hugepd(x) ((hugepd_t) { (x) })
71static inline unsigned long hpd_val(hugepd_t x)
72{
73 return x.pd;
74}
75
69#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */ 76#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
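
Taken together, the hugepd hunks in this series replace raw hpd.pd accesses with __hugepd()/hpd_val() wrappers, letting book3s64 store the descriptor as big-endian __be64 while other platforms keep a native unsigned long, with callers staying endian-agnostic. A compressed userspace sketch of the pattern (note __builtin_bswap64 only models cpu_to_be64 on a little-endian host):

#include <stdio.h>

#ifdef STORE_BIG_ENDIAN
typedef struct { unsigned long long pdbe; } hugepd_t;	/* stored as BE */
#define __hugepd(x)	((hugepd_t){ __builtin_bswap64(x) })
static unsigned long long hpd_val(hugepd_t x)
{
	return __builtin_bswap64(x.pdbe);
}
#else
typedef struct { unsigned long long pd; } hugepd_t;	/* native order */
#define __hugepd(x)	((hugepd_t){ (x) })
static unsigned long long hpd_val(hugepd_t x)
{
	return x.pd;
}
#endif

int main(void)
{
	hugepd_t hpd = __hugepd(0x1234);

	/* Caller code is identical whichever storage format is in use. */
	printf("%#llx\n", hpd_val(hpd));
	return 0;
}
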
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index c56ea8c84abb..c4ced1d01d57 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -157,7 +157,7 @@
157#define PPC_INST_MCRXR 0x7c000400 157#define PPC_INST_MCRXR 0x7c000400
158#define PPC_INST_MCRXR_MASK 0xfc0007fe 158#define PPC_INST_MCRXR_MASK 0xfc0007fe
159#define PPC_INST_MFSPR_PVR 0x7c1f42a6 159#define PPC_INST_MFSPR_PVR 0x7c1f42a6
160#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff 160#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
161#define PPC_INST_MFTMR 0x7c0002dc 161#define PPC_INST_MFTMR 0x7c0002dc
162#define PPC_INST_MSGSND 0x7c00019c 162#define PPC_INST_MSGSND 0x7c00019c
163#define PPC_INST_MSGCLR 0x7c0001dc 163#define PPC_INST_MSGCLR 0x7c0001dc
@@ -174,13 +174,13 @@
174#define PPC_INST_RFDI 0x4c00004e 174#define PPC_INST_RFDI 0x4c00004e
175#define PPC_INST_RFMCI 0x4c00004c 175#define PPC_INST_RFMCI 0x4c00004c
176#define PPC_INST_MFSPR_DSCR 0x7c1102a6 176#define PPC_INST_MFSPR_DSCR 0x7c1102a6
177#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff 177#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe
178#define PPC_INST_MTSPR_DSCR 0x7c1103a6 178#define PPC_INST_MTSPR_DSCR 0x7c1103a6
179#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff 179#define PPC_INST_MTSPR_DSCR_MASK 0xfc1ffffe
180#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6 180#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
181#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff 181#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
182#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6 182#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
183#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff 183#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
184#define PPC_INST_MFVSRD 0x7c000066 184#define PPC_INST_MFVSRD 0x7c000066
185#define PPC_INST_MTVSRD 0x7c000166 185#define PPC_INST_MTVSRD 0x7c000166
186#define PPC_INST_SLBFEE 0x7c0007a7 186#define PPC_INST_SLBFEE 0x7c0007a7
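
Every mask here drops its lowest bit (...ffff becomes ...fffe), so instruction matching now ignores bit 0 of the mfspr/mtspr encodings, presumably because that bit is a reserved field the hardware itself ignores. Checking the arithmetic with the DSCR values above:

#include <stdio.h>

#define PPC_INST_MFSPR_DSCR		0x7c1102a6u
#define PPC_INST_MFSPR_DSCR_MASK	0xfc1ffffeu	/* bit 0 now ignored */

int main(void)
{
	unsigned int with_reserved_bit = PPC_INST_MFSPR_DSCR | 1;

	printf("%d\n", (with_reserved_bit & PPC_INST_MFSPR_DSCR_MASK)
			== PPC_INST_MFSPR_DSCR);	/* 1: still matches */
	return 0;
}
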
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 0d4531aa2052..dff79798903d 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -649,9 +649,10 @@
649#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */ 649#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
650#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */ 650#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
651#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ 651#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
652#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */ 652#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */
653#define SRR1_WAKESYSERR 0x00300000 /* System error */ 653#define SRR1_WAKESYSERR 0x00300000 /* System error */
654#define SRR1_WAKEEE 0x00200000 /* External interrupt */ 654#define SRR1_WAKEEE 0x00200000 /* External interrupt */
655#define SRR1_WAKEHVI 0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
655#define SRR1_WAKEMT 0x00280000 /* mtctrl */ 656#define SRR1_WAKEMT 0x00280000 /* mtctrl */
656#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ 657#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
657#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ 658#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
deleted file mode 100644
index 6720190eabec..000000000000
--- a/arch/powerpc/include/asm/stackprotector.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * GCC stack protector support.
3 *
4 * Stack protector works by putting predefined pattern at the start of
5 * the stack frame and verifying that it hasn't been overwritten when
6 * returning from the function. The pattern is called stack canary
7 * and gcc expects it to be defined by a global variable called
8 * "__stack_chk_guard" on PPC. This unfortunately means that on SMP
9 * we cannot have a different canary value per task.
10 */
11
12#ifndef _ASM_STACKPROTECTOR_H
13#define _ASM_STACKPROTECTOR_H
14
15#include <linux/random.h>
16#include <linux/version.h>
17#include <asm/reg.h>
18
19extern unsigned long __stack_chk_guard;
20
21/*
22 * Initialize the stackprotector canary value.
23 *
24 * NOTE: this must only be called from functions that never return,
25 * and it must always be inlined.
26 */
27static __always_inline void boot_init_stack_canary(void)
28{
29 unsigned long canary;
30
31 /* Try to get a semi random initial value. */
32 get_random_bytes(&canary, sizeof(canary));
33 canary ^= mftb();
34 canary ^= LINUX_VERSION_CODE;
35
36 current->stack_canary = canary;
37 __stack_chk_guard = current->stack_canary;
38}
39
40#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index f0b238516e9b..e0b9e576905a 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }
44 44
45#ifdef CONFIG_PPC_POWERNV 45#ifdef CONFIG_PPC_POWERNV
46extern int icp_opal_init(void); 46extern int icp_opal_init(void);
47extern void icp_opal_flush_interrupt(void);
47#else 48#else
48static inline int icp_opal_init(void) { return -ENODEV; } 49static inline int icp_opal_init(void) { return -ENODEV; }
49#endif 50#endif
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 23f8082d7bfa..f4c2b52e58b3 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
19CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) 19CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
20CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) 20CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
21 21
22# -fstack-protector triggers protection checks in this code,
23# but it is being used too early to link to meaningful stack_chk logic.
24CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
25
26ifdef CONFIG_FUNCTION_TRACER 22ifdef CONFIG_FUNCTION_TRACER
27# Do not trace early boot code 23# Do not trace early boot code
28CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE) 24CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 0601e6a7297c..195a9fc8f81c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,9 +91,6 @@ int main(void)
91 DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp)); 91 DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
92#endif 92#endif
93 93
94#ifdef CONFIG_CC_STACKPROTECTOR
95 DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
96#endif
97 DEFINE(KSP, offsetof(struct thread_struct, ksp)); 94 DEFINE(KSP, offsetof(struct thread_struct, ksp));
98 DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); 95 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
99#ifdef CONFIG_BOOKE 96#ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 8180bfd7ab93..9de7f79e702b 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
298 * 298 *
299 * For pHyp, we have to enable IO for log retrieval. Otherwise, 299 * For pHyp, we have to enable IO for log retrieval. Otherwise,
 300 * 0xFF's are always returned from PCI config space. 300 * 0xFF's are always returned from PCI config space.
301 *
302 * When the @severity is EEH_LOG_PERM, the PE is going to be
303 * removed. Prior to that, the drivers for devices included in
 304 * the PE will be closed. The drivers rely on a working IO path
 305 * to bring the devices to a quiet state. Otherwise, PCI traffic
 306 * from those devices after they are removed is likely to cause
307 * another unexpected EEH error.
301 */ 308 */
302 if (!(pe->type & EEH_PE_PHB)) { 309 if (!(pe->type & EEH_PE_PHB)) {
303 if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG)) 310 if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
311 severity == EEH_LOG_PERM)
304 eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); 312 eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
305 313
306 /* 314 /*
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index d88573bdd090..b94887165a10 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
545static void *__eeh_clear_pe_frozen_state(void *data, void *flag) 545static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
546{ 546{
547 struct eeh_pe *pe = (struct eeh_pe *)data; 547 struct eeh_pe *pe = (struct eeh_pe *)data;
548 bool *clear_sw_state = flag; 548 bool clear_sw_state = *(bool *)flag;
549 int i, rc = 1; 549 int i, rc = 1;
550 550
551 for (i = 0; rc && i < 3; i++) 551 for (i = 0; rc && i < 3; i++)
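
The eeh_driver change is a real bug fix: flag points at a bool, and binding it to a pointer variable means any later truth test checks the pointer (always non-NULL here) rather than the value, so clear_sw_state read as true even when the caller passed false. A minimal repro, with the callers invented:

#include <stdbool.h>
#include <stdio.h>

static void old_way(void *flag)
{
	bool *clear_sw_state = flag;			/* bug: tests the pointer */
	printf("old: %d\n", clear_sw_state ? 1 : 0);	/* 1 even for false */
}

static void new_way(void *flag)
{
	bool clear_sw_state = *(bool *)flag;		/* fixed: tests the value */
	printf("new: %d\n", clear_sw_state ? 1 : 0);	/* 0 */
}

int main(void)
{
	bool flag = false;

	old_way(&flag);
	new_way(&flag);
	return 0;
}
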
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 5742dbdbee46..3841d749a430 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
674 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */ 674 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
675END_FTR_SECTION_IFSET(CPU_FTR_SPE) 675END_FTR_SECTION_IFSET(CPU_FTR_SPE)
676#endif /* CONFIG_SPE */ 676#endif /* CONFIG_SPE */
677#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) 677
678 lwz r0,TSK_STACK_CANARY(r2)
679 lis r4,__stack_chk_guard@ha
680 stw r0,__stack_chk_guard@l(r4)
681#endif
682 lwz r0,_CCR(r1) 678 lwz r0,_CCR(r1)
683 mtcrf 0xFF,r0 679 mtcrf 0xFF,r0
684 /* r3-r12 are destroyed -- Cort */ 680 /* r3-r12 are destroyed -- Cort */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index bb1807184bad..0b0f89685b67 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
286 for (end = (void *)vers + size; vers < end; vers++) 286 for (end = (void *)vers + size; vers < end; vers++)
287 if (vers->name[0] == '.') { 287 if (vers->name[0] == '.') {
288 memmove(vers->name, vers->name+1, strlen(vers->name)); 288 memmove(vers->name, vers->name+1, strlen(vers->name));
289#ifdef ARCH_RELOCATES_KCRCTAB
290 /* The TOC symbol has no CRC computed. To avoid CRC
291 * check failing, we must force it to the expected
292 * value (see CRC check in module.c).
293 */
294 if (!strcmp(vers->name, "TOC."))
295 vers->crc = -(unsigned long)reloc_start;
296#endif
297 } 289 }
298} 290}
299 291
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 04885cec24df..5dd056df0baa 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -64,12 +64,6 @@
64#include <linux/kprobes.h> 64#include <linux/kprobes.h>
65#include <linux/kdebug.h> 65#include <linux/kdebug.h>
66 66
67#ifdef CONFIG_CC_STACKPROTECTOR
68#include <linux/stackprotector.h>
69unsigned long __stack_chk_guard __read_mostly;
70EXPORT_SYMBOL(__stack_chk_guard);
71#endif
72
73/* Transactional Memory debug */ 67/* Transactional Memory debug */
74#ifdef TM_DEBUG_SW 68#ifdef TM_DEBUG_SW
75#define TM_DEBUG(x...) printk(KERN_INFO x) 69#define TM_DEBUG(x...) printk(KERN_INFO x)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ec47a939cbdd..ac83eb04a8b8 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
2834 2834
2835 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); 2835 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2836 2836
2837 if (!PHANDLE_VALID(cpu_pkg))
2838 return;
2839
2837 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); 2840 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
2838 prom.cpu = be32_to_cpu(rval); 2841 prom.cpu = be32_to_cpu(rval);
2839 2842
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index e4744ff38a17..925a4ef90559 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -463,6 +463,10 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
463 463
464 flush_fp_to_thread(target); 464 flush_fp_to_thread(target);
465 465
466 for (i = 0; i < 32 ; i++)
467 buf[i] = target->thread.TS_FPR(i);
468 buf[32] = target->thread.fp_state.fpscr;
469
466 /* copy to local buffer then write that out */ 470 /* copy to local buffer then write that out */
467 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); 471 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
468 if (i) 472 if (i)
@@ -672,6 +676,9 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
672 flush_altivec_to_thread(target); 676 flush_altivec_to_thread(target);
673 flush_vsx_to_thread(target); 677 flush_vsx_to_thread(target);
674 678
679 for (i = 0; i < 32 ; i++)
680 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
681
675 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 682 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
676 buf, 0, 32 * sizeof(double)); 683 buf, 0, 32 * sizeof(double));
677 if (!ret) 684 if (!ret)
@@ -1019,6 +1026,10 @@ static int tm_cfpr_set(struct task_struct *target,
1019 flush_fp_to_thread(target); 1026 flush_fp_to_thread(target);
1020 flush_altivec_to_thread(target); 1027 flush_altivec_to_thread(target);
1021 1028
1029 for (i = 0; i < 32; i++)
1030 buf[i] = target->thread.TS_CKFPR(i);
1031 buf[32] = target->thread.ckfp_state.fpscr;
1032
1022 /* copy to local buffer then write that out */ 1033 /* copy to local buffer then write that out */
1023 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1); 1034 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1024 if (i) 1035 if (i)
@@ -1283,6 +1294,9 @@ static int tm_cvsx_set(struct task_struct *target,
1283 flush_altivec_to_thread(target); 1294 flush_altivec_to_thread(target);
1284 flush_vsx_to_thread(target); 1295 flush_vsx_to_thread(target);
1285 1296
1297 for (i = 0; i < 32 ; i++)
1298 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1299
1286 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1300 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1287 buf, 0, 32 * sizeof(double)); 1301 buf, 0, 32 * sizeof(double));
1288 if (!ret) 1302 if (!ret)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 6fd30ac7d14a..62a50d6d1053 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
253 if (unlikely(debugger_fault_handler(regs))) 253 if (unlikely(debugger_fault_handler(regs)))
254 goto bail; 254 goto bail;
255 255
256 /* On a kernel SLB miss we can only check for a valid exception entry */ 256 /*
257 if (!user_mode(regs) && (address >= TASK_SIZE)) { 257 * The kernel should never take an execute fault nor should it
258 * take a page fault to a kernel address.
259 */
260 if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
258 rc = SIGSEGV; 261 rc = SIGSEGV;
259 goto bail; 262 goto bail;
260 } 263 }
@@ -391,20 +394,6 @@ good_area:
391 394
392 if (is_exec) { 395 if (is_exec) {
393 /* 396 /*
394 * An execution fault + no execute ?
395 *
396 * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
397 * deliberately create NX mappings, and use the fault to do the
398 * cache flush. This is usually handled in hash_page_do_lazy_icache()
399 * but we could end up here if that races with a concurrent PTE
400 * update. In that case we need to fall through here to the VMA
401 * check below.
402 */
403 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
404 (regs->msr & SRR1_ISI_N_OR_G))
405 goto bad_area;
406
407 /*
408 * Allow execution from readable areas if the MMU does not 397 * Allow execution from readable areas if the MMU does not
409 * provide separate controls over reading and executing. 398 * provide separate controls over reading and executing.
410 * 399 *
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 80334937e14f..67e19a0821be 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void)
747} 747}
748 748
749#ifdef CONFIG_MEMORY_HOTPLUG 749#ifdef CONFIG_MEMORY_HOTPLUG
750int create_section_mapping(unsigned long start, unsigned long end) 750int hash__create_section_mapping(unsigned long start, unsigned long end)
751{ 751{
752 int rc = htab_bolt_mapping(start, end, __pa(start), 752 int rc = htab_bolt_mapping(start, end, __pa(start),
753 pgprot_val(PAGE_KERNEL), mmu_linear_psize, 753 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
@@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
761 return rc; 761 return rc;
762} 762}
763 763
764int remove_section_mapping(unsigned long start, unsigned long end) 764int hash__remove_section_mapping(unsigned long start, unsigned long end)
765{ 765{
766 int rc = htab_remove_mapping(start, end, mmu_linear_psize, 766 int rc = htab_remove_mapping(start, end, mmu_linear_psize,
767 mmu_kernel_ssize); 767 mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index d5026f3800b6..37b5f91e381b 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -125,11 +125,14 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
125int hugepd_ok(hugepd_t hpd) 125int hugepd_ok(hugepd_t hpd)
126{ 126{
127 bool is_hugepd; 127 bool is_hugepd;
128 unsigned long hpdval;
129
130 hpdval = hpd_val(hpd);
128 131
129 /* 132 /*
130 * We should not find this format in page directory, warn otherwise. 133 * We should not find this format in page directory, warn otherwise.
131 */ 134 */
132 is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0)); 135 is_hugepd = (((hpdval & 0x3) == 0x0) && ((hpdval & HUGEPD_SHIFT_MASK) != 0));
133 WARN(is_hugepd, "Found wrong page directory format\n"); 136 WARN(is_hugepd, "Found wrong page directory format\n");
134 return 0; 137 return 0;
135} 138}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 289df38fb7e0..8c3389cbcd12 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -53,7 +53,7 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
53static unsigned nr_gpages; 53static unsigned nr_gpages;
54#endif 54#endif
55 55
56#define hugepd_none(hpd) ((hpd).pd == 0) 56#define hugepd_none(hpd) (hpd_val(hpd) == 0)
57 57
58pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 58pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
59{ 59{
@@ -103,24 +103,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
103 for (i = 0; i < num_hugepd; i++, hpdp++) { 103 for (i = 0; i < num_hugepd; i++, hpdp++) {
104 if (unlikely(!hugepd_none(*hpdp))) 104 if (unlikely(!hugepd_none(*hpdp)))
105 break; 105 break;
106 else 106 else {
107#ifdef CONFIG_PPC_BOOK3S_64 107#ifdef CONFIG_PPC_BOOK3S_64
108 hpdp->pd = __pa(new) | 108 *hpdp = __hugepd(__pa(new) |
109 (shift_to_mmu_psize(pshift) << 2); 109 (shift_to_mmu_psize(pshift) << 2));
110#elif defined(CONFIG_PPC_8xx) 110#elif defined(CONFIG_PPC_8xx)
111 hpdp->pd = __pa(new) | 111 *hpdp = __hugepd(__pa(new) |
112 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : 112 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
113 _PMD_PAGE_512K) | 113 _PMD_PAGE_512K) | _PMD_PRESENT);
114 _PMD_PRESENT;
115#else 114#else
116 /* We use the old format for PPC_FSL_BOOK3E */ 115 /* We use the old format for PPC_FSL_BOOK3E */
117 hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift; 116 *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
118#endif 117#endif
118 }
119 } 119 }
120 /* If we bailed from the for loop early, an error occurred, clean up */ 120 /* If we bailed from the for loop early, an error occurred, clean up */
121 if (i < num_hugepd) { 121 if (i < num_hugepd) {
122 for (i = i - 1 ; i >= 0; i--, hpdp--) 122 for (i = i - 1 ; i >= 0; i--, hpdp--)
123 hpdp->pd = 0; 123 *hpdp = __hugepd(0);
124 kmem_cache_free(cachep, new); 124 kmem_cache_free(cachep, new);
125 } 125 }
126 spin_unlock(&mm->page_table_lock); 126 spin_unlock(&mm->page_table_lock);
@@ -454,7 +454,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
454 return; 454 return;
455 455
456 for (i = 0; i < num_hugepd; i++, hpdp++) 456 for (i = 0; i < num_hugepd; i++, hpdp++)
457 hpdp->pd = 0; 457 *hpdp = __hugepd(0);
458 458
459 if (shift >= pdshift) 459 if (shift >= pdshift)
460 hugepd_free(tlb, hugepte); 460 hugepd_free(tlb, hugepte);
@@ -810,12 +810,8 @@ static int __init hugetlbpage_init(void)
810 * if we have pdshift and shift value same, we don't 810 * if we have pdshift and shift value same, we don't
811 * use pgt cache for hugepd. 811 * use pgt cache for hugepd.
812 */ 812 */
813 if (pdshift > shift) { 813 if (pdshift > shift)
814 pgtable_cache_add(pdshift - shift, NULL); 814 pgtable_cache_add(pdshift - shift, NULL);
815 if (!PGT_CACHE(pdshift - shift))
816 panic("hugetlbpage_init(): could not create "
817 "pgtable cache for %d bit pagesize\n", shift);
818 }
819#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx) 815#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
820 else if (!hugepte_cache) { 816 else if (!hugepte_cache) {
821 /* 817 /*
@@ -852,9 +848,6 @@ static int __init hugetlbpage_init(void)
852 else if (mmu_psize_defs[MMU_PAGE_2M].shift) 848 else if (mmu_psize_defs[MMU_PAGE_2M].shift)
853 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift; 849 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
854#endif 850#endif
855 else
856 panic("%s: Unable to set default huge page size\n", __func__);
857
858 return 0; 851 return 0;
859} 852}
860 853
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index a175cd82ae8c..f2108c40e697 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -78,8 +78,12 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
78 align = max_t(unsigned long, align, minalign); 78 align = max_t(unsigned long, align, minalign);
79 name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); 79 name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
80 new = kmem_cache_create(name, table_size, align, 0, ctor); 80 new = kmem_cache_create(name, table_size, align, 0, ctor);
81 if (!new)
82 panic("Could not allocate pgtable cache for order %d", shift);
83
81 kfree(name); 84 kfree(name);
82 pgtable_cache[shift - 1] = new; 85 pgtable_cache[shift - 1] = new;
86
83 pr_debug("Allocated pgtable cache for order %d\n", shift); 87 pr_debug("Allocated pgtable cache for order %d\n", shift);
84} 88}
85 89
@@ -88,7 +92,7 @@ void pgtable_cache_init(void)
88{ 92{
89 pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor); 93 pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
90 94
91 if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE)) 95 if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX))
92 pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor); 96 pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
93 /* 97 /*
94 * In all current configs, when the PUD index exists it's the 98 * In all current configs, when the PUD index exists it's the
@@ -97,11 +101,4 @@ void pgtable_cache_init(void)
97 */ 101 */
98 if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) 102 if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
99 pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor); 103 pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
100
101 if (!PGT_CACHE(PGD_INDEX_SIZE))
102 panic("Couldn't allocate pgd cache");
103 if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
104 panic("Couldn't allocate pmd pgtable caches");
105 if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
106 panic("Couldn't allocate pud pgtable caches");
107} 104}
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 93abf8a9813d..8e1588021d1c 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -347,7 +347,8 @@ early_param("disable_radix", parse_disable_radix);
347void __init mmu_early_init_devtree(void) 347void __init mmu_early_init_devtree(void)
348{ 348{
349 /* Disable radix mode based on kernel command line. */ 349 /* Disable radix mode based on kernel command line. */
350 if (disable_radix) 350 /* We don't yet have the machinery to do radix as a guest. */
351 if (disable_radix || !(mfmsr() & MSR_HV))
351 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; 352 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
352 353
353 if (early_radix_enabled()) 354 if (early_radix_enabled())
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index ebf9782bacf9..653ff6c74ebe 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -126,3 +126,21 @@ void mmu_cleanup_all(void)
126 else if (mmu_hash_ops.hpte_clear_all) 126 else if (mmu_hash_ops.hpte_clear_all)
127 mmu_hash_ops.hpte_clear_all(); 127 mmu_hash_ops.hpte_clear_all();
128} 128}
129
130#ifdef CONFIG_MEMORY_HOTPLUG
131int create_section_mapping(unsigned long start, unsigned long end)
132{
133 if (radix_enabled())
134 return -ENODEV;
135
136 return hash__create_section_mapping(start, end);
137}
138
139int remove_section_mapping(unsigned long start, unsigned long end)
140{
141 if (radix_enabled())
142 return -ENODEV;
143
144 return hash__remove_section_mapping(start, end);
145}
146#endif /* CONFIG_MEMORY_HOTPLUG */
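
The new create_section_mapping()/remove_section_mapping() are thin runtime dispatchers: under radix they return -ENODEV, since memory hot-add mappings are not implemented there yet, and otherwise forward to the hash__ variants renamed in hash_utils_64.c above. The shape of it as a self-contained sketch:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool radix_enabled;	/* stand-in for the kernel's MMU feature test */

static int hash__create_section_mapping(unsigned long start, unsigned long end)
{
	printf("hash mapping %#lx-%#lx\n", start, end);
	return 0;
}

static int create_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled)
		return -ENODEV;		/* not implemented for radix yet */

	return hash__create_section_mapping(start, end);
}

int main(void)
{
	return create_section_mapping(0x10000000, 0x10010000);
}
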
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index cfa53ccc8baf..34f1a0dbc898 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
65 if (!pmdp) 65 if (!pmdp)
66 return -ENOMEM; 66 return -ENOMEM;
67 if (map_page_size == PMD_SIZE) { 67 if (map_page_size == PMD_SIZE) {
68 ptep = (pte_t *)pudp; 68 ptep = pmdp_ptep(pmdp);
69 goto set_the_pte; 69 goto set_the_pte;
70 } 70 }
71 ptep = pte_alloc_kernel(pmdp, ea); 71 ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
90 } 90 }
91 pmdp = pmd_offset(pudp, ea); 91 pmdp = pmd_offset(pudp, ea);
92 if (map_page_size == PMD_SIZE) { 92 if (map_page_size == PMD_SIZE) {
93 ptep = (pte_t *)pudp; 93 ptep = pmdp_ptep(pmdp);
94 goto set_the_pte; 94 goto set_the_pte;
95 } 95 }
96 if (!pmd_present(*pmdp)) { 96 if (!pmd_present(*pmdp)) {
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 61b79119065f..952713d6cf04 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
50 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { 50 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
51 __tlbiel_pid(pid, set, ric); 51 __tlbiel_pid(pid, set, ric);
52 } 52 }
53 if (cpu_has_feature(CPU_FTR_POWER9_DD1)) 53 asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
54 asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
55 return;
56} 54}
57 55
58static inline void _tlbie_pid(unsigned long pid, unsigned long ric) 56static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
85 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) 83 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
86 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); 84 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
87 asm volatile("ptesync": : :"memory"); 85 asm volatile("ptesync": : :"memory");
88 if (cpu_has_feature(CPU_FTR_POWER9_DD1))
89 asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
90} 86}
91 87
92static inline void _tlbie_va(unsigned long va, unsigned long pid, 88static inline void _tlbie_va(unsigned long va, unsigned long pid,
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index fd3e4034c04d..270eb9b74e2e 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -295,6 +295,8 @@ static inline void perf_read_regs(struct pt_regs *regs)
295 */ 295 */
296 if (TRAP(regs) != 0xf00) 296 if (TRAP(regs) != 0xf00)
297 use_siar = 0; 297 use_siar = 0;
298 else if ((ppmu->flags & PPMU_NO_SIAR))
299 use_siar = 0;
298 else if (marked) 300 else if (marked)
299 use_siar = 1; 301 use_siar = 1;
300 else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING)) 302 else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
index 6447dc1c3d89..929b56d47ad9 100644
--- a/arch/powerpc/perf/power9-events-list.h
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -16,7 +16,7 @@ EVENT(PM_CYC, 0x0001e)
16EVENT(PM_ICT_NOSLOT_CYC, 0x100f8) 16EVENT(PM_ICT_NOSLOT_CYC, 0x100f8)
17EVENT(PM_CMPLU_STALL, 0x1e054) 17EVENT(PM_CMPLU_STALL, 0x1e054)
18EVENT(PM_INST_CMPL, 0x00002) 18EVENT(PM_INST_CMPL, 0x00002)
19EVENT(PM_BRU_CMPL, 0x40060) 19EVENT(PM_BRU_CMPL, 0x10012)
20EVENT(PM_BR_MPRED_CMPL, 0x400f6) 20EVENT(PM_BR_MPRED_CMPL, 0x400f6)
21 21
22/* All L1 D cache load references counted at finish, gated by reject */ 22/* All L1 D cache load references counted at finish, gated by reject */
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 346010e8d463..7332634e18c9 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -384,7 +384,7 @@ static struct power_pmu power9_isa207_pmu = {
384 .bhrb_filter_map = power9_bhrb_filter_map, 384 .bhrb_filter_map = power9_bhrb_filter_map,
385 .get_constraint = isa207_get_constraint, 385 .get_constraint = isa207_get_constraint,
386 .disable_pmc = isa207_disable_pmc, 386 .disable_pmc = isa207_disable_pmc,
387 .flags = PPMU_HAS_SIER | PPMU_ARCH_207S, 387 .flags = PPMU_NO_SIAR | PPMU_ARCH_207S,
388 .n_generic = ARRAY_SIZE(power9_generic_events), 388 .n_generic = ARRAY_SIZE(power9_generic_events),
389 .generic_events = power9_generic_events, 389 .generic_events = power9_generic_events,
390 .cache_events = &power9_cache_events, 390 .cache_events = &power9_cache_events,
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index c789258ae1e1..eec0e8d0454d 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
155 wmask = SRR1_WAKEMASK_P8; 155 wmask = SRR1_WAKEMASK_P8;
156 156
157 idle_states = pnv_get_supported_cpuidle_states(); 157 idle_states = pnv_get_supported_cpuidle_states();
158
158 /* We don't want to take decrementer interrupts while we are offline, 159 /* We don't want to take decrementer interrupts while we are offline,
159 * so clear LPCR:PECE1. We keep PECE2 enabled. 160 * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
 161 * enabled so as to let IPIs in.
160 */ 162 */
161 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); 163 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
162 164
@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)
206 * contains 0. 208 * contains 0.
207 */ 209 */
208 if (((srr1 & wmask) == SRR1_WAKEEE) || 210 if (((srr1 & wmask) == SRR1_WAKEEE) ||
211 ((srr1 & wmask) == SRR1_WAKEHVI) ||
209 (local_paca->irq_happened & PACA_IRQ_EE)) { 212 (local_paca->irq_happened & PACA_IRQ_EE)) {
210 icp_native_flush_interrupt(); 213 if (cpu_has_feature(CPU_FTR_ARCH_300))
214 icp_opal_flush_interrupt();
215 else
216 icp_native_flush_interrupt();
211 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { 217 } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
212 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); 218 unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
213 asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); 219 asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
221 if (srr1 && !generic_check_cpu_restart(cpu)) 227 if (srr1 && !generic_check_cpu_restart(cpu))
222 DBG("CPU%d Unexpected exit while offline !\n", cpu); 228 DBG("CPU%d Unexpected exit while offline !\n", cpu);
223 } 229 }
230
231 /* Re-enable decrementer interrupts */
224 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1); 232 mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
225 DBG("CPU%d coming online...\n", cpu); 233 DBG("CPU%d coming online...\n", cpu);
226} 234}
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index d38e86fd5720..f9670eabfcfa 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -20,6 +20,7 @@
20#include <asm/xics.h> 20#include <asm/xics.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/opal.h> 22#include <asm/opal.h>
23#include <asm/kvm_ppc.h>
23 24
24static void icp_opal_teardown_cpu(void) 25static void icp_opal_teardown_cpu(void)
25{ 26{
@@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void)
39 * Should we be flagging idle loop instead? 40 * Should we be flagging idle loop instead?
40 * Or creating some task to be scheduled? 41 * Or creating some task to be scheduled?
41 */ 42 */
42 opal_int_eoi((0x00 << 24) | XICS_IPI); 43 if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
44 force_external_irq_replay();
45}
46
47static unsigned int icp_opal_get_xirr(void)
48{
49 unsigned int kvm_xirr;
50 __be32 hw_xirr;
51 int64_t rc;
52
53 /* Handle an interrupt latched by KVM first */
54 kvm_xirr = kvmppc_get_xics_latch();
55 if (kvm_xirr)
56 return kvm_xirr;
57
58 /* Then ask OPAL */
59 rc = opal_int_get_xirr(&hw_xirr, false);
60 if (rc < 0)
61 return 0;
62 return be32_to_cpu(hw_xirr);
43} 63}
44 64
45static unsigned int icp_opal_get_irq(void) 65static unsigned int icp_opal_get_irq(void)
@@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void)
47 unsigned int xirr; 67 unsigned int xirr;
48 unsigned int vec; 68 unsigned int vec;
49 unsigned int irq; 69 unsigned int irq;
50 int64_t rc;
51 70
52 rc = opal_int_get_xirr(&xirr, false); 71 xirr = icp_opal_get_xirr();
53 if (rc < 0)
54 return 0;
55 xirr = be32_to_cpu(xirr);
56 vec = xirr & 0x00ffffff; 72 vec = xirr & 0x00ffffff;
57 if (vec == XICS_IRQ_SPURIOUS) 73 if (vec == XICS_IRQ_SPURIOUS)
58 return 0; 74 return 0;
@@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void)
67 xics_mask_unknown_vec(vec); 83 xics_mask_unknown_vec(vec);
68 84
69 /* We might learn about it later, so EOI it */ 85 /* We might learn about it later, so EOI it */
70 opal_int_eoi(xirr); 86 if (opal_int_eoi(xirr) > 0)
87 force_external_irq_replay();
71 88
72 return 0; 89 return 0;
73} 90}
@@ -103,18 +120,49 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data)
103{ 120{
104 int hw_cpu = get_hard_smp_processor_id(cpu); 121 int hw_cpu = get_hard_smp_processor_id(cpu);
105 122
123 kvmppc_set_host_ipi(cpu, 1);
106 opal_int_set_mfrr(hw_cpu, IPI_PRIORITY); 124 opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
107} 125}
108 126
109static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id) 127static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
110{ 128{
111 int hw_cpu = hard_smp_processor_id(); 129 int cpu = smp_processor_id();
112 130
113 opal_int_set_mfrr(hw_cpu, 0xff); 131 kvmppc_set_host_ipi(cpu, 0);
132 opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
114 133
115 return smp_ipi_demux(); 134 return smp_ipi_demux();
116} 135}
117 136
137/*
138 * Called when an interrupt is received on an off-line CPU to
139 * clear the interrupt, so that the CPU can go back to nap mode.
140 */
141void icp_opal_flush_interrupt(void)
142{
143 unsigned int xirr;
144 unsigned int vec;
145
146 do {
147 xirr = icp_opal_get_xirr();
148 vec = xirr & 0x00ffffff;
149 if (vec == XICS_IRQ_SPURIOUS)
150 break;
151 if (vec == XICS_IPI) {
152 /* Clear pending IPI */
153 int cpu = smp_processor_id();
154 kvmppc_set_host_ipi(cpu, 0);
155 opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
156 } else {
157 pr_err("XICS: hw interrupt 0x%x to offline cpu, "
158 "disabling\n", vec);
159 xics_mask_unknown_vec(vec);
160 }
161
162 /* EOI the interrupt */
163 } while (opal_int_eoi(xirr) > 0);
164}
165
118#endif /* CONFIG_SMP */ 166#endif /* CONFIG_SMP */
119 167
120static const struct icp_ops icp_opal_ops = { 168static const struct icp_ops icp_opal_ops = {
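The icp-opal changes above combine two patterns: fetch a pending interrupt from a software latch (KVM) before asking firmware (OPAL), and drain interrupts on an offline CPU by EOIing in a loop for as long as the EOI reports a replay is needed. A minimal user-space sketch of both, where latch, hw_fetch(), eoi() and get_pending() are illustrative stand-ins, not OPAL or kernel API:

#include <stdio.h>

#define SPURIOUS 0xff

static unsigned int latch;		/* a software-latched vector */

static unsigned int hw_fetch(void)	/* stand-in for opal_int_get_xirr() */
{
	static unsigned int queue[] = { 0x12, 0x34, SPURIOUS };
	static unsigned int i;

	return queue[i++];
}

static int eoi(unsigned int vec)	/* stand-in for opal_int_eoi(); */
{					/* > 0 means "replay needed" */
	return vec == 0x12;
}

static unsigned int get_pending(void)
{
	if (latch)			/* check the software latch first */
		return latch;
	return hw_fetch();		/* then fall back to the hardware */
}

int main(void)
{
	unsigned int vec;

	latch = 0x7;			/* a latched IPI wins over hardware */
	printf("latched: 0x%x\n", get_pending());
	latch = 0;

	do {				/* drain: keep EOIing until no replay */
		vec = get_pending();
		if (vec == SPURIOUS)
			break;
		printf("handled: 0x%x\n", vec);
	} while (eoi(vec) > 0);
	return 0;
}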
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index e659daffe368..e00975361fec 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -69,7 +69,7 @@ CONFIG_CMA=y
69CONFIG_CMA_DEBUG=y 69CONFIG_CMA_DEBUG=y
70CONFIG_CMA_DEBUGFS=y 70CONFIG_CMA_DEBUGFS=y
71CONFIG_MEM_SOFT_DIRTY=y 71CONFIG_MEM_SOFT_DIRTY=y
72CONFIG_ZPOOL=m 72CONFIG_ZSWAP=y
73CONFIG_ZBUD=m 73CONFIG_ZBUD=m
74CONFIG_ZSMALLOC=m 74CONFIG_ZSMALLOC=m
75CONFIG_ZSMALLOC_STAT=y 75CONFIG_ZSMALLOC_STAT=y
@@ -141,8 +141,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
141CONFIG_NF_CONNTRACK_EVENTS=y 141CONFIG_NF_CONNTRACK_EVENTS=y
142CONFIG_NF_CONNTRACK_TIMEOUT=y 142CONFIG_NF_CONNTRACK_TIMEOUT=y
143CONFIG_NF_CONNTRACK_TIMESTAMP=y 143CONFIG_NF_CONNTRACK_TIMESTAMP=y
144CONFIG_NF_CT_PROTO_DCCP=m
145CONFIG_NF_CT_PROTO_UDPLITE=m
146CONFIG_NF_CONNTRACK_AMANDA=m 144CONFIG_NF_CONNTRACK_AMANDA=m
147CONFIG_NF_CONNTRACK_FTP=m 145CONFIG_NF_CONNTRACK_FTP=m
148CONFIG_NF_CONNTRACK_H323=m 146CONFIG_NF_CONNTRACK_H323=m
@@ -159,13 +157,12 @@ CONFIG_NF_TABLES=m
159CONFIG_NFT_EXTHDR=m 157CONFIG_NFT_EXTHDR=m
160CONFIG_NFT_META=m 158CONFIG_NFT_META=m
161CONFIG_NFT_CT=m 159CONFIG_NFT_CT=m
162CONFIG_NFT_RBTREE=m
163CONFIG_NFT_HASH=m
164CONFIG_NFT_COUNTER=m 160CONFIG_NFT_COUNTER=m
165CONFIG_NFT_LOG=m 161CONFIG_NFT_LOG=m
166CONFIG_NFT_LIMIT=m 162CONFIG_NFT_LIMIT=m
167CONFIG_NFT_NAT=m 163CONFIG_NFT_NAT=m
168CONFIG_NFT_COMPAT=m 164CONFIG_NFT_COMPAT=m
165CONFIG_NFT_HASH=m
169CONFIG_NETFILTER_XT_SET=m 166CONFIG_NETFILTER_XT_SET=m
170CONFIG_NETFILTER_XT_TARGET_AUDIT=m 167CONFIG_NETFILTER_XT_TARGET_AUDIT=m
171CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 168CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -219,7 +216,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
219CONFIG_NETFILTER_XT_MATCH_RATEEST=m 216CONFIG_NETFILTER_XT_MATCH_RATEEST=m
220CONFIG_NETFILTER_XT_MATCH_REALM=m 217CONFIG_NETFILTER_XT_MATCH_REALM=m
221CONFIG_NETFILTER_XT_MATCH_RECENT=m 218CONFIG_NETFILTER_XT_MATCH_RECENT=m
222CONFIG_NETFILTER_XT_MATCH_SOCKET=m
223CONFIG_NETFILTER_XT_MATCH_STATE=m 219CONFIG_NETFILTER_XT_MATCH_STATE=m
224CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 220CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
225CONFIG_NETFILTER_XT_MATCH_STRING=m 221CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -258,7 +254,6 @@ CONFIG_IP_VS_NQ=m
258CONFIG_IP_VS_FTP=m 254CONFIG_IP_VS_FTP=m
259CONFIG_IP_VS_PE_SIP=m 255CONFIG_IP_VS_PE_SIP=m
260CONFIG_NF_CONNTRACK_IPV4=m 256CONFIG_NF_CONNTRACK_IPV4=m
261# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
262CONFIG_NF_TABLES_IPV4=m 257CONFIG_NF_TABLES_IPV4=m
263CONFIG_NFT_CHAIN_ROUTE_IPV4=m 258CONFIG_NFT_CHAIN_ROUTE_IPV4=m
264CONFIG_NF_TABLES_ARP=m 259CONFIG_NF_TABLES_ARP=m
@@ -436,7 +431,6 @@ CONFIG_EQUALIZER=m
436CONFIG_IFB=m 431CONFIG_IFB=m
437CONFIG_MACVLAN=m 432CONFIG_MACVLAN=m
438CONFIG_MACVTAP=m 433CONFIG_MACVTAP=m
439CONFIG_IPVLAN=m
440CONFIG_VXLAN=m 434CONFIG_VXLAN=m
441CONFIG_TUN=m 435CONFIG_TUN=m
442CONFIG_VETH=m 436CONFIG_VETH=m
@@ -480,6 +474,7 @@ CONFIG_VIRTIO_BALLOON=m
480CONFIG_EXT4_FS=y 474CONFIG_EXT4_FS=y
481CONFIG_EXT4_FS_POSIX_ACL=y 475CONFIG_EXT4_FS_POSIX_ACL=y
482CONFIG_EXT4_FS_SECURITY=y 476CONFIG_EXT4_FS_SECURITY=y
477CONFIG_EXT4_ENCRYPTION=y
483CONFIG_JBD2_DEBUG=y 478CONFIG_JBD2_DEBUG=y
484CONFIG_JFS_FS=m 479CONFIG_JFS_FS=m
485CONFIG_JFS_POSIX_ACL=y 480CONFIG_JFS_POSIX_ACL=y
@@ -592,14 +587,12 @@ CONFIG_LOCK_STAT=y
592CONFIG_DEBUG_LOCKDEP=y 587CONFIG_DEBUG_LOCKDEP=y
593CONFIG_DEBUG_ATOMIC_SLEEP=y 588CONFIG_DEBUG_ATOMIC_SLEEP=y
594CONFIG_DEBUG_LOCKING_API_SELFTESTS=y 589CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
595CONFIG_DEBUG_LIST=y
596CONFIG_DEBUG_SG=y 590CONFIG_DEBUG_SG=y
597CONFIG_DEBUG_NOTIFIERS=y 591CONFIG_DEBUG_NOTIFIERS=y
598CONFIG_DEBUG_CREDENTIALS=y 592CONFIG_DEBUG_CREDENTIALS=y
599CONFIG_RCU_TORTURE_TEST=m 593CONFIG_RCU_TORTURE_TEST=m
600CONFIG_RCU_CPU_STALL_TIMEOUT=300 594CONFIG_RCU_CPU_STALL_TIMEOUT=300
601CONFIG_NOTIFIER_ERROR_INJECTION=m 595CONFIG_NOTIFIER_ERROR_INJECTION=m
602CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
603CONFIG_PM_NOTIFIER_ERROR_INJECT=m 596CONFIG_PM_NOTIFIER_ERROR_INJECT=m
604CONFIG_FAULT_INJECTION=y 597CONFIG_FAULT_INJECTION=y
605CONFIG_FAILSLAB=y 598CONFIG_FAILSLAB=y
@@ -618,6 +611,7 @@ CONFIG_STACK_TRACER=y
618CONFIG_BLK_DEV_IO_TRACE=y 611CONFIG_BLK_DEV_IO_TRACE=y
619CONFIG_UPROBE_EVENT=y 612CONFIG_UPROBE_EVENT=y
620CONFIG_FUNCTION_PROFILER=y 613CONFIG_FUNCTION_PROFILER=y
614CONFIG_HIST_TRIGGERS=y
621CONFIG_TRACE_ENUM_MAP_FILE=y 615CONFIG_TRACE_ENUM_MAP_FILE=y
622CONFIG_LKDTM=m 616CONFIG_LKDTM=m
623CONFIG_TEST_LIST_SORT=y 617CONFIG_TEST_LIST_SORT=y
@@ -630,6 +624,7 @@ CONFIG_TEST_STRING_HELPERS=y
630CONFIG_TEST_KSTRTOX=y 624CONFIG_TEST_KSTRTOX=y
631CONFIG_DMA_API_DEBUG=y 625CONFIG_DMA_API_DEBUG=y
632CONFIG_TEST_BPF=m 626CONFIG_TEST_BPF=m
627CONFIG_BUG_ON_DATA_CORRUPTION=y
633CONFIG_S390_PTDUMP=y 628CONFIG_S390_PTDUMP=y
634CONFIG_ENCRYPTED_KEYS=m 629CONFIG_ENCRYPTED_KEYS=m
635CONFIG_SECURITY=y 630CONFIG_SECURITY=y
@@ -640,16 +635,18 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
640CONFIG_SECURITY_SELINUX_DISABLE=y 635CONFIG_SECURITY_SELINUX_DISABLE=y
641CONFIG_IMA=y 636CONFIG_IMA=y
642CONFIG_IMA_APPRAISE=y 637CONFIG_IMA_APPRAISE=y
638CONFIG_CRYPTO_RSA=m
639CONFIG_CRYPTO_DH=m
640CONFIG_CRYPTO_ECDH=m
643CONFIG_CRYPTO_USER=m 641CONFIG_CRYPTO_USER=m
644# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
645CONFIG_CRYPTO_CRYPTD=m 642CONFIG_CRYPTO_CRYPTD=m
646CONFIG_CRYPTO_TEST=m 643CONFIG_CRYPTO_TEST=m
647CONFIG_CRYPTO_CCM=m 644CONFIG_CRYPTO_CCM=m
648CONFIG_CRYPTO_GCM=m 645CONFIG_CRYPTO_GCM=m
649CONFIG_CRYPTO_CTS=m 646CONFIG_CRYPTO_CHACHA20POLY1305=m
650CONFIG_CRYPTO_LRW=m 647CONFIG_CRYPTO_LRW=m
651CONFIG_CRYPTO_PCBC=m 648CONFIG_CRYPTO_PCBC=m
652CONFIG_CRYPTO_XTS=m 649CONFIG_CRYPTO_KEYWRAP=m
653CONFIG_CRYPTO_XCBC=m 650CONFIG_CRYPTO_XCBC=m
654CONFIG_CRYPTO_VMAC=m 651CONFIG_CRYPTO_VMAC=m
655CONFIG_CRYPTO_CRC32=m 652CONFIG_CRYPTO_CRC32=m
@@ -673,11 +670,13 @@ CONFIG_CRYPTO_SEED=m
673CONFIG_CRYPTO_SERPENT=m 670CONFIG_CRYPTO_SERPENT=m
674CONFIG_CRYPTO_TEA=m 671CONFIG_CRYPTO_TEA=m
675CONFIG_CRYPTO_TWOFISH=m 672CONFIG_CRYPTO_TWOFISH=m
676CONFIG_CRYPTO_LZO=m 673CONFIG_CRYPTO_842=m
677CONFIG_CRYPTO_LZ4=m 674CONFIG_CRYPTO_LZ4=m
678CONFIG_CRYPTO_LZ4HC=m 675CONFIG_CRYPTO_LZ4HC=m
679CONFIG_CRYPTO_USER_API_HASH=m 676CONFIG_CRYPTO_USER_API_HASH=m
680CONFIG_CRYPTO_USER_API_SKCIPHER=m 677CONFIG_CRYPTO_USER_API_SKCIPHER=m
678CONFIG_CRYPTO_USER_API_RNG=m
679CONFIG_CRYPTO_USER_API_AEAD=m
681CONFIG_ZCRYPT=m 680CONFIG_ZCRYPT=m
682CONFIG_CRYPTO_SHA1_S390=m 681CONFIG_CRYPTO_SHA1_S390=m
683CONFIG_CRYPTO_SHA256_S390=m 682CONFIG_CRYPTO_SHA256_S390=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 95ceac50bc65..f05d2d6e1087 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -12,6 +12,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
12CONFIG_IKCONFIG=y 12CONFIG_IKCONFIG=y
13CONFIG_IKCONFIG_PROC=y 13CONFIG_IKCONFIG_PROC=y
14CONFIG_NUMA_BALANCING=y 14CONFIG_NUMA_BALANCING=y
15# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
15CONFIG_MEMCG=y 16CONFIG_MEMCG=y
16CONFIG_MEMCG_SWAP=y 17CONFIG_MEMCG_SWAP=y
17CONFIG_BLK_CGROUP=y 18CONFIG_BLK_CGROUP=y
@@ -54,8 +55,9 @@ CONFIG_SOLARIS_X86_PARTITION=y
54CONFIG_UNIXWARE_DISKLABEL=y 55CONFIG_UNIXWARE_DISKLABEL=y
55CONFIG_CFQ_GROUP_IOSCHED=y 56CONFIG_CFQ_GROUP_IOSCHED=y
56CONFIG_DEFAULT_DEADLINE=y 57CONFIG_DEFAULT_DEADLINE=y
58CONFIG_LIVEPATCH=y
57CONFIG_TUNE_ZEC12=y 59CONFIG_TUNE_ZEC12=y
58CONFIG_NR_CPUS=256 60CONFIG_NR_CPUS=512
59CONFIG_NUMA=y 61CONFIG_NUMA=y
60CONFIG_HZ_100=y 62CONFIG_HZ_100=y
61CONFIG_MEMORY_HOTPLUG=y 63CONFIG_MEMORY_HOTPLUG=y
@@ -65,6 +67,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
65CONFIG_CLEANCACHE=y 67CONFIG_CLEANCACHE=y
66CONFIG_FRONTSWAP=y 68CONFIG_FRONTSWAP=y
67CONFIG_CMA=y 69CONFIG_CMA=y
70CONFIG_MEM_SOFT_DIRTY=y
68CONFIG_ZSWAP=y 71CONFIG_ZSWAP=y
69CONFIG_ZBUD=m 72CONFIG_ZBUD=m
70CONFIG_ZSMALLOC=m 73CONFIG_ZSMALLOC=m
@@ -136,8 +139,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
136CONFIG_NF_CONNTRACK_EVENTS=y 139CONFIG_NF_CONNTRACK_EVENTS=y
137CONFIG_NF_CONNTRACK_TIMEOUT=y 140CONFIG_NF_CONNTRACK_TIMEOUT=y
138CONFIG_NF_CONNTRACK_TIMESTAMP=y 141CONFIG_NF_CONNTRACK_TIMESTAMP=y
139CONFIG_NF_CT_PROTO_DCCP=m
140CONFIG_NF_CT_PROTO_UDPLITE=m
141CONFIG_NF_CONNTRACK_AMANDA=m 142CONFIG_NF_CONNTRACK_AMANDA=m
142CONFIG_NF_CONNTRACK_FTP=m 143CONFIG_NF_CONNTRACK_FTP=m
143CONFIG_NF_CONNTRACK_H323=m 144CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +155,12 @@ CONFIG_NF_TABLES=m
154CONFIG_NFT_EXTHDR=m 155CONFIG_NFT_EXTHDR=m
155CONFIG_NFT_META=m 156CONFIG_NFT_META=m
156CONFIG_NFT_CT=m 157CONFIG_NFT_CT=m
157CONFIG_NFT_RBTREE=m
158CONFIG_NFT_HASH=m
159CONFIG_NFT_COUNTER=m 158CONFIG_NFT_COUNTER=m
160CONFIG_NFT_LOG=m 159CONFIG_NFT_LOG=m
161CONFIG_NFT_LIMIT=m 160CONFIG_NFT_LIMIT=m
162CONFIG_NFT_NAT=m 161CONFIG_NFT_NAT=m
163CONFIG_NFT_COMPAT=m 162CONFIG_NFT_COMPAT=m
163CONFIG_NFT_HASH=m
164CONFIG_NETFILTER_XT_SET=m 164CONFIG_NETFILTER_XT_SET=m
165CONFIG_NETFILTER_XT_TARGET_AUDIT=m 165CONFIG_NETFILTER_XT_TARGET_AUDIT=m
166CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 166CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +214,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
214CONFIG_NETFILTER_XT_MATCH_RATEEST=m 214CONFIG_NETFILTER_XT_MATCH_RATEEST=m
215CONFIG_NETFILTER_XT_MATCH_REALM=m 215CONFIG_NETFILTER_XT_MATCH_REALM=m
216CONFIG_NETFILTER_XT_MATCH_RECENT=m 216CONFIG_NETFILTER_XT_MATCH_RECENT=m
217CONFIG_NETFILTER_XT_MATCH_SOCKET=m
218CONFIG_NETFILTER_XT_MATCH_STATE=m 217CONFIG_NETFILTER_XT_MATCH_STATE=m
219CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 218CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
220CONFIG_NETFILTER_XT_MATCH_STRING=m 219CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +252,6 @@ CONFIG_IP_VS_NQ=m
253CONFIG_IP_VS_FTP=m 252CONFIG_IP_VS_FTP=m
254CONFIG_IP_VS_PE_SIP=m 253CONFIG_IP_VS_PE_SIP=m
255CONFIG_NF_CONNTRACK_IPV4=m 254CONFIG_NF_CONNTRACK_IPV4=m
256# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
257CONFIG_NF_TABLES_IPV4=m 255CONFIG_NF_TABLES_IPV4=m
258CONFIG_NFT_CHAIN_ROUTE_IPV4=m 256CONFIG_NFT_CHAIN_ROUTE_IPV4=m
259CONFIG_NF_TABLES_ARP=m 257CONFIG_NF_TABLES_ARP=m
@@ -430,7 +428,6 @@ CONFIG_EQUALIZER=m
430CONFIG_IFB=m 428CONFIG_IFB=m
431CONFIG_MACVLAN=m 429CONFIG_MACVLAN=m
432CONFIG_MACVTAP=m 430CONFIG_MACVTAP=m
433CONFIG_IPVLAN=m
434CONFIG_VXLAN=m 431CONFIG_VXLAN=m
435CONFIG_TUN=m 432CONFIG_TUN=m
436CONFIG_VETH=m 433CONFIG_VETH=m
@@ -460,6 +457,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
460CONFIG_RAW_DRIVER=m 457CONFIG_RAW_DRIVER=m
461CONFIG_HANGCHECK_TIMER=m 458CONFIG_HANGCHECK_TIMER=m
462CONFIG_TN3270_FS=y 459CONFIG_TN3270_FS=y
460# CONFIG_HWMON is not set
463CONFIG_WATCHDOG=y 461CONFIG_WATCHDOG=y
464CONFIG_WATCHDOG_NOWAYOUT=y 462CONFIG_WATCHDOG_NOWAYOUT=y
465CONFIG_SOFT_WATCHDOG=m 463CONFIG_SOFT_WATCHDOG=m
@@ -473,6 +471,7 @@ CONFIG_VIRTIO_BALLOON=m
473CONFIG_EXT4_FS=y 471CONFIG_EXT4_FS=y
474CONFIG_EXT4_FS_POSIX_ACL=y 472CONFIG_EXT4_FS_POSIX_ACL=y
475CONFIG_EXT4_FS_SECURITY=y 473CONFIG_EXT4_FS_SECURITY=y
474CONFIG_EXT4_ENCRYPTION=y
476CONFIG_JBD2_DEBUG=y 475CONFIG_JBD2_DEBUG=y
477CONFIG_JFS_FS=m 476CONFIG_JFS_FS=m
478CONFIG_JFS_POSIX_ACL=y 477CONFIG_JFS_POSIX_ACL=y
@@ -495,6 +494,7 @@ CONFIG_AUTOFS4_FS=m
495CONFIG_FUSE_FS=y 494CONFIG_FUSE_FS=y
496CONFIG_CUSE=m 495CONFIG_CUSE=m
497CONFIG_OVERLAY_FS=m 496CONFIG_OVERLAY_FS=m
497CONFIG_OVERLAY_FS_REDIRECT_DIR=y
498CONFIG_FSCACHE=m 498CONFIG_FSCACHE=m
499CONFIG_CACHEFILES=m 499CONFIG_CACHEFILES=m
500CONFIG_ISO9660_FS=y 500CONFIG_ISO9660_FS=y
@@ -551,25 +551,27 @@ CONFIG_FRAME_WARN=1024
551CONFIG_UNUSED_SYMBOLS=y 551CONFIG_UNUSED_SYMBOLS=y
552CONFIG_MAGIC_SYSRQ=y 552CONFIG_MAGIC_SYSRQ=y
553CONFIG_DEBUG_MEMORY_INIT=y 553CONFIG_DEBUG_MEMORY_INIT=y
554CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
555CONFIG_PANIC_ON_OOPS=y 554CONFIG_PANIC_ON_OOPS=y
556CONFIG_TIMER_STATS=y 555CONFIG_TIMER_STATS=y
557CONFIG_RCU_TORTURE_TEST=m 556CONFIG_RCU_TORTURE_TEST=m
558CONFIG_RCU_CPU_STALL_TIMEOUT=60 557CONFIG_RCU_CPU_STALL_TIMEOUT=60
559CONFIG_NOTIFIER_ERROR_INJECTION=m
560CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
561CONFIG_PM_NOTIFIER_ERROR_INJECT=m
562CONFIG_LATENCYTOP=y 558CONFIG_LATENCYTOP=y
559CONFIG_SCHED_TRACER=y
560CONFIG_FTRACE_SYSCALLS=y
561CONFIG_STACK_TRACER=y
563CONFIG_BLK_DEV_IO_TRACE=y 562CONFIG_BLK_DEV_IO_TRACE=y
564# CONFIG_KPROBE_EVENT is not set 563CONFIG_UPROBE_EVENT=y
564CONFIG_FUNCTION_PROFILER=y
565CONFIG_HIST_TRIGGERS=y
565CONFIG_TRACE_ENUM_MAP_FILE=y 566CONFIG_TRACE_ENUM_MAP_FILE=y
566CONFIG_LKDTM=m 567CONFIG_LKDTM=m
567CONFIG_RBTREE_TEST=m
568CONFIG_INTERVAL_TREE_TEST=m
569CONFIG_PERCPU_TEST=m 568CONFIG_PERCPU_TEST=m
570CONFIG_ATOMIC64_SELFTEST=y 569CONFIG_ATOMIC64_SELFTEST=y
571CONFIG_TEST_BPF=m 570CONFIG_TEST_BPF=m
571CONFIG_BUG_ON_DATA_CORRUPTION=y
572CONFIG_S390_PTDUMP=y 572CONFIG_S390_PTDUMP=y
573CONFIG_PERSISTENT_KEYRINGS=y
574CONFIG_BIG_KEYS=y
573CONFIG_ENCRYPTED_KEYS=m 575CONFIG_ENCRYPTED_KEYS=m
574CONFIG_SECURITY=y 576CONFIG_SECURITY=y
575CONFIG_SECURITY_NETWORK=y 577CONFIG_SECURITY_NETWORK=y
@@ -577,18 +579,25 @@ CONFIG_SECURITY_SELINUX=y
577CONFIG_SECURITY_SELINUX_BOOTPARAM=y 579CONFIG_SECURITY_SELINUX_BOOTPARAM=y
578CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 580CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
579CONFIG_SECURITY_SELINUX_DISABLE=y 581CONFIG_SECURITY_SELINUX_DISABLE=y
582CONFIG_INTEGRITY_SIGNATURE=y
583CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
580CONFIG_IMA=y 584CONFIG_IMA=y
585CONFIG_IMA_WRITE_POLICY=y
581CONFIG_IMA_APPRAISE=y 586CONFIG_IMA_APPRAISE=y
587CONFIG_CRYPTO_DH=m
588CONFIG_CRYPTO_ECDH=m
582CONFIG_CRYPTO_USER=m 589CONFIG_CRYPTO_USER=m
583# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set 590# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
591CONFIG_CRYPTO_PCRYPT=m
584CONFIG_CRYPTO_CRYPTD=m 592CONFIG_CRYPTO_CRYPTD=m
593CONFIG_CRYPTO_MCRYPTD=m
585CONFIG_CRYPTO_TEST=m 594CONFIG_CRYPTO_TEST=m
586CONFIG_CRYPTO_CCM=m 595CONFIG_CRYPTO_CCM=m
587CONFIG_CRYPTO_GCM=m 596CONFIG_CRYPTO_GCM=m
588CONFIG_CRYPTO_CTS=m 597CONFIG_CRYPTO_CHACHA20POLY1305=m
589CONFIG_CRYPTO_LRW=m 598CONFIG_CRYPTO_LRW=m
590CONFIG_CRYPTO_PCBC=m 599CONFIG_CRYPTO_PCBC=m
591CONFIG_CRYPTO_XTS=m 600CONFIG_CRYPTO_KEYWRAP=m
592CONFIG_CRYPTO_XCBC=m 601CONFIG_CRYPTO_XCBC=m
593CONFIG_CRYPTO_VMAC=m 602CONFIG_CRYPTO_VMAC=m
594CONFIG_CRYPTO_CRC32=m 603CONFIG_CRYPTO_CRC32=m
@@ -598,6 +607,7 @@ CONFIG_CRYPTO_RMD160=m
598CONFIG_CRYPTO_RMD256=m 607CONFIG_CRYPTO_RMD256=m
599CONFIG_CRYPTO_RMD320=m 608CONFIG_CRYPTO_RMD320=m
600CONFIG_CRYPTO_SHA512=m 609CONFIG_CRYPTO_SHA512=m
610CONFIG_CRYPTO_SHA3=m
601CONFIG_CRYPTO_TGR192=m 611CONFIG_CRYPTO_TGR192=m
602CONFIG_CRYPTO_WP512=m 612CONFIG_CRYPTO_WP512=m
603CONFIG_CRYPTO_ANUBIS=m 613CONFIG_CRYPTO_ANUBIS=m
@@ -612,10 +622,13 @@ CONFIG_CRYPTO_SEED=m
612CONFIG_CRYPTO_SERPENT=m 622CONFIG_CRYPTO_SERPENT=m
613CONFIG_CRYPTO_TEA=m 623CONFIG_CRYPTO_TEA=m
614CONFIG_CRYPTO_TWOFISH=m 624CONFIG_CRYPTO_TWOFISH=m
625CONFIG_CRYPTO_842=m
615CONFIG_CRYPTO_LZ4=m 626CONFIG_CRYPTO_LZ4=m
616CONFIG_CRYPTO_LZ4HC=m 627CONFIG_CRYPTO_LZ4HC=m
617CONFIG_CRYPTO_USER_API_HASH=m 628CONFIG_CRYPTO_USER_API_HASH=m
618CONFIG_CRYPTO_USER_API_SKCIPHER=m 629CONFIG_CRYPTO_USER_API_SKCIPHER=m
630CONFIG_CRYPTO_USER_API_RNG=m
631CONFIG_CRYPTO_USER_API_AEAD=m
619CONFIG_ZCRYPT=m 632CONFIG_ZCRYPT=m
620CONFIG_CRYPTO_SHA1_S390=m 633CONFIG_CRYPTO_SHA1_S390=m
621CONFIG_CRYPTO_SHA256_S390=m 634CONFIG_CRYPTO_SHA256_S390=m
@@ -624,9 +637,6 @@ CONFIG_CRYPTO_DES_S390=m
624CONFIG_CRYPTO_AES_S390=m 637CONFIG_CRYPTO_AES_S390=m
625CONFIG_CRYPTO_GHASH_S390=m 638CONFIG_CRYPTO_GHASH_S390=m
626CONFIG_CRYPTO_CRC32_S390=y 639CONFIG_CRYPTO_CRC32_S390=y
627CONFIG_ASYMMETRIC_KEY_TYPE=y
628CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
629CONFIG_X509_CERTIFICATE_PARSER=m
630CONFIG_CRC7=m 640CONFIG_CRC7=m
631CONFIG_CRC8=m 641CONFIG_CRC8=m
632CONFIG_CORDIC=m 642CONFIG_CORDIC=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index bc7b176f5795..2cf87343b590 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -65,6 +65,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
65CONFIG_CLEANCACHE=y 65CONFIG_CLEANCACHE=y
66CONFIG_FRONTSWAP=y 66CONFIG_FRONTSWAP=y
67CONFIG_CMA=y 67CONFIG_CMA=y
68CONFIG_MEM_SOFT_DIRTY=y
68CONFIG_ZSWAP=y 69CONFIG_ZSWAP=y
69CONFIG_ZBUD=m 70CONFIG_ZBUD=m
70CONFIG_ZSMALLOC=m 71CONFIG_ZSMALLOC=m
@@ -136,8 +137,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
136CONFIG_NF_CONNTRACK_EVENTS=y 137CONFIG_NF_CONNTRACK_EVENTS=y
137CONFIG_NF_CONNTRACK_TIMEOUT=y 138CONFIG_NF_CONNTRACK_TIMEOUT=y
138CONFIG_NF_CONNTRACK_TIMESTAMP=y 139CONFIG_NF_CONNTRACK_TIMESTAMP=y
139CONFIG_NF_CT_PROTO_DCCP=m
140CONFIG_NF_CT_PROTO_UDPLITE=m
141CONFIG_NF_CONNTRACK_AMANDA=m 140CONFIG_NF_CONNTRACK_AMANDA=m
142CONFIG_NF_CONNTRACK_FTP=m 141CONFIG_NF_CONNTRACK_FTP=m
143CONFIG_NF_CONNTRACK_H323=m 142CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +153,12 @@ CONFIG_NF_TABLES=m
154CONFIG_NFT_EXTHDR=m 153CONFIG_NFT_EXTHDR=m
155CONFIG_NFT_META=m 154CONFIG_NFT_META=m
156CONFIG_NFT_CT=m 155CONFIG_NFT_CT=m
157CONFIG_NFT_RBTREE=m
158CONFIG_NFT_HASH=m
159CONFIG_NFT_COUNTER=m 156CONFIG_NFT_COUNTER=m
160CONFIG_NFT_LOG=m 157CONFIG_NFT_LOG=m
161CONFIG_NFT_LIMIT=m 158CONFIG_NFT_LIMIT=m
162CONFIG_NFT_NAT=m 159CONFIG_NFT_NAT=m
163CONFIG_NFT_COMPAT=m 160CONFIG_NFT_COMPAT=m
161CONFIG_NFT_HASH=m
164CONFIG_NETFILTER_XT_SET=m 162CONFIG_NETFILTER_XT_SET=m
165CONFIG_NETFILTER_XT_TARGET_AUDIT=m 163CONFIG_NETFILTER_XT_TARGET_AUDIT=m
166CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 164CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +212,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
214CONFIG_NETFILTER_XT_MATCH_RATEEST=m 212CONFIG_NETFILTER_XT_MATCH_RATEEST=m
215CONFIG_NETFILTER_XT_MATCH_REALM=m 213CONFIG_NETFILTER_XT_MATCH_REALM=m
216CONFIG_NETFILTER_XT_MATCH_RECENT=m 214CONFIG_NETFILTER_XT_MATCH_RECENT=m
217CONFIG_NETFILTER_XT_MATCH_SOCKET=m
218CONFIG_NETFILTER_XT_MATCH_STATE=m 215CONFIG_NETFILTER_XT_MATCH_STATE=m
219CONFIG_NETFILTER_XT_MATCH_STATISTIC=m 216CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
220CONFIG_NETFILTER_XT_MATCH_STRING=m 217CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +250,6 @@ CONFIG_IP_VS_NQ=m
253CONFIG_IP_VS_FTP=m 250CONFIG_IP_VS_FTP=m
254CONFIG_IP_VS_PE_SIP=m 251CONFIG_IP_VS_PE_SIP=m
255CONFIG_NF_CONNTRACK_IPV4=m 252CONFIG_NF_CONNTRACK_IPV4=m
256# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
257CONFIG_NF_TABLES_IPV4=m 253CONFIG_NF_TABLES_IPV4=m
258CONFIG_NFT_CHAIN_ROUTE_IPV4=m 254CONFIG_NFT_CHAIN_ROUTE_IPV4=m
259CONFIG_NF_TABLES_ARP=m 255CONFIG_NF_TABLES_ARP=m
@@ -430,7 +426,6 @@ CONFIG_EQUALIZER=m
430CONFIG_IFB=m 426CONFIG_IFB=m
431CONFIG_MACVLAN=m 427CONFIG_MACVLAN=m
432CONFIG_MACVTAP=m 428CONFIG_MACVTAP=m
433CONFIG_IPVLAN=m
434CONFIG_VXLAN=m 429CONFIG_VXLAN=m
435CONFIG_TUN=m 430CONFIG_TUN=m
436CONFIG_VETH=m 431CONFIG_VETH=m
@@ -474,6 +469,7 @@ CONFIG_VIRTIO_BALLOON=m
474CONFIG_EXT4_FS=y 469CONFIG_EXT4_FS=y
475CONFIG_EXT4_FS_POSIX_ACL=y 470CONFIG_EXT4_FS_POSIX_ACL=y
476CONFIG_EXT4_FS_SECURITY=y 471CONFIG_EXT4_FS_SECURITY=y
472CONFIG_EXT4_ENCRYPTION=y
477CONFIG_JBD2_DEBUG=y 473CONFIG_JBD2_DEBUG=y
478CONFIG_JFS_FS=m 474CONFIG_JFS_FS=m
479CONFIG_JFS_POSIX_ACL=y 475CONFIG_JFS_POSIX_ACL=y
@@ -496,6 +492,7 @@ CONFIG_AUTOFS4_FS=m
496CONFIG_FUSE_FS=y 492CONFIG_FUSE_FS=y
497CONFIG_CUSE=m 493CONFIG_CUSE=m
498CONFIG_OVERLAY_FS=m 494CONFIG_OVERLAY_FS=m
495CONFIG_OVERLAY_FS_REDIRECT_DIR=y
499CONFIG_FSCACHE=m 496CONFIG_FSCACHE=m
500CONFIG_CACHEFILES=m 497CONFIG_CACHEFILES=m
501CONFIG_ISO9660_FS=y 498CONFIG_ISO9660_FS=y
@@ -563,12 +560,16 @@ CONFIG_STACK_TRACER=y
563CONFIG_BLK_DEV_IO_TRACE=y 560CONFIG_BLK_DEV_IO_TRACE=y
564CONFIG_UPROBE_EVENT=y 561CONFIG_UPROBE_EVENT=y
565CONFIG_FUNCTION_PROFILER=y 562CONFIG_FUNCTION_PROFILER=y
563CONFIG_HIST_TRIGGERS=y
566CONFIG_TRACE_ENUM_MAP_FILE=y 564CONFIG_TRACE_ENUM_MAP_FILE=y
567CONFIG_LKDTM=m 565CONFIG_LKDTM=m
568CONFIG_PERCPU_TEST=m 566CONFIG_PERCPU_TEST=m
569CONFIG_ATOMIC64_SELFTEST=y 567CONFIG_ATOMIC64_SELFTEST=y
570CONFIG_TEST_BPF=m 568CONFIG_TEST_BPF=m
569CONFIG_BUG_ON_DATA_CORRUPTION=y
571CONFIG_S390_PTDUMP=y 570CONFIG_S390_PTDUMP=y
571CONFIG_PERSISTENT_KEYRINGS=y
572CONFIG_BIG_KEYS=y
572CONFIG_ENCRYPTED_KEYS=m 573CONFIG_ENCRYPTED_KEYS=m
573CONFIG_SECURITY=y 574CONFIG_SECURITY=y
574CONFIG_SECURITY_NETWORK=y 575CONFIG_SECURITY_NETWORK=y
@@ -576,18 +577,25 @@ CONFIG_SECURITY_SELINUX=y
576CONFIG_SECURITY_SELINUX_BOOTPARAM=y 577CONFIG_SECURITY_SELINUX_BOOTPARAM=y
577CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 578CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
578CONFIG_SECURITY_SELINUX_DISABLE=y 579CONFIG_SECURITY_SELINUX_DISABLE=y
580CONFIG_INTEGRITY_SIGNATURE=y
581CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
579CONFIG_IMA=y 582CONFIG_IMA=y
583CONFIG_IMA_WRITE_POLICY=y
580CONFIG_IMA_APPRAISE=y 584CONFIG_IMA_APPRAISE=y
585CONFIG_CRYPTO_DH=m
586CONFIG_CRYPTO_ECDH=m
581CONFIG_CRYPTO_USER=m 587CONFIG_CRYPTO_USER=m
582# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set 588# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
589CONFIG_CRYPTO_PCRYPT=m
583CONFIG_CRYPTO_CRYPTD=m 590CONFIG_CRYPTO_CRYPTD=m
591CONFIG_CRYPTO_MCRYPTD=m
584CONFIG_CRYPTO_TEST=m 592CONFIG_CRYPTO_TEST=m
585CONFIG_CRYPTO_CCM=m 593CONFIG_CRYPTO_CCM=m
586CONFIG_CRYPTO_GCM=m 594CONFIG_CRYPTO_GCM=m
587CONFIG_CRYPTO_CTS=m 595CONFIG_CRYPTO_CHACHA20POLY1305=m
588CONFIG_CRYPTO_LRW=m 596CONFIG_CRYPTO_LRW=m
589CONFIG_CRYPTO_PCBC=m 597CONFIG_CRYPTO_PCBC=m
590CONFIG_CRYPTO_XTS=m 598CONFIG_CRYPTO_KEYWRAP=m
591CONFIG_CRYPTO_XCBC=m 599CONFIG_CRYPTO_XCBC=m
592CONFIG_CRYPTO_VMAC=m 600CONFIG_CRYPTO_VMAC=m
593CONFIG_CRYPTO_CRC32=m 601CONFIG_CRYPTO_CRC32=m
@@ -597,6 +605,7 @@ CONFIG_CRYPTO_RMD160=m
597CONFIG_CRYPTO_RMD256=m 605CONFIG_CRYPTO_RMD256=m
598CONFIG_CRYPTO_RMD320=m 606CONFIG_CRYPTO_RMD320=m
599CONFIG_CRYPTO_SHA512=m 607CONFIG_CRYPTO_SHA512=m
608CONFIG_CRYPTO_SHA3=m
600CONFIG_CRYPTO_TGR192=m 609CONFIG_CRYPTO_TGR192=m
601CONFIG_CRYPTO_WP512=m 610CONFIG_CRYPTO_WP512=m
602CONFIG_CRYPTO_ANUBIS=m 611CONFIG_CRYPTO_ANUBIS=m
@@ -611,10 +620,13 @@ CONFIG_CRYPTO_SEED=m
611CONFIG_CRYPTO_SERPENT=m 620CONFIG_CRYPTO_SERPENT=m
612CONFIG_CRYPTO_TEA=m 621CONFIG_CRYPTO_TEA=m
613CONFIG_CRYPTO_TWOFISH=m 622CONFIG_CRYPTO_TWOFISH=m
623CONFIG_CRYPTO_842=m
614CONFIG_CRYPTO_LZ4=m 624CONFIG_CRYPTO_LZ4=m
615CONFIG_CRYPTO_LZ4HC=m 625CONFIG_CRYPTO_LZ4HC=m
616CONFIG_CRYPTO_USER_API_HASH=m 626CONFIG_CRYPTO_USER_API_HASH=m
617CONFIG_CRYPTO_USER_API_SKCIPHER=m 627CONFIG_CRYPTO_USER_API_SKCIPHER=m
628CONFIG_CRYPTO_USER_API_RNG=m
629CONFIG_CRYPTO_USER_API_AEAD=m
618CONFIG_ZCRYPT=m 630CONFIG_ZCRYPT=m
619CONFIG_CRYPTO_SHA1_S390=m 631CONFIG_CRYPTO_SHA1_S390=m
620CONFIG_CRYPTO_SHA256_S390=m 632CONFIG_CRYPTO_SHA256_S390=m
@@ -623,9 +635,6 @@ CONFIG_CRYPTO_DES_S390=m
623CONFIG_CRYPTO_AES_S390=m 635CONFIG_CRYPTO_AES_S390=m
624CONFIG_CRYPTO_GHASH_S390=m 636CONFIG_CRYPTO_GHASH_S390=m
625CONFIG_CRYPTO_CRC32_S390=y 637CONFIG_CRYPTO_CRC32_S390=y
626CONFIG_ASYMMETRIC_KEY_TYPE=y
627CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
628CONFIG_X509_CERTIFICATE_PARSER=m
629CONFIG_CRC7=m 638CONFIG_CRC7=m
630CONFIG_CRC8=m 639CONFIG_CRC8=m
631CONFIG_CORDIC=m 640CONFIG_CORDIC=m
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 2d40ef0a6295..d00e368fb5e6 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -38,7 +38,6 @@ CONFIG_JUMP_LABEL=y
38CONFIG_STATIC_KEYS_SELFTEST=y 38CONFIG_STATIC_KEYS_SELFTEST=y
39CONFIG_MODULES=y 39CONFIG_MODULES=y
40CONFIG_MODULE_UNLOAD=y 40CONFIG_MODULE_UNLOAD=y
41CONFIG_MODVERSIONS=y
42CONFIG_BLK_DEV_INTEGRITY=y 41CONFIG_BLK_DEV_INTEGRITY=y
43CONFIG_PARTITION_ADVANCED=y 42CONFIG_PARTITION_ADVANCED=y
44CONFIG_IBM_PARTITION=y 43CONFIG_IBM_PARTITION=y
@@ -130,8 +129,11 @@ CONFIG_DUMMY=m
130CONFIG_EQUALIZER=m 129CONFIG_EQUALIZER=m
131CONFIG_TUN=m 130CONFIG_TUN=m
132CONFIG_VIRTIO_NET=y 131CONFIG_VIRTIO_NET=y
132# CONFIG_NET_VENDOR_ALACRITECH is not set
133# CONFIG_NET_VENDOR_SOLARFLARE is not set
133# CONFIG_INPUT is not set 134# CONFIG_INPUT is not set
134# CONFIG_SERIO is not set 135# CONFIG_SERIO is not set
136CONFIG_DEVKMEM=y
135CONFIG_RAW_DRIVER=m 137CONFIG_RAW_DRIVER=m
136CONFIG_VIRTIO_BALLOON=y 138CONFIG_VIRTIO_BALLOON=y
137CONFIG_EXT4_FS=y 139CONFIG_EXT4_FS=y
@@ -183,7 +185,6 @@ CONFIG_TRACE_ENUM_MAP_FILE=y
183CONFIG_KPROBES_SANITY_TEST=y 185CONFIG_KPROBES_SANITY_TEST=y
184CONFIG_S390_PTDUMP=y 186CONFIG_S390_PTDUMP=y
185CONFIG_CRYPTO_CRYPTD=m 187CONFIG_CRYPTO_CRYPTD=m
186CONFIG_CRYPTO_AUTHENC=m
187CONFIG_CRYPTO_TEST=m 188CONFIG_CRYPTO_TEST=m
188CONFIG_CRYPTO_CCM=m 189CONFIG_CRYPTO_CCM=m
189CONFIG_CRYPTO_GCM=m 190CONFIG_CRYPTO_GCM=m
diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..2c3413b0ca52
--- /dev/null
+++ b/arch/s390/include/asm/asm-prototypes.h
@@ -0,0 +1,8 @@
1#ifndef _ASM_S390_PROTOTYPES_H
2
3#include <linux/kvm_host.h>
4#include <linux/ftrace.h>
5#include <asm/fpu/api.h>
6#include <asm-generic/asm-prototypes.h>
7
8#endif /* _ASM_S390_PROTOTYPES_H */
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab802f6..8e136b88cdf4 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -15,7 +15,9 @@
15 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ 15 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
16 asm volatile( \ 16 asm volatile( \
17 " lctlg %1,%2,%0\n" \ 17 " lctlg %1,%2,%0\n" \
18 : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ 18 : \
19 : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
20 : "memory"); \
19} 21}
20 22
21#define __ctl_store(array, low, high) { \ 23#define __ctl_store(array, low, high) { \
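The __ctl_load fix above adds a "memory" clobber so the compiler cannot keep memory values cached in registers across the control-register load. A generic sketch of what the clobber guarantees, using plain GCC extended asm with nothing s390-specific:

#include <stdio.h>

static int flag;

static inline void barrier(void)
{
	/* "memory" tells the compiler this asm may read or write any
	 * memory: stores before it must actually be emitted, and any
	 * cached values must be reloaded afterwards. Without it, the
	 * compiler may reorder or elide accesses to `flag` around the
	 * asm statement. */
	asm volatile("" ::: "memory");
}

int main(void)
{
	flag = 1;
	barrier();
	printf("%d\n", flag);
	return 0;
}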
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 7447ba509c30..12020b55887b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
963 if (target == current) 963 if (target == current)
964 save_fpu_regs(); 964 save_fpu_regs();
965 965
966 if (MACHINE_HAS_VX)
967 convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
968 else
969 memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
970
966 /* If setting FPC, must validate it first. */ 971 /* If setting FPC, must validate it first. */
967 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) { 972 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
968 u32 ufpc[2] = { target->thread.fpu.fpc, 0 }; 973 u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
1067 if (target == current) 1072 if (target == current)
1068 save_fpu_regs(); 1073 save_fpu_regs();
1069 1074
1075 for (i = 0; i < __NUM_VXRS_LOW; i++)
1076 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1077
1070 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); 1078 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1071 if (rc == 0) 1079 if (rc == 0)
1072 for (i = 0; i < __NUM_VXRS_LOW; i++) 1080 for (i = 0; i < __NUM_VXRS_LOW; i++)
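Both ptrace hunks above fix the same class of bug: a regset "set" handler must seed its scratch buffer with the target's current register values before applying a possibly partial user write, otherwise the untouched registers are committed back as uninitialized stack contents (the tile_gpr_set hunk later in this series applies the same fix). A minimal sketch of the read-modify-write pattern; struct regs and target_regs are illustrative stand-ins:

#include <stdio.h>
#include <string.h>

struct regs { unsigned long r[8]; };

static struct regs target_regs = { .r = { 1, 2, 3, 4, 5, 6, 7, 8 } };

static int set_regs(const void *buf, size_t off, size_t len)
{
	struct regs tmp = target_regs;		/* seed with current state */

	if (off > sizeof(tmp) || len > sizeof(tmp) - off)
		return -1;
	memcpy((char *)&tmp + off, buf, len);	/* apply the partial write */
	target_regs = tmp;			/* commit the whole set */
	return 0;
}

int main(void)
{
	unsigned long v = 99;

	set_regs(&v, sizeof(unsigned long), sizeof(v));	/* update r[1] only */
	printf("%lu %lu\n", target_regs.r[0], target_regs.r[1]);	/* 1 99 */
	return 0;
}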
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 6b246aadf311..1b5c5ee9fc1b 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -94,7 +94,7 @@ static void update_mt_scaling(void)
94 * Update process times based on virtual cpu times stored by entry.S 94 * Update process times based on virtual cpu times stored by entry.S
95 * to the lowcore fields user_timer, system_timer & steal_clock. 95 * to the lowcore fields user_timer, system_timer & steal_clock.
96 */ 96 */
97static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) 97static int do_account_vtime(struct task_struct *tsk)
98{ 98{
99 u64 timer, clock, user, system, steal; 99 u64 timer, clock, user, system, steal;
100 u64 user_scaled, system_scaled; 100 u64 user_scaled, system_scaled;
@@ -138,7 +138,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
138 } 138 }
139 account_user_time(tsk, user); 139 account_user_time(tsk, user);
140 tsk->utimescaled += user_scaled; 140 tsk->utimescaled += user_scaled;
141 account_system_time(tsk, hardirq_offset, system); 141 account_system_time(tsk, 0, system);
142 tsk->stimescaled += system_scaled; 142 tsk->stimescaled += system_scaled;
143 143
144 steal = S390_lowcore.steal_timer; 144 steal = S390_lowcore.steal_timer;
@@ -152,7 +152,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
152 152
153void vtime_task_switch(struct task_struct *prev) 153void vtime_task_switch(struct task_struct *prev)
154{ 154{
155 do_account_vtime(prev, 0); 155 do_account_vtime(prev);
156 prev->thread.user_timer = S390_lowcore.user_timer; 156 prev->thread.user_timer = S390_lowcore.user_timer;
157 prev->thread.system_timer = S390_lowcore.system_timer; 157 prev->thread.system_timer = S390_lowcore.system_timer;
158 S390_lowcore.user_timer = current->thread.user_timer; 158 S390_lowcore.user_timer = current->thread.user_timer;
@@ -166,7 +166,7 @@ void vtime_task_switch(struct task_struct *prev)
166 */ 166 */
167void vtime_account_user(struct task_struct *tsk) 167void vtime_account_user(struct task_struct *tsk)
168{ 168{
169 if (do_account_vtime(tsk, HARDIRQ_OFFSET)) 169 if (do_account_vtime(tsk))
170 virt_timer_expire(); 170 virt_timer_expire();
171} 171}
172 172
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index bec71e902be3..6484a250021e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -916,7 +916,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
916 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, 916 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
917 S390_ARCH_FAC_LIST_SIZE_BYTE); 917 S390_ARCH_FAC_LIST_SIZE_BYTE);
918 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, 918 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
919 S390_ARCH_FAC_LIST_SIZE_BYTE); 919 sizeof(S390_lowcore.stfle_fac_list));
920 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) 920 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
921 ret = -EFAULT; 921 ret = -EFAULT;
922 kfree(mach); 922 kfree(mach);
@@ -1437,7 +1437,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1437 1437
1438 /* Populate the facility mask initially. */ 1438 /* Populate the facility mask initially. */
1439 memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list, 1439 memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
1440 S390_ARCH_FAC_LIST_SIZE_BYTE); 1440 sizeof(S390_lowcore.stfle_fac_list));
1441 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { 1441 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
1442 if (i < kvm_s390_fac_list_mask_size()) 1442 if (i < kvm_s390_fac_list_mask_size())
1443 kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i]; 1443 kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
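Both kvm-s390 hunks above replace a destination-derived length with sizeof on the actual source object (S390_lowcore.stfle_fac_list), so the copy can never read past the end of the source. The general discipline, as a tiny self-contained example:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long src[4] = { 1, 2, 3, 4 };
	unsigned long dst[16] = { 0 };

	/* Bound the copy by the *source* object: using sizeof(dst)
	 * here would read 12 elements past the end of src. */
	memcpy(dst, src, sizeof(src));
	printf("%lu\n", dst[3]);
	return 0;
}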
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 7a1897c51c54..d56ef26d4681 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
202 return pgste; 202 return pgste;
203} 203}
204 204
205static inline void ptep_xchg_commit(struct mm_struct *mm, 205static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
206 unsigned long addr, pte_t *ptep, 206 unsigned long addr, pte_t *ptep,
207 pgste_t pgste, pte_t old, pte_t new) 207 pgste_t pgste, pte_t old, pte_t new)
208{ 208{
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
220 } else { 220 } else {
221 *ptep = new; 221 *ptep = new;
222 } 222 }
223 return old;
223} 224}
224 225
225pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr, 226pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
231 preempt_disable(); 232 preempt_disable();
232 pgste = ptep_xchg_start(mm, addr, ptep); 233 pgste = ptep_xchg_start(mm, addr, ptep);
233 old = ptep_flush_direct(mm, addr, ptep); 234 old = ptep_flush_direct(mm, addr, ptep);
234 ptep_xchg_commit(mm, addr, ptep, pgste, old, new); 235 old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
235 preempt_enable(); 236 preempt_enable();
236 return old; 237 return old;
237} 238}
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
246 preempt_disable(); 247 preempt_disable();
247 pgste = ptep_xchg_start(mm, addr, ptep); 248 pgste = ptep_xchg_start(mm, addr, ptep);
248 old = ptep_flush_lazy(mm, addr, ptep); 249 old = ptep_flush_lazy(mm, addr, ptep);
249 ptep_xchg_commit(mm, addr, ptep, pgste, old, new); 250 old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
250 preempt_enable(); 251 preempt_enable();
251 return old; 252 return old;
252} 253}
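The pgtable.c change above makes the commit step authoritative: ptep_xchg_commit() may fold status bits into the "old" PTE it installs, so it now returns the value it actually committed and the callers return that instead of their stale local copy. A sketch of the commit-returns-final-value pattern, with all names and bit values illustrative:

#include <stdio.h>

static unsigned long pte;

/* The commit step may adjust `old` (e.g. fold in status bits noticed
 * during the flush); returning it keeps the caller's view authoritative. */
static unsigned long commit(unsigned long old, unsigned long new)
{
	old |= 0x1;		/* pretend hardware marked it accessed */
	pte = new;
	return old;
}

int main(void)
{
	unsigned long old = 0x1000;

	old = commit(old, 0x2000);	/* use the returned value, not the copy */
	printf("old=%#lx pte=%#lx\n", old, pte);
	return 0;
}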
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index b84be675e507..d0317993e947 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
35static inline void tsb_context_switch(struct mm_struct *mm) 35static inline void tsb_context_switch(struct mm_struct *mm)
36{ 36{
37 __tsb_context_switch(__pa(mm->pgd), 37 __tsb_context_switch(__pa(mm->pgd),
38 &mm->context.tsb_block[0], 38 &mm->context.tsb_block[MM_TSB_BASE],
39#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 39#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
40 (mm->context.tsb_block[1].tsb ? 40 (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
41 &mm->context.tsb_block[1] : 41 &mm->context.tsb_block[MM_TSB_HUGE] :
42 NULL) 42 NULL)
43#else 43#else
44 NULL 44 NULL
45#endif 45#endif
46 , __pa(&mm->context.tsb_descr[0])); 46 , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
47} 47}
48 48
49void tsb_grow(struct mm_struct *mm, 49void tsb_grow(struct mm_struct *mm,
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 3bebf395252c..4d0248aa0928 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
1021 unsigned long order = get_order(size); 1021 unsigned long order = get_order(size);
1022 unsigned long p; 1022 unsigned long p;
1023 1023
1024 p = __get_free_pages(GFP_KERNEL, order); 1024 p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
1025 if (!p) { 1025 if (!p) {
1026 prom_printf("SUN4V: Error, cannot allocate queue.\n"); 1026 prom_printf("SUN4V: Error, cannot allocate queue.\n");
1027 prom_halt(); 1027 prom_halt();
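The sparc queue allocation above now requests zeroed pages (__GFP_ZERO) instead of handing the hypervisor a buffer with stale contents. The user-space analogue is calloc versus malloc:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = 64;
	/* calloc returns zero-filled memory; reading malloc'd memory
	 * before writing it yields indeterminate values. */
	unsigned char *q = calloc(n, 1);

	if (!q)
		return 1;
	printf("%d\n", q[0]);	/* guaranteed 0 */
	free(q);
	return 0;
}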
diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c
index c59af546f522..3caed4023589 100644
--- a/arch/sparc/kernel/sstate.c
+++ b/arch/sparc/kernel/sstate.c
@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
43 "Linux powering off"; 43 "Linux powering off";
44static const char rebooting_msg[32] __attribute__((aligned(32))) = 44static const char rebooting_msg[32] __attribute__((aligned(32))) =
45 "Linux rebooting"; 45 "Linux rebooting";
46static const char panicing_msg[32] __attribute__((aligned(32))) = 46static const char panicking_msg[32] __attribute__((aligned(32))) =
47 "Linux panicing"; 47 "Linux panicking";
48 48
49static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused) 49static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
50{ 50{
@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {
76 76
77static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr) 77static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
78{ 78{
79 do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg); 79 do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
80 80
81 return NOTIFY_DONE; 81 return NOTIFY_DONE;
82} 82}
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 4bc10e44d1ca..dfc97a47c9a0 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
2051 atomic_inc(&sun4v_resum_oflow_cnt); 2051 atomic_inc(&sun4v_resum_oflow_cnt);
2052} 2052}
2053 2053
2054/* Given a set of registers, get the virtual address that was being accessed
2055 * by the faulting instruction at tpc.
2056 */
2057static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
2058{
2059 unsigned int insn;
2060
2061 if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
2062 return compute_effective_address(regs, insn,
2063 (insn >> 25) & 0x1f);
2064 }
2065 return 0;
2066}
2067
2068/* Attempt to handle non-resumable errors generated from userspace.
2069 * Returns true if the signal was handled, false otherwise.
2070 */
2071bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
2072		struct sun4v_error_entry *ent)
2073{
2074 unsigned int attrs = ent->err_attrs;
2075
2076 if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
2077 unsigned long addr = ent->err_raddr;
2078 siginfo_t info;
2079
2080 if (addr == ~(u64)0) {
2081 /* This seems highly unlikely to ever occur */
2082 pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
2083 } else {
2084 unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
2085 PAGE_SIZE);
2086
2087 /* Break the unfortunate news. */
2088 pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
2089 addr);
2090 pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu pages.\n",
2091 page_cnt);
2092
2093 while (page_cnt-- > 0) {
2094 if (pfn_valid(addr >> PAGE_SHIFT))
2095 get_page(pfn_to_page(addr >> PAGE_SHIFT));
2096 addr += PAGE_SIZE;
2097 }
2098 }
2099 info.si_signo = SIGKILL;
2100 info.si_errno = 0;
2101 info.si_trapno = 0;
2102 force_sig_info(info.si_signo, &info, current);
2103
2104 return true;
2105 }
2106 if (attrs & SUN4V_ERR_ATTRS_PIO) {
2107 siginfo_t info;
2108
2109 info.si_signo = SIGBUS;
2110 info.si_code = BUS_ADRERR;
2111 info.si_addr = (void __user *)sun4v_get_vaddr(regs);
2112 force_sig_info(info.si_signo, &info, current);
2113
2114 return true;
2115 }
2116
2117 /* Default to doing nothing */
2118 return false;
2119}
2120
2054/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. 2121/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
2055 * Log the event, clear the first word of the entry, and die. 2122 * Log the event, clear the first word of the entry, and die.
2056 */ 2123 */
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
2075 2142
2076 put_cpu(); 2143 put_cpu();
2077 2144
2145 if (!(regs->tstate & TSTATE_PRIV) &&
2146 sun4v_nonresum_error_user_handled(regs, &local_copy)) {
2147 /* DON'T PANIC: This userspace error was handled. */
2148 return;
2149 }
2150
2078#ifdef CONFIG_PCI 2151#ifdef CONFIG_PCI
2079 /* Check for the special PCI poke sequence. */ 2152 /* Check for the special PCI poke sequence. */
2080 if (pci_poke_in_progress && pci_poke_cpu == cpu) { 2153 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
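In sun4v_get_vaddr() above, (insn >> 25) & 0x1f extracts the 5-bit destination-register field (bits 29..25 of a SPARC instruction word) to pass to compute_effective_address(). The field extraction in isolation, with an illustrative encoding:

#include <stdio.h>

static unsigned int rd_field(unsigned int insn)
{
	return (insn >> 25) & 0x1f;	/* 5-bit field at bits 29..25 */
}

int main(void)
{
	/* 0xd0062000 is an illustrative encoding whose rd field is 8 */
	printf("rd = %u\n", rd_field(0xd0062000));
	return 0;
}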
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index d89b7011667c..e279572824b1 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
111 const void *kbuf, const void __user *ubuf) 111 const void *kbuf, const void __user *ubuf)
112{ 112{
113 int ret; 113 int ret;
114 struct pt_regs regs; 114 struct pt_regs regs = *task_pt_regs(target);
115 115
116 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, 116 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
117 sizeof(regs)); 117 sizeof(regs));
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index cc3bd583dce1..9e240fcba784 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/types.h> 15#include <linux/types.h>
16#include "ctype.h" 16#include "ctype.h"
17#include "string.h"
17 18
18int memcmp(const void *s1, const void *s2, size_t len) 19int memcmp(const void *s1, const void *s2, size_t len)
19{ 20{
diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
index 725e820602b1..113588ddb43f 100644
--- a/arch/x86/boot/string.h
+++ b/arch/x86/boot/string.h
@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
18#define memset(d,c,l) __builtin_memset(d,c,l) 18#define memset(d,c,l) __builtin_memset(d,c,l)
19#define memcmp __builtin_memcmp 19#define memcmp __builtin_memcmp
20 20
21extern int strcmp(const char *str1, const char *str2);
22extern int strncmp(const char *cs, const char *ct, size_t count);
23extern size_t strlen(const char *s);
24extern char *strstr(const char *s1, const char *s2);
25extern size_t strnlen(const char *s, size_t maxlen);
26extern unsigned int atou(const char *s);
27extern unsigned long long simple_strtoull(const char *cp, char **endp,
28 unsigned int base);
29
21#endif /* BOOT_STRING_H */ 30#endif /* BOOT_STRING_H */
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 31c34ee131f3..7ff1b0c86a8e 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1020,7 +1020,8 @@ struct {
1020 const char *basename; 1020 const char *basename;
1021 struct simd_skcipher_alg *simd; 1021 struct simd_skcipher_alg *simd;
1022} aesni_simd_skciphers2[] = { 1022} aesni_simd_skciphers2[] = {
1023#if IS_ENABLED(CONFIG_CRYPTO_PCBC) 1023#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
1024 IS_BUILTIN(CONFIG_CRYPTO_PCBC)
1024 { 1025 {
1025 .algname = "pcbc(aes)", 1026 .algname = "pcbc(aes)",
1026 .drvname = "pcbc-aes-aesni", 1027 .drvname = "pcbc-aes-aesni",
@@ -1084,9 +1085,9 @@ static void aesni_free_simds(void)
1084 aesni_simd_skciphers[i]; i++) 1085 aesni_simd_skciphers[i]; i++)
1085 simd_skcipher_free(aesni_simd_skciphers[i]); 1086 simd_skcipher_free(aesni_simd_skciphers[i]);
1086 1087
1087 for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) && 1088 for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
1088 aesni_simd_skciphers2[i].simd; i++) 1089 if (aesni_simd_skciphers2[i].simd)
1089 simd_skcipher_free(aesni_simd_skciphers2[i].simd); 1090 simd_skcipher_free(aesni_simd_skciphers2[i].simd);
1090} 1091}
1091 1092
1092static int __init aesni_init(void) 1093static int __init aesni_init(void)
@@ -1167,7 +1168,7 @@ static int __init aesni_init(void)
1167 simd = simd_skcipher_create_compat(algname, drvname, basename); 1168 simd = simd_skcipher_create_compat(algname, drvname, basename);
1168 err = PTR_ERR(simd); 1169 err = PTR_ERR(simd);
1169 if (IS_ERR(simd)) 1170 if (IS_ERR(simd))
1170 goto unregister_simds; 1171 continue;
1171 1172
1172 aesni_simd_skciphers2[i].simd = simd; 1173 aesni_simd_skciphers2[i].simd = simd;
1173 } 1174 }
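The aesni change above makes the second batch of SIMD skciphers genuinely optional: a failed creation is skipped (continue) instead of aborting init, and cleanup scans the whole array checking each simd pointer for NULL rather than stopping at the first hole. A sketch of the register-what-you-can pattern; create(), entries and the failing index are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define N 4

static void *entries[N];

static void *create(int i)
{
	return (i == 2) ? NULL : malloc(16);	/* pretend entry 2 fails */
}

int main(void)
{
	int i;

	for (i = 0; i < N; i++) {
		entries[i] = create(i);
		if (!entries[i])
			continue;	/* optional: skip it, don't abort */
		printf("registered %d\n", i);
	}
	for (i = 0; i < N; i++)		/* scan the whole array on cleanup; */
		if (entries[i])		/* holes are allowed */
			free(entries[i]);
	return 0;
}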
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 701d29f8e4d3..57f7ec35216e 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -255,23 +255,6 @@ ENTRY(__switch_to_asm)
255END(__switch_to_asm) 255END(__switch_to_asm)
256 256
257/* 257/*
258 * The unwinder expects the last frame on the stack to always be at the same
259 * offset from the end of the page, which allows it to validate the stack.
260 * Calling schedule_tail() directly would break that convention because its an
261 * asmlinkage function so its argument has to be pushed on the stack. This
262 * wrapper creates a proper "end of stack" frame header before the call.
263 */
264ENTRY(schedule_tail_wrapper)
265 FRAME_BEGIN
266
267 pushl %eax
268 call schedule_tail
269 popl %eax
270
271 FRAME_END
272 ret
273ENDPROC(schedule_tail_wrapper)
274/*
275 * A newly forked process directly context switches into this address. 258 * A newly forked process directly context switches into this address.
276 * 259 *
277 * eax: prev task we switched from 260 * eax: prev task we switched from
@@ -279,15 +262,24 @@ ENDPROC(schedule_tail_wrapper)
279 * edi: kernel thread arg 262 * edi: kernel thread arg
280 */ 263 */
281ENTRY(ret_from_fork) 264ENTRY(ret_from_fork)
282 call schedule_tail_wrapper 265 FRAME_BEGIN /* help unwinder find end of stack */
266
267 /*
268 * schedule_tail() is asmlinkage so we have to put its 'prev' argument
269 * on the stack.
270 */
271 pushl %eax
272 call schedule_tail
273 popl %eax
283 274
284 testl %ebx, %ebx 275 testl %ebx, %ebx
285 jnz 1f /* kernel threads are uncommon */ 276 jnz 1f /* kernel threads are uncommon */
286 277
2872: 2782:
288 /* When we fork, we trace the syscall return in the child, too. */ 279 /* When we fork, we trace the syscall return in the child, too. */
289 movl %esp, %eax 280 leal FRAME_OFFSET(%esp), %eax
290 call syscall_return_slowpath 281 call syscall_return_slowpath
282 FRAME_END
291 jmp restore_all 283 jmp restore_all
292 284
293 /* kernel thread */ 285 /* kernel thread */
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 5b219707c2f2..044d18ebc43c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,6 +36,7 @@
36#include <asm/smap.h> 36#include <asm/smap.h>
37#include <asm/pgtable_types.h> 37#include <asm/pgtable_types.h>
38#include <asm/export.h> 38#include <asm/export.h>
39#include <asm/frame.h>
39#include <linux/err.h> 40#include <linux/err.h>
40 41
41.code64 42.code64
@@ -408,17 +409,19 @@ END(__switch_to_asm)
408 * r12: kernel thread arg 409 * r12: kernel thread arg
409 */ 410 */
410ENTRY(ret_from_fork) 411ENTRY(ret_from_fork)
412 FRAME_BEGIN /* help unwinder find end of stack */
411 movq %rax, %rdi 413 movq %rax, %rdi
412 call schedule_tail /* rdi: 'prev' task parameter */ 414 call schedule_tail /* rdi: 'prev' task parameter */
413 415
414 testq %rbx, %rbx /* from kernel_thread? */ 416 testq %rbx, %rbx /* from kernel_thread? */
415 jnz 1f /* kernel threads are uncommon */ 417 jnz 1f /* kernel threads are uncommon */
416 418
4172: 4192:
418 movq %rsp, %rdi 420 leaq FRAME_OFFSET(%rsp),%rdi /* pt_regs pointer */
419 call syscall_return_slowpath /* returns with IRQs disabled */ 421 call syscall_return_slowpath /* returns with IRQs disabled */
420 TRACE_IRQS_ON /* user mode is traced as IRQS on */ 422 TRACE_IRQS_ON /* user mode is traced as IRQS on */
421 SWAPGS 423 SWAPGS
424 FRAME_END
422 jmp restore_regs_and_iret 425 jmp restore_regs_and_iret
423 426
4241: 4271:
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 05612a2529c8..496e60391fac 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -1010,7 +1010,7 @@ static __init int amd_ibs_init(void)
1010 * all online cpus. 1010 * all online cpus.
1011 */ 1011 */
1012 cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING, 1012 cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
1013 "perf/x86/amd/ibs:STARTING", 1013 "perf/x86/amd/ibs:starting",
1014 x86_pmu_amd_ibs_starting_cpu, 1014 x86_pmu_amd_ibs_starting_cpu,
1015 x86_pmu_amd_ibs_dying_cpu); 1015 x86_pmu_amd_ibs_dying_cpu);
1016 1016
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 019c5887b698..1635c0c8df23 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -505,6 +505,10 @@ int x86_pmu_hw_config(struct perf_event *event)
505 505
506 if (event->attr.precise_ip > precise) 506 if (event->attr.precise_ip > precise)
507 return -EOPNOTSUPP; 507 return -EOPNOTSUPP;
508
 509 /* There's no sense in having PEBS for non-sampling events: */
510 if (!is_sampling_event(event))
511 return -EINVAL;
508 } 512 }
509 /* 513 /*
510 * check that PEBS LBR correction does not conflict with 514 * check that PEBS LBR correction does not conflict with
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 86138267b68a..eb1484c86bb4 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3176,13 +3176,16 @@ static void intel_pmu_cpu_starting(int cpu)
3176 3176
3177 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 3177 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
3178 for_each_cpu(i, topology_sibling_cpumask(cpu)) { 3178 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
3179 struct cpu_hw_events *sibling;
3179 struct intel_excl_cntrs *c; 3180 struct intel_excl_cntrs *c;
3180 3181
3181 c = per_cpu(cpu_hw_events, i).excl_cntrs; 3182 sibling = &per_cpu(cpu_hw_events, i);
3183 c = sibling->excl_cntrs;
3182 if (c && c->core_id == core_id) { 3184 if (c && c->core_id == core_id) {
3183 cpuc->kfree_on_online[1] = cpuc->excl_cntrs; 3185 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
3184 cpuc->excl_cntrs = c; 3186 cpuc->excl_cntrs = c;
3185 cpuc->excl_thread_id = 1; 3187 if (!sibling->excl_thread_id)
3188 cpuc->excl_thread_id = 1;
3186 break; 3189 break;
3187 } 3190 }
3188 } 3191 }
@@ -3987,7 +3990,7 @@ __init int intel_pmu_init(void)
3987 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC); 3990 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
3988 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC; 3991 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
3989 } 3992 }
3990 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; 3993 x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
3991 3994
3992 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) { 3995 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
3993 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", 3996 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
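
The 1 -> 1ULL change is the classic shift-width fix: intel_ctrl is a u64, but the literal 1 is an int, so the shift is evaluated in 32 bits before any widening happens. A hedged illustration of the safe pattern (the helper name is ours, not the kernel's):

/* Build an n-bit mask without 32-bit shift overflow: with a plain
 * (1 << n), n == 31 already produces a negative int and n >= 32 is
 * undefined behavior -- both before the result reaches the u64. */
static inline u64 counter_mask(unsigned int n)
{
	return n >= 64 ? ~0ULL : (1ULL << n) - 1;
}
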
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index fec8a461bdef..1076c9a77292 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -434,6 +434,7 @@ static struct pmu cstate_core_pmu = {
434 .stop = cstate_pmu_event_stop, 434 .stop = cstate_pmu_event_stop,
435 .read = cstate_pmu_event_update, 435 .read = cstate_pmu_event_update,
436 .capabilities = PERF_PMU_CAP_NO_INTERRUPT, 436 .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
437 .module = THIS_MODULE,
437}; 438};
438 439
439static struct pmu cstate_pkg_pmu = { 440static struct pmu cstate_pkg_pmu = {
@@ -447,6 +448,7 @@ static struct pmu cstate_pkg_pmu = {
447 .stop = cstate_pmu_event_stop, 448 .stop = cstate_pmu_event_stop,
448 .read = cstate_pmu_event_update, 449 .read = cstate_pmu_event_update,
449 .capabilities = PERF_PMU_CAP_NO_INTERRUPT, 450 .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
451 .module = THIS_MODULE,
450}; 452};
451 453
452static const struct cstate_model nhm_cstates __initconst = { 454static const struct cstate_model nhm_cstates __initconst = {
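
The new .module field lets the perf core pin the owning module with try_module_get() for as long as any event on the PMU is alive, closing the rmmod-vs-open race these loadable PMU drivers had. A sketch of the pattern for a modular PMU (the my_* callbacks are placeholders, not kernel symbols):

static struct pmu my_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= my_event_init,
	.add		= my_event_add,
	.del		= my_event_del,
	.read		= my_event_read,
	.module		= THIS_MODULE,	/* pinned while events exist */
};
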
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index be202390bbd3..9dfeeeca0ea8 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1389,9 +1389,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
1389 continue; 1389 continue;
1390 1390
1391 /* log the number of dropped samples */ 1391 /* log the number of dropped samples */
1392 if (error[bit]) 1392 if (error[bit]) {
1393 perf_log_lost_samples(event, error[bit]); 1393 perf_log_lost_samples(event, error[bit]);
1394 1394
1395 if (perf_event_account_interrupt(event))
1396 x86_pmu_stop(event, 0);
1397 }
1398
1395 if (counts[bit]) { 1399 if (counts[bit]) {
1396 __intel_pmu_pebs_event(event, iregs, base, 1400 __intel_pmu_pebs_event(event, iregs, base,
1397 top, bit, counts[bit]); 1401 top, bit, counts[bit]);
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index bd34124449b0..22ef4f72cf32 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
161 161
162static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu) 162static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
163{ 163{
164 return rapl_pmus->pmus[topology_logical_package_id(cpu)]; 164 unsigned int pkgid = topology_logical_package_id(cpu);
165
166 /*
167 * The unsigned check also catches the '-1' return value for
168 * non-existent mappings in the topology map.
169 */
170 return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
165} 171}
166 172
167static inline u64 rapl_read_counter(struct perf_event *event) 173static inline u64 rapl_read_counter(struct perf_event *event)
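
The comment deserves unpacking: topology_logical_package_id() returns -1 for CPUs with no package mapping, and storing that in an unsigned int turns it into UINT_MAX, so a single bound check rejects both out-of-range ids and the error value. Reduced to its essence (names are illustrative):

static struct rapl_pmu *lookup_pkg(unsigned int pkgid, unsigned int maxpkg,
				   struct rapl_pmu **pmus)
{
	/* -1 has wrapped to UINT_MAX, so it always fails the check */
	return pkgid < maxpkg ? pmus[pkgid] : NULL;
}
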
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
402 408
403 /* must be done before validate_group */ 409 /* must be done before validate_group */
404 pmu = cpu_to_rapl_pmu(event->cpu); 410 pmu = cpu_to_rapl_pmu(event->cpu);
411 if (!pmu)
412 return -EINVAL;
405 event->cpu = pmu->cpu; 413 event->cpu = pmu->cpu;
406 event->pmu_private = pmu; 414 event->pmu_private = pmu;
407 event->hw.event_base = msr; 415 event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
585 struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); 593 struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
586 int target; 594 int target;
587 595
596 if (!pmu) {
597 pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
598 if (!pmu)
599 return -ENOMEM;
600
601 raw_spin_lock_init(&pmu->lock);
602 INIT_LIST_HEAD(&pmu->active_list);
603 pmu->pmu = &rapl_pmus->pmu;
604 pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
605 rapl_hrtimer_init(pmu);
606
607 rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
608 }
609
588 /* 610 /*
589 * Check if there is an online cpu in the package which collects rapl 611 * Check if there is an online cpu in the package which collects rapl
590 * events already. 612 * events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
598 return 0; 620 return 0;
599} 621}
600 622
601static int rapl_cpu_prepare(unsigned int cpu)
602{
603 struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
604
605 if (pmu)
606 return 0;
607
608 pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
609 if (!pmu)
610 return -ENOMEM;
611
612 raw_spin_lock_init(&pmu->lock);
613 INIT_LIST_HEAD(&pmu->active_list);
614 pmu->pmu = &rapl_pmus->pmu;
615 pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
616 pmu->cpu = -1;
617 rapl_hrtimer_init(pmu);
618 rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
619 return 0;
620}
621
622static int rapl_check_hw_unit(bool apply_quirk) 623static int rapl_check_hw_unit(bool apply_quirk)
623{ 624{
624 u64 msr_rapl_power_unit_bits; 625 u64 msr_rapl_power_unit_bits;
@@ -697,6 +698,7 @@ static int __init init_rapl_pmus(void)
697 rapl_pmus->pmu.start = rapl_pmu_event_start; 698 rapl_pmus->pmu.start = rapl_pmu_event_start;
698 rapl_pmus->pmu.stop = rapl_pmu_event_stop; 699 rapl_pmus->pmu.stop = rapl_pmu_event_stop;
699 rapl_pmus->pmu.read = rapl_pmu_event_read; 700 rapl_pmus->pmu.read = rapl_pmu_event_read;
701 rapl_pmus->pmu.module = THIS_MODULE;
700 return 0; 702 return 0;
701} 703}
702 704
@@ -802,29 +804,21 @@ static int __init rapl_pmu_init(void)
802 /* 804 /*
803 * Install callbacks. Core will call them for each online cpu. 805 * Install callbacks. Core will call them for each online cpu.
804 */ 806 */
805
806 ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
807 rapl_cpu_prepare, NULL);
808 if (ret)
809 goto out;
810
811 ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE, 807 ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
812 "perf/x86/rapl:online", 808 "perf/x86/rapl:online",
813 rapl_cpu_online, rapl_cpu_offline); 809 rapl_cpu_online, rapl_cpu_offline);
814 if (ret) 810 if (ret)
815 goto out1; 811 goto out;
816 812
817 ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1); 813 ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
818 if (ret) 814 if (ret)
819 goto out2; 815 goto out1;
820 816
821 rapl_advertise(); 817 rapl_advertise();
822 return 0; 818 return 0;
823 819
824out2:
825 cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
826out1: 820out1:
827 cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP); 821 cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
828out: 822out:
829 pr_warn("Initialization failed (%d), disabled\n", ret); 823 pr_warn("Initialization failed (%d), disabled\n", ret);
830 cleanup_rapl_pmus(); 824 cleanup_rapl_pmus();
@@ -835,7 +829,6 @@ module_init(rapl_pmu_init);
835static void __exit intel_rapl_exit(void) 829static void __exit intel_rapl_exit(void)
836{ 830{
837 cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE); 831 cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
838 cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
839 perf_pmu_unregister(&rapl_pmus->pmu); 832 perf_pmu_unregister(&rapl_pmus->pmu);
840 cleanup_rapl_pmus(); 833 cleanup_rapl_pmus();
841} 834}
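
The net effect of this file's changes: the separate prepare state is gone, and the online callback now allocates the per-package pmu lazily the first time it sees a package. For reference, the general shape of registering such a combined state (illustrative only -- this uses the dynamic-state API rather than the driver's fixed id, and my_cpu_online/my_cpu_offline are placeholders):

static enum cpuhp_state my_online_state;

static int __init my_driver_init(void)
{
	int ret;

	/* The startup callback runs for every CPU already online at
	 * setup time and for each CPU that comes online later. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				my_cpu_online, my_cpu_offline);
	if (ret < 0)
		return ret;
	my_online_state = ret;	/* dynamic setup returns the state id */
	return 0;
}
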
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 97c246f84dea..1ab45976474d 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
100 100
101struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) 101struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
102{ 102{
103 return pmu->boxes[topology_logical_package_id(cpu)]; 103 unsigned int pkgid = topology_logical_package_id(cpu);
104
105 /*
106 * The unsigned check also catches the '-1' return value for
107 * non-existent mappings in the topology map.
108 */
109 return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
104} 110}
105 111
106u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) 112u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -733,6 +739,7 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
733 .start = uncore_pmu_event_start, 739 .start = uncore_pmu_event_start,
734 .stop = uncore_pmu_event_stop, 740 .stop = uncore_pmu_event_stop,
735 .read = uncore_pmu_event_read, 741 .read = uncore_pmu_event_read,
742 .module = THIS_MODULE,
736 }; 743 };
737 } else { 744 } else {
738 pmu->pmu = *pmu->type->pmu; 745 pmu->pmu = *pmu->type->pmu;
@@ -763,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
763 pmu->registered = false; 770 pmu->registered = false;
764} 771}
765 772
766static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
767{
768 struct intel_uncore_pmu *pmu = type->pmus;
769 struct intel_uncore_box *box;
770 int i, pkg;
771
772 if (pmu) {
773 pkg = topology_physical_package_id(cpu);
774 for (i = 0; i < type->num_boxes; i++, pmu++) {
775 box = pmu->boxes[pkg];
776 if (box)
777 uncore_box_exit(box);
778 }
779 }
780}
781
782static void uncore_exit_boxes(void *dummy)
783{
784 struct intel_uncore_type **types;
785
786 for (types = uncore_msr_uncores; *types; types++)
787 __uncore_exit_boxes(*types++, smp_processor_id());
788}
789
790static void uncore_free_boxes(struct intel_uncore_pmu *pmu) 773static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
791{ 774{
792 int pkg; 775 int pkg;
@@ -1057,86 +1040,6 @@ static void uncore_pci_exit(void)
1057 } 1040 }
1058} 1041}
1059 1042
1060static int uncore_cpu_dying(unsigned int cpu)
1061{
1062 struct intel_uncore_type *type, **types = uncore_msr_uncores;
1063 struct intel_uncore_pmu *pmu;
1064 struct intel_uncore_box *box;
1065 int i, pkg;
1066
1067 pkg = topology_logical_package_id(cpu);
1068 for (; *types; types++) {
1069 type = *types;
1070 pmu = type->pmus;
1071 for (i = 0; i < type->num_boxes; i++, pmu++) {
1072 box = pmu->boxes[pkg];
1073 if (box && atomic_dec_return(&box->refcnt) == 0)
1074 uncore_box_exit(box);
1075 }
1076 }
1077 return 0;
1078}
1079
1080static int first_init;
1081
1082static int uncore_cpu_starting(unsigned int cpu)
1083{
1084 struct intel_uncore_type *type, **types = uncore_msr_uncores;
1085 struct intel_uncore_pmu *pmu;
1086 struct intel_uncore_box *box;
1087 int i, pkg, ncpus = 1;
1088
1089 if (first_init) {
1090 /*
1091 * On init we get the number of online cpus in the package
1092 * and set refcount for all of them.
1093 */
1094 ncpus = cpumask_weight(topology_core_cpumask(cpu));
1095 }
1096
1097 pkg = topology_logical_package_id(cpu);
1098 for (; *types; types++) {
1099 type = *types;
1100 pmu = type->pmus;
1101 for (i = 0; i < type->num_boxes; i++, pmu++) {
1102 box = pmu->boxes[pkg];
1103 if (!box)
1104 continue;
1105 /* The first cpu on a package activates the box */
1106 if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
1107 uncore_box_init(box);
1108 }
1109 }
1110
1111 return 0;
1112}
1113
1114static int uncore_cpu_prepare(unsigned int cpu)
1115{
1116 struct intel_uncore_type *type, **types = uncore_msr_uncores;
1117 struct intel_uncore_pmu *pmu;
1118 struct intel_uncore_box *box;
1119 int i, pkg;
1120
1121 pkg = topology_logical_package_id(cpu);
1122 for (; *types; types++) {
1123 type = *types;
1124 pmu = type->pmus;
1125 for (i = 0; i < type->num_boxes; i++, pmu++) {
1126 if (pmu->boxes[pkg])
1127 continue;
1128 /* First cpu of a package allocates the box */
1129 box = uncore_alloc_box(type, cpu_to_node(cpu));
1130 if (!box)
1131 return -ENOMEM;
1132 box->pmu = pmu;
1133 box->pkgid = pkg;
1134 pmu->boxes[pkg] = box;
1135 }
1136 }
1137 return 0;
1138}
1139
1140static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, 1043static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
1141 int new_cpu) 1044 int new_cpu)
1142{ 1045{
@@ -1176,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
1176 1079
1177static int uncore_event_cpu_offline(unsigned int cpu) 1080static int uncore_event_cpu_offline(unsigned int cpu)
1178{ 1081{
1179 int target; 1082 struct intel_uncore_type *type, **types = uncore_msr_uncores;
1083 struct intel_uncore_pmu *pmu;
1084 struct intel_uncore_box *box;
1085 int i, pkg, target;
1180 1086
1181 /* Check if exiting cpu is used for collecting uncore events */ 1087 /* Check if exiting cpu is used for collecting uncore events */
1182 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) 1088 if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
1183 return 0; 1089 goto unref;
1184
1185 /* Find a new cpu to collect uncore events */ 1090 /* Find a new cpu to collect uncore events */
1186 target = cpumask_any_but(topology_core_cpumask(cpu), cpu); 1091 target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
1187 1092
@@ -1193,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
1193 1098
1194 uncore_change_context(uncore_msr_uncores, cpu, target); 1099 uncore_change_context(uncore_msr_uncores, cpu, target);
1195 uncore_change_context(uncore_pci_uncores, cpu, target); 1100 uncore_change_context(uncore_pci_uncores, cpu, target);
1101
1102unref:
1103 /* Clear the references */
1104 pkg = topology_logical_package_id(cpu);
1105 for (; *types; types++) {
1106 type = *types;
1107 pmu = type->pmus;
1108 for (i = 0; i < type->num_boxes; i++, pmu++) {
1109 box = pmu->boxes[pkg];
1110 if (box && atomic_dec_return(&box->refcnt) == 0)
1111 uncore_box_exit(box);
1112 }
1113 }
1196 return 0; 1114 return 0;
1197} 1115}
1198 1116
1117static int allocate_boxes(struct intel_uncore_type **types,
1118 unsigned int pkg, unsigned int cpu)
1119{
1120 struct intel_uncore_box *box, *tmp;
1121 struct intel_uncore_type *type;
1122 struct intel_uncore_pmu *pmu;
1123 LIST_HEAD(allocated);
1124 int i;
1125
1126 /* Try to allocate all required boxes */
1127 for (; *types; types++) {
1128 type = *types;
1129 pmu = type->pmus;
1130 for (i = 0; i < type->num_boxes; i++, pmu++) {
1131 if (pmu->boxes[pkg])
1132 continue;
1133 box = uncore_alloc_box(type, cpu_to_node(cpu));
1134 if (!box)
1135 goto cleanup;
1136 box->pmu = pmu;
1137 box->pkgid = pkg;
1138 list_add(&box->active_list, &allocated);
1139 }
1140 }
1141 /* Install them in the pmus */
1142 list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1143 list_del_init(&box->active_list);
1144 box->pmu->boxes[pkg] = box;
1145 }
1146 return 0;
1147
1148cleanup:
1149 list_for_each_entry_safe(box, tmp, &allocated, active_list) {
1150 list_del_init(&box->active_list);
1151 kfree(box);
1152 }
1153 return -ENOMEM;
1154}
1155
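
allocate_boxes() is a textbook all-or-nothing allocator: stage every allocation on a private list, publish only once the last one has succeeded, and free the staged set on failure, so concurrent readers never see a half-populated array. The skeleton with the uncore specifics stripped away (struct item and the slots array are illustrative):

struct item {
	struct list_head node;
	int slot;
};

static int allocate_all(struct item **slots, int n)
{
	struct item *it, *tmp;
	LIST_HEAD(staged);
	int i;

	for (i = 0; i < n; i++) {
		if (slots[i])
			continue;		/* already populated */
		it = kzalloc(sizeof(*it), GFP_KERNEL);
		if (!it)
			goto cleanup;
		it->slot = i;
		list_add(&it->node, &staged);
	}
	/* Publish only after every allocation has succeeded */
	list_for_each_entry_safe(it, tmp, &staged, node) {
		list_del_init(&it->node);
		slots[it->slot] = it;
	}
	return 0;

cleanup:
	list_for_each_entry_safe(it, tmp, &staged, node) {
		list_del_init(&it->node);
		kfree(it);
	}
	return -ENOMEM;
}
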
1199static int uncore_event_cpu_online(unsigned int cpu) 1156static int uncore_event_cpu_online(unsigned int cpu)
1200{ 1157{
1201 int target; 1158 struct intel_uncore_type *type, **types = uncore_msr_uncores;
1159 struct intel_uncore_pmu *pmu;
1160 struct intel_uncore_box *box;
1161 int i, ret, pkg, target;
1162
1163 pkg = topology_logical_package_id(cpu);
1164 ret = allocate_boxes(types, pkg, cpu);
1165 if (ret)
1166 return ret;
1167
1168 for (; *types; types++) {
1169 type = *types;
1170 pmu = type->pmus;
1171 for (i = 0; i < type->num_boxes; i++, pmu++) {
1172 box = pmu->boxes[pkg];
1173 if (box && atomic_inc_return(&box->refcnt) == 1)
1174 uncore_box_init(box);
1175 }
1176 }
1202 1177
1203 /* 1178 /*
1204 * Check if there is an online cpu in the package 1179 * Check if there is an online cpu in the package
@@ -1388,38 +1363,16 @@ static int __init intel_uncore_init(void)
1388 if (cret && pret) 1363 if (cret && pret)
1389 return -ENODEV; 1364 return -ENODEV;
1390 1365
1391 /* 1366 /* Install hotplug callbacks to setup the targets for each package */
1392 * Install callbacks. Core will call them for each online cpu. 1367 ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
1393 * 1368 "perf/x86/intel/uncore:online",
1394 * The first online cpu of each package allocates and takes 1369 uncore_event_cpu_online,
1395 * the refcounts for all other online cpus in that package. 1370 uncore_event_cpu_offline);
1396 * If msrs are not enabled no allocation is required and 1371 if (ret)
1397 * uncore_cpu_prepare() is not called for each online cpu. 1372 goto err;
1398 */
1399 if (!cret) {
1400 ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
1401 "perf/x86/intel/uncore:prepare",
1402 uncore_cpu_prepare, NULL);
1403 if (ret)
1404 goto err;
1405 } else {
1406 cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
1407 "perf/x86/intel/uncore:prepare",
1408 uncore_cpu_prepare, NULL);
1409 }
1410 first_init = 1;
1411 cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
1412 "perf/x86/uncore:starting",
1413 uncore_cpu_starting, uncore_cpu_dying);
1414 first_init = 0;
1415 cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
1416 "perf/x86/uncore:online",
1417 uncore_event_cpu_online, uncore_event_cpu_offline);
1418 return 0; 1373 return 0;
1419 1374
1420err: 1375err:
1421 /* Undo box->init_box() */
1422 on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
1423 uncore_types_exit(uncore_msr_uncores); 1376 uncore_types_exit(uncore_msr_uncores);
1424 uncore_pci_exit(); 1377 uncore_pci_exit();
1425 return ret; 1378 return ret;
@@ -1428,9 +1381,7 @@ module_init(intel_uncore_init);
1428 1381
1429static void __exit intel_uncore_exit(void) 1382static void __exit intel_uncore_exit(void)
1430{ 1383{
1431 cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE); 1384 cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
1432 cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
1433 cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
1434 uncore_types_exit(uncore_msr_uncores); 1385 uncore_types_exit(uncore_msr_uncores);
1435 uncore_pci_exit(); 1386 uncore_pci_exit();
1436} 1387}
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index e6832be714bc..dae2fedc1601 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -2686,7 +2686,7 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
2686 2686
2687void hswep_uncore_cpu_init(void) 2687void hswep_uncore_cpu_init(void)
2688{ 2688{
2689 int pkg = topology_phys_to_logical_pkg(0); 2689 int pkg = boot_cpu_data.logical_proc_id;
2690 2690
2691 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 2691 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
2692 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 2692 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 68557f52b961..854022772c5b 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -139,6 +139,19 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
139 asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); 139 asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
140} 140}
141 141
142static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
143{
144 bool negative;
145 asm volatile(LOCK_PREFIX "andb %2,%1\n\t"
146 CC_SET(s)
147 : CC_OUT(s) (negative), ADDR
148 : "ir" ((char) ~(1 << nr)) : "memory");
149 return negative;
150}
151
152// Let everybody know we have it
153#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
154
142/* 155/*
143 * __clear_bit_unlock - Clears a bit in memory 156 * __clear_bit_unlock - Clears a bit in memory
144 * @nr: Bit to clear 157 * @nr: Bit to clear
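
The helper fuses two operations into one locked RMW: release bit @nr and report whether the containing byte came out "negative" (bit 7 set). The page-lock unlock fastpath uses it to drop PG_locked and test PG_waiters in a single instruction. A portable approximation of the same semantics (close to the generic fallback, but note it is two atomics, not one):

static inline bool sketch_clear_bit_unlock_is_negative_byte(long nr,
					volatile unsigned long *addr)
{
	clear_bit_unlock(nr, addr);	/* release the lock bit */
	return test_bit(7, addr);	/* sign bit of the low byte */
}
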
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 34a46dc076d3..8167fdb67ae8 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -57,7 +57,7 @@
57#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avoton/Rangeley */ 57#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avoton/Rangeley */
58#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */ 58#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
59#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */ 59#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */
60#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Annidale */ 60#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */
61#define INTEL_FAM6_ATOM_GOLDMONT 0x5C 61#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
62#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */ 62#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
63 63
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 38711df3bcb5..2266f864b747 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
140extern void load_ucode_ap(void); 140extern void load_ucode_ap(void);
141void reload_early_microcode(void); 141void reload_early_microcode(void);
142extern bool get_builtin_firmware(struct cpio_data *cd, const char *name); 142extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
143extern bool initrd_gone;
143#else 144#else
144static inline int __init microcode_init(void) { return 0; }; 145static inline int __init microcode_init(void) { return 0; };
145static inline void __init load_ucode_bsp(void) { } 146static inline void __init load_ucode_bsp(void) { }
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 195becc6f780..e793fc9a9b20 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -52,6 +52,21 @@ struct extended_sigtable {
52 52
53#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE) 53#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
54 54
55static inline u32 intel_get_microcode_revision(void)
56{
57 u32 rev, dummy;
58
59 native_wrmsrl(MSR_IA32_UCODE_REV, 0);
60
61 /* As documented in the SDM: Do a CPUID 1 here */
62 native_cpuid_eax(1);
63
64 /* get the current revision from MSR 0x8B */
65 native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
66
67 return rev;
68}
69
55#ifdef CONFIG_MICROCODE_INTEL 70#ifdef CONFIG_MICROCODE_INTEL
56extern void __init load_ucode_intel_bsp(void); 71extern void __init load_ucode_intel_bsp(void);
57extern void load_ucode_intel_ap(void); 72extern void load_ucode_intel_ap(void);
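
Hoisting the wrmsr/cpuid/rdmsr dance into this header is what lets the later hunks in cpu/intel.c and microcode/intel.c delete their private copies. The call-site shape after the conversion (condensed from the apply path further down):

	/* write the patch via MSR 0x79, then re-read the live revision */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return -1;	/* the update did not take */
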
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index eaf100508c36..e6cfe7ba2d65 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -104,6 +104,7 @@ struct cpuinfo_x86 {
104 __u8 x86_phys_bits; 104 __u8 x86_phys_bits;
105 /* CPUID returned core id bits: */ 105 /* CPUID returned core id bits: */
106 __u8 x86_coreid_bits; 106 __u8 x86_coreid_bits;
107 __u8 cu_id;
107 /* Max extended CPUID function supported: */ 108 /* Max extended CPUID function supported: */
108 __u32 extended_cpuid_level; 109 __u32 extended_cpuid_level;
109 /* Maximum supported CPUID level, -1=no CPUID: */ 110 /* Maximum supported CPUID level, -1=no CPUID: */
@@ -219,6 +220,24 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
219 : "memory"); 220 : "memory");
220} 221}
221 222
223#define native_cpuid_reg(reg) \
224static inline unsigned int native_cpuid_##reg(unsigned int op) \
225{ \
226 unsigned int eax = op, ebx, ecx = 0, edx; \
227 \
228 native_cpuid(&eax, &ebx, &ecx, &edx); \
229 \
230 return reg; \
231}
232
233/*
234 * Native CPUID functions returning a single datum.
235 */
236native_cpuid_reg(eax)
237native_cpuid_reg(ebx)
238native_cpuid_reg(ecx)
239native_cpuid_reg(edx)
240
222static inline void load_cr3(pgd_t *pgdir) 241static inline void load_cr3(pgd_t *pgdir)
223{ 242{
224 write_cr3(__pa(pgdir)); 243 write_cr3(__pa(pgdir));
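
The macro stamps out four single-register accessors so that early-boot code can ask for one CPUID output without juggling four locals. For example, leaf 1's EAX is the family/model/stepping signature word:

	/* runs CPUID with eax = 1, ecx = 0 and keeps only eax */
	unsigned int sig = native_cpuid_eax(1);
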
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index a3269c897ec5..2e41c50ddf47 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -58,7 +58,7 @@ get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
58 if (task == current) 58 if (task == current)
59 return __builtin_frame_address(0); 59 return __builtin_frame_address(0);
60 60
61 return (unsigned long *)((struct inactive_task_frame *)task->thread.sp)->bp; 61 return &((struct inactive_task_frame *)task->thread.sp)->bp;
62} 62}
63#else 63#else
64static inline unsigned long * 64static inline unsigned long *
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 5cb436acd463..fcc5cd387fd1 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -36,7 +36,10 @@ static inline void prepare_switch_to(struct task_struct *prev,
36 36
37asmlinkage void ret_from_fork(void); 37asmlinkage void ret_from_fork(void);
38 38
39/* data that is pointed to by thread.sp */ 39/*
40 * This is the structure pointed to by thread.sp for an inactive task. The
41 * order of the fields must match the code in __switch_to_asm().
42 */
40struct inactive_task_frame { 43struct inactive_task_frame {
41#ifdef CONFIG_X86_64 44#ifdef CONFIG_X86_64
42 unsigned long r15; 45 unsigned long r15;
@@ -48,6 +51,11 @@ struct inactive_task_frame {
48 unsigned long di; 51 unsigned long di;
49#endif 52#endif
50 unsigned long bx; 53 unsigned long bx;
54
55 /*
56 * These two fields must be together. They form a stack frame header,
57 * needed by get_frame_pointer().
58 */
51 unsigned long bp; 59 unsigned long bp;
52 unsigned long ret_addr; 60 unsigned long ret_addr;
53}; 61};
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 945e512a112a..bd6b8c270c24 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2115,6 +2115,7 @@ static inline void __init check_timer(void)
2115 if (idx != -1 && irq_trigger(idx)) 2115 if (idx != -1 && irq_trigger(idx))
2116 unmask_ioapic_irq(irq_get_chip_data(0)); 2116 unmask_ioapic_irq(irq_get_chip_data(0));
2117 } 2117 }
2118 irq_domain_deactivate_irq(irq_data);
2118 irq_domain_activate_irq(irq_data); 2119 irq_domain_activate_irq(irq_data);
2119 if (timer_irq_works()) { 2120 if (timer_irq_works()) {
2120 if (disable_timer_pin_1 > 0) 2121 if (disable_timer_pin_1 > 0)
@@ -2136,6 +2137,7 @@ static inline void __init check_timer(void)
2136 * legacy devices should be connected to IO APIC #0 2137 * legacy devices should be connected to IO APIC #0
2137 */ 2138 */
2138 replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2); 2139 replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
2140 irq_domain_deactivate_irq(irq_data);
2139 irq_domain_activate_irq(irq_data); 2141 irq_domain_activate_irq(irq_data);
2140 legacy_pic->unmask(0); 2142 legacy_pic->unmask(0);
2141 if (timer_irq_works()) { 2143 if (timer_irq_works()) {
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 71cae73a5076..2b4cf04239b6 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -312,12 +312,19 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
312 u32 eax, ebx, ecx, edx; 312 u32 eax, ebx, ecx, edx;
313 313
314 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); 314 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
315 node_id = ecx & 7;
316 315
317 /* get compute unit information */ 316 node_id = ecx & 0xff;
318 smp_num_siblings = ((ebx >> 8) & 3) + 1; 317 smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
319 c->x86_max_cores /= smp_num_siblings; 318
320 c->cpu_core_id = ebx & 0xff; 319 if (c->x86 == 0x15)
320 c->cu_id = ebx & 0xff;
321
322 if (c->x86 >= 0x17) {
323 c->cpu_core_id = ebx & 0xff;
324
325 if (smp_num_siblings > 1)
326 c->x86_max_cores /= smp_num_siblings;
327 }
321 328
322 /* 329 /*
323 * We may have multiple LLCs if L3 caches exist, so check if we 330 * We may have multiple LLCs if L3 caches exist, so check if we
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index dc1697ca5191..ede03e849a8b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1015,6 +1015,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
1015 c->x86_model_id[0] = '\0'; /* Unset */ 1015 c->x86_model_id[0] = '\0'; /* Unset */
1016 c->x86_max_cores = 1; 1016 c->x86_max_cores = 1;
1017 c->x86_coreid_bits = 0; 1017 c->x86_coreid_bits = 0;
1018 c->cu_id = 0xff;
1018#ifdef CONFIG_X86_64 1019#ifdef CONFIG_X86_64
1019 c->x86_clflush_size = 64; 1020 c->x86_clflush_size = 64;
1020 c->x86_phys_bits = 36; 1021 c->x86_phys_bits = 36;
@@ -1221,7 +1222,7 @@ static __init int setup_disablecpuid(char *arg)
1221{ 1222{
1222 int bit; 1223 int bit;
1223 1224
1224 if (get_option(&arg, &bit) && bit < NCAPINTS*32) 1225 if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
1225 setup_clear_cpu_cap(bit); 1226 setup_clear_cpu_cap(bit);
1226 else 1227 else
1227 return 0; 1228 return 0;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index fcd484d2bb03..203f860d2ab3 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -14,6 +14,7 @@
14#include <asm/bugs.h> 14#include <asm/bugs.h>
15#include <asm/cpu.h> 15#include <asm/cpu.h>
16#include <asm/intel-family.h> 16#include <asm/intel-family.h>
17#include <asm/microcode_intel.h>
17 18
18#ifdef CONFIG_X86_64 19#ifdef CONFIG_X86_64
19#include <linux/topology.h> 20#include <linux/topology.h>
@@ -78,14 +79,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
78 (c->x86 == 0x6 && c->x86_model >= 0x0e)) 79 (c->x86 == 0x6 && c->x86_model >= 0x0e))
79 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 80 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
80 81
81 if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) { 82 if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
82 unsigned lower_word; 83 c->microcode = intel_get_microcode_revision();
83
84 wrmsr(MSR_IA32_UCODE_REV, 0, 0);
85 /* Required by the SDM */
86 sync_core();
87 rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
88 }
89 84
90 /* 85 /*
91 * Atom erratum AAE44/AAF40/AAG38/AAH41: 86 * Atom erratum AAE44/AAF40/AAG38/AAH41:
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 00ef43233e03..537c6647d84c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
1373 1373
1374static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; 1374static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
1375 1375
1376static void __restart_timer(struct timer_list *t, unsigned long interval) 1376static void __start_timer(struct timer_list *t, unsigned long interval)
1377{ 1377{
1378 unsigned long when = jiffies + interval; 1378 unsigned long when = jiffies + interval;
1379 unsigned long flags; 1379 unsigned long flags;
1380 1380
1381 local_irq_save(flags); 1381 local_irq_save(flags);
1382 1382
1383 if (timer_pending(t)) { 1383 if (!timer_pending(t) || time_before(when, t->expires))
1384 if (time_before(when, t->expires)) 1384 mod_timer(t, round_jiffies(when));
1385 mod_timer(t, when);
1386 } else {
1387 t->expires = round_jiffies(when);
1388 add_timer_on(t, smp_processor_id());
1389 }
1390 1385
1391 local_irq_restore(flags); 1386 local_irq_restore(flags);
1392} 1387}
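
Collapsing the two branches into one condition is legal because of mod_timer()'s documented semantics: on a pending timer it re-arms it, and on an inactive timer it behaves like add_timer(). The per-CPU placement that add_timer_on() used to provide still holds because the callers below create the timer with setup_pinned_timer() (TIMER_PINNED) and arm it from the target CPU. The resulting idiom:

	/* re-arm for "when", keeping an earlier already-queued expiry;
	 * works whether or not the timer is currently pending */
	if (!timer_pending(t) || time_before(when, t->expires))
		mod_timer(t, round_jiffies(when));
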
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)
1421 1416
1422done: 1417done:
1423 __this_cpu_write(mce_next_interval, iv); 1418 __this_cpu_write(mce_next_interval, iv);
1424 __restart_timer(t, iv); 1419 __start_timer(t, iv);
1425} 1420}
1426 1421
1427/* 1422/*
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
1432 struct timer_list *t = this_cpu_ptr(&mce_timer); 1427 struct timer_list *t = this_cpu_ptr(&mce_timer);
1433 unsigned long iv = __this_cpu_read(mce_next_interval); 1428 unsigned long iv = __this_cpu_read(mce_next_interval);
1434 1429
1435 __restart_timer(t, interval); 1430 __start_timer(t, interval);
1436 1431
1437 if (interval < iv) 1432 if (interval < iv)
1438 __this_cpu_write(mce_next_interval, interval); 1433 __this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
1779 } 1774 }
1780} 1775}
1781 1776
1782static void mce_start_timer(unsigned int cpu, struct timer_list *t) 1777static void mce_start_timer(struct timer_list *t)
1783{ 1778{
1784 unsigned long iv = check_interval * HZ; 1779 unsigned long iv = check_interval * HZ;
1785 1780
1786 if (mca_cfg.ignore_ce || !iv) 1781 if (mca_cfg.ignore_ce || !iv)
1787 return; 1782 return;
1788 1783
1789 per_cpu(mce_next_interval, cpu) = iv; 1784 this_cpu_write(mce_next_interval, iv);
1790 1785 __start_timer(t, iv);
1791 t->expires = round_jiffies(jiffies + iv);
1792 add_timer_on(t, cpu);
1793} 1786}
1794 1787
1795static void __mcheck_cpu_setup_timer(void) 1788static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
1806 unsigned int cpu = smp_processor_id(); 1799 unsigned int cpu = smp_processor_id();
1807 1800
1808 setup_pinned_timer(t, mce_timer_fn, cpu); 1801 setup_pinned_timer(t, mce_timer_fn, cpu);
1809 mce_start_timer(cpu, t); 1802 mce_start_timer(t);
1810} 1803}
1811 1804
1812/* Handle unconfigured int18 (should never happen) */ 1805/* Handle unconfigured int18 (should never happen) */
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)
2566 2559
2567static int mce_cpu_online(unsigned int cpu) 2560static int mce_cpu_online(unsigned int cpu)
2568{ 2561{
2569 struct timer_list *t = &per_cpu(mce_timer, cpu); 2562 struct timer_list *t = this_cpu_ptr(&mce_timer);
2570 int ret; 2563 int ret;
2571 2564
2572 mce_device_create(cpu); 2565 mce_device_create(cpu);
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
2577 return ret; 2570 return ret;
2578 } 2571 }
2579 mce_reenable_cpu(); 2572 mce_reenable_cpu();
2580 mce_start_timer(cpu, t); 2573 mce_start_timer(t);
2581 return 0; 2574 return 0;
2582} 2575}
2583 2576
2584static int mce_cpu_pre_down(unsigned int cpu) 2577static int mce_cpu_pre_down(unsigned int cpu)
2585{ 2578{
2586 struct timer_list *t = &per_cpu(mce_timer, cpu); 2579 struct timer_list *t = this_cpu_ptr(&mce_timer);
2587 2580
2588 mce_disable_cpu(); 2581 mce_disable_cpu();
2589 del_timer_sync(t); 2582 del_timer_sync(t);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index ffacfdcacb85..a5fd137417a2 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -1182,6 +1182,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
1182 const char *name = get_name(bank, NULL); 1182 const char *name = get_name(bank, NULL);
1183 int err = 0; 1183 int err = 0;
1184 1184
1185 if (!dev)
1186 return -ENODEV;
1187
1185 if (is_shared_bank(bank)) { 1188 if (is_shared_bank(bank)) {
1186 nb = node_to_amd_nb(amd_get_nb_id(cpu)); 1189 nb = node_to_amd_nb(amd_get_nb_id(cpu));
1187 1190
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 6a31e2691f3a..079e81733a58 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
384reget: 384reget:
385 if (!get_builtin_microcode(&cp, family)) { 385 if (!get_builtin_microcode(&cp, family)) {
386#ifdef CONFIG_BLK_DEV_INITRD 386#ifdef CONFIG_BLK_DEV_INITRD
387 cp = find_cpio_data(ucode_path, (void *)initrd_start, 387 if (!initrd_gone)
388 initrd_end - initrd_start, NULL); 388 cp = find_cpio_data(ucode_path, (void *)initrd_start,
389 initrd_end - initrd_start, NULL);
389#endif 390#endif
390 if (!(cp.data && cp.size)) { 391 if (!(cp.data && cp.size)) {
391 /* 392 /*
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 2af69d27da62..73102d932760 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -46,6 +46,8 @@
46static struct microcode_ops *microcode_ops; 46static struct microcode_ops *microcode_ops;
47static bool dis_ucode_ldr = true; 47static bool dis_ucode_ldr = true;
48 48
49bool initrd_gone;
50
49LIST_HEAD(microcode_cache); 51LIST_HEAD(microcode_cache);
50 52
51/* 53/*
@@ -190,21 +192,24 @@ void load_ucode_ap(void)
190static int __init save_microcode_in_initrd(void) 192static int __init save_microcode_in_initrd(void)
191{ 193{
192 struct cpuinfo_x86 *c = &boot_cpu_data; 194 struct cpuinfo_x86 *c = &boot_cpu_data;
195 int ret = -EINVAL;
193 196
194 switch (c->x86_vendor) { 197 switch (c->x86_vendor) {
195 case X86_VENDOR_INTEL: 198 case X86_VENDOR_INTEL:
196 if (c->x86 >= 6) 199 if (c->x86 >= 6)
197 return save_microcode_in_initrd_intel(); 200 ret = save_microcode_in_initrd_intel();
198 break; 201 break;
199 case X86_VENDOR_AMD: 202 case X86_VENDOR_AMD:
200 if (c->x86 >= 0x10) 203 if (c->x86 >= 0x10)
201 return save_microcode_in_initrd_amd(c->x86); 204 ret = save_microcode_in_initrd_amd(c->x86);
202 break; 205 break;
203 default: 206 default:
204 break; 207 break;
205 } 208 }
206 209
207 return -EINVAL; 210 initrd_gone = true;
211
212 return ret;
208} 213}
209 214
210struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) 215struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
247 * has the virtual address of the beginning of the initrd. It also 252 * has the virtual address of the beginning of the initrd. It also
248 * possibly relocates the ramdisk. In either case, initrd_start contains 253 * possibly relocates the ramdisk. In either case, initrd_start contains
249 * the updated address so use that instead. 254 * the updated address so use that instead.
255 *
256 * initrd_gone is for the hotplug case where we've thrown out initrd
257 * already.
250 */ 258 */
251 if (!use_pa && initrd_start) 259 if (!use_pa) {
252 start = initrd_start; 260 if (initrd_gone)
261 return (struct cpio_data){ NULL, 0, "" };
262 if (initrd_start)
263 start = initrd_start;
264 }
253 265
254 return find_cpio_data(path, (void *)start, size, NULL); 266 return find_cpio_data(path, (void *)start, size, NULL);
255#else /* !CONFIG_BLK_DEV_INITRD */ 267#else /* !CONFIG_BLK_DEV_INITRD */
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index b624b54912e1..8325d8a09ab0 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -41,7 +41,7 @@
41 41
42static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; 42static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
43 43
44/* Current microcode patch used in early patching */ 44/* Current microcode patch used in early patching on the APs. */
45struct microcode_intel *intel_ucode_patch; 45struct microcode_intel *intel_ucode_patch;
46 46
47static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1, 47static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
@@ -150,7 +150,7 @@ static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
150{ 150{
151 struct ucode_patch *p; 151 struct ucode_patch *p;
152 152
153 p = kzalloc(size, GFP_KERNEL); 153 p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
154 if (!p) 154 if (!p)
155 return ERR_PTR(-ENOMEM); 155 return ERR_PTR(-ENOMEM);
156 156
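
The one-word change fixes an allocation-size mixup: this function allocates the ucode_patch descriptor, not the microcode image, so sizing it by the image length over-allocated memory for every cached patch. The intended shape, assuming the payload is duplicated separately (the hunk does not show the remainder of the function):

	struct ucode_patch *p = kzalloc(sizeof(*p), GFP_KERNEL); /* descriptor */
	if (!p)
		return ERR_PTR(-ENOMEM);

	p->data = kmemdup(data, size, GFP_KERNEL);	/* payload */
	if (!p->data) {
		kfree(p);
		return ERR_PTR(-ENOMEM);
	}
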
@@ -368,26 +368,6 @@ next:
368 return patch; 368 return patch;
369} 369}
370 370
371static void cpuid_1(void)
372{
373 /*
374 * According to the Intel SDM, Volume 3, 9.11.7:
375 *
376 * CPUID returns a value in a model specific register in
377 * addition to its usual register return values. The
378 * semantics of CPUID cause it to deposit an update ID value
379 * in the 64-bit model-specific register at address 08BH
380 * (IA32_BIOS_SIGN_ID). If no update is present in the
381 * processor, the value in the MSR remains unmodified.
382 *
383 * Use native_cpuid -- this code runs very early and we don't
384 * want to mess with paravirt.
385 */
386 unsigned int eax = 1, ebx, ecx = 0, edx;
387
388 native_cpuid(&eax, &ebx, &ecx, &edx);
389}
390
391static int collect_cpu_info_early(struct ucode_cpu_info *uci) 371static int collect_cpu_info_early(struct ucode_cpu_info *uci)
392{ 372{
393 unsigned int val[2]; 373 unsigned int val[2];
@@ -410,15 +390,8 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
410 native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); 390 native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
411 csig.pf = 1 << ((val[1] >> 18) & 7); 391 csig.pf = 1 << ((val[1] >> 18) & 7);
412 } 392 }
413 native_wrmsrl(MSR_IA32_UCODE_REV, 0);
414
415 /* As documented in the SDM: Do a CPUID 1 here */
416 cpuid_1();
417
418 /* get the current revision from MSR 0x8B */
419 native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
420 393
421 csig.rev = val[1]; 394 csig.rev = intel_get_microcode_revision();
422 395
423 uci->cpu_sig = csig; 396 uci->cpu_sig = csig;
424 uci->valid = 1; 397 uci->valid = 1;
@@ -602,7 +575,7 @@ static inline void print_ucode(struct ucode_cpu_info *uci)
602static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) 575static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
603{ 576{
604 struct microcode_intel *mc; 577 struct microcode_intel *mc;
605 unsigned int val[2]; 578 u32 rev;
606 579
607 mc = uci->mc; 580 mc = uci->mc;
608 if (!mc) 581 if (!mc)
@@ -610,21 +583,16 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
610 583
611 /* write microcode via MSR 0x79 */ 584 /* write microcode via MSR 0x79 */
612 native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); 585 native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
613 native_wrmsrl(MSR_IA32_UCODE_REV, 0);
614
615 /* As documented in the SDM: Do a CPUID 1 here */
616 cpuid_1();
617 586
618 /* get the current revision from MSR 0x8B */ 587 rev = intel_get_microcode_revision();
619 native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]); 588 if (rev != mc->hdr.rev)
620 if (val[1] != mc->hdr.rev)
621 return -1; 589 return -1;
622 590
623#ifdef CONFIG_X86_64 591#ifdef CONFIG_X86_64
624 /* Flush the global TLB. This is a precaution. */ 592 /* Flush the global TLB. This is a precaution. */
625 flush_tlb_early(); 593 flush_tlb_early();
626#endif 594#endif
627 uci->cpu_sig.rev = val[1]; 595 uci->cpu_sig.rev = rev;
628 596
629 if (early) 597 if (early)
630 print_ucode(uci); 598 print_ucode(uci);
@@ -639,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
639 struct ucode_cpu_info uci; 607 struct ucode_cpu_info uci;
640 struct cpio_data cp; 608 struct cpio_data cp;
641 609
642 /*
643 * AP loading didn't find any microcode patch, no need to save anything.
644 */
645 if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
646 return 0;
647
648 if (!load_builtin_intel_microcode(&cp)) 610 if (!load_builtin_intel_microcode(&cp))
649 cp = find_microcode_in_initrd(ucode_path, false); 611 cp = find_microcode_in_initrd(ucode_path, false);
650 612
@@ -660,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
660 return 0; 622 return 0;
661} 623}
662 624
663
664/* 625/*
665 * @res_patch, output: a pointer to the patch we found. 626 * @res_patch, output: a pointer to the patch we found.
666 */ 627 */
@@ -804,8 +765,8 @@ static int apply_microcode_intel(int cpu)
804 struct microcode_intel *mc; 765 struct microcode_intel *mc;
805 struct ucode_cpu_info *uci; 766 struct ucode_cpu_info *uci;
806 struct cpuinfo_x86 *c; 767 struct cpuinfo_x86 *c;
807 unsigned int val[2];
808 static int prev_rev; 768 static int prev_rev;
769 u32 rev;
809 770
810 /* We should bind the task to the CPU */ 771 /* We should bind the task to the CPU */
811 if (WARN_ON(raw_smp_processor_id() != cpu)) 772 if (WARN_ON(raw_smp_processor_id() != cpu))
@@ -822,33 +783,28 @@ static int apply_microcode_intel(int cpu)
822 783
823 /* write microcode via MSR 0x79 */ 784 /* write microcode via MSR 0x79 */
824 wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); 785 wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
825 wrmsrl(MSR_IA32_UCODE_REV, 0);
826
827 /* As documented in the SDM: Do a CPUID 1 here */
828 cpuid_1();
829 786
830 /* get the current revision from MSR 0x8B */ 787 rev = intel_get_microcode_revision();
831 rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
832 788
833 if (val[1] != mc->hdr.rev) { 789 if (rev != mc->hdr.rev) {
834 pr_err("CPU%d update to revision 0x%x failed\n", 790 pr_err("CPU%d update to revision 0x%x failed\n",
835 cpu, mc->hdr.rev); 791 cpu, mc->hdr.rev);
836 return -1; 792 return -1;
837 } 793 }
838 794
839 if (val[1] != prev_rev) { 795 if (rev != prev_rev) {
840 pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n", 796 pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
841 val[1], 797 rev,
842 mc->hdr.date & 0xffff, 798 mc->hdr.date & 0xffff,
843 mc->hdr.date >> 24, 799 mc->hdr.date >> 24,
844 (mc->hdr.date >> 16) & 0xff); 800 (mc->hdr.date >> 16) & 0xff);
845 prev_rev = val[1]; 801 prev_rev = rev;
846 } 802 }
847 803
848 c = &cpu_data(cpu); 804 c = &cpu_data(cpu);
849 805
850 uci->cpu_sig.rev = val[1]; 806 uci->cpu_sig.rev = rev;
851 c->microcode = val[1]; 807 c->microcode = rev;
852 808
853 return 0; 809 return 0;
854} 810}
@@ -860,7 +816,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
860 u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL; 816 u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
861 int new_rev = uci->cpu_sig.rev; 817 int new_rev = uci->cpu_sig.rev;
862 unsigned int leftover = size; 818 unsigned int leftover = size;
863 unsigned int curr_mc_size = 0; 819 unsigned int curr_mc_size = 0, new_mc_size = 0;
864 unsigned int csig, cpf; 820 unsigned int csig, cpf;
865 821
866 while (leftover) { 822 while (leftover) {
@@ -901,6 +857,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
901 vfree(new_mc); 857 vfree(new_mc);
902 new_rev = mc_header.rev; 858 new_rev = mc_header.rev;
903 new_mc = mc; 859 new_mc = mc;
860 new_mc_size = mc_size;
904 mc = NULL; /* trigger new vmalloc */ 861 mc = NULL; /* trigger new vmalloc */
905 } 862 }
906 863
@@ -926,7 +883,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
926 * permanent memory. So it will be loaded early when a CPU is hot added 883 * permanent memory. So it will be loaded early when a CPU is hot added
927 * or resumes. 884 * or resumes.
928 */ 885 */
929 save_mc_for_early(new_mc, curr_mc_size); 886 save_mc_for_early(new_mc, new_mc_size);
930 887
931 pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", 888 pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
932 cpu, new_rev, uci->cpu_sig.rev); 889 cpu, new_rev, uci->cpu_sig.rev);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index e4e97a5355ce..de7234401275 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -9,6 +9,7 @@
9#include <asm/fpu/regset.h> 9#include <asm/fpu/regset.h>
10#include <asm/fpu/signal.h> 10#include <asm/fpu/signal.h>
11#include <asm/fpu/types.h> 11#include <asm/fpu/types.h>
12#include <asm/fpu/xstate.h>
12#include <asm/traps.h> 13#include <asm/traps.h>
13 14
14#include <linux/hardirq.h> 15#include <linux/hardirq.h>
@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
183 * it will #GP. Make sure it is replaced after the memset(). 184 * it will #GP. Make sure it is replaced after the memset().
184 */ 185 */
185 if (static_cpu_has(X86_FEATURE_XSAVES)) 186 if (static_cpu_has(X86_FEATURE_XSAVES))
186 state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT; 187 state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
188 xfeatures_mask;
187 189
188 if (static_cpu_has(X86_FEATURE_FXSR)) 190 if (static_cpu_has(X86_FEATURE_FXSR))
189 fpstate_init_fxstate(&state->fxsave); 191 fpstate_init_fxstate(&state->fxsave);
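
Background for the one-liner, summarizing the SDM's XSAVES description: bit 63 of xcomp_bv selects the compacted format, while bits 62:0 must enumerate the state components laid out in the buffer. Initializing only the format bit therefore described an empty buffer, which XRSTORS rejects with #GP as the comment above warns; OR-ing in xfeatures_mask declares every enabled component:

	/* format selector (bit 63) plus the bitmap of present states */
	state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
				       xfeatures_mask;
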
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 85e87b46c318..dc6ba5bda9fc 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
352 } else { 352 } else {
353 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); 353 struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
354 354
355 irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
355 irq_domain_activate_irq(irq_get_irq_data(hdev->irq)); 356 irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
356 disable_irq(hdev->irq); 357 disable_irq(hdev->irq);
357 irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); 358 irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index b47edb8f5256..410efb2c7b80 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -68,12 +68,10 @@ static struct dma_map_ops swiotlb_dma_ops = {
68 */ 68 */
69int __init pci_swiotlb_detect_override(void) 69int __init pci_swiotlb_detect_override(void)
70{ 70{
71 int use_swiotlb = swiotlb | swiotlb_force; 71 if (swiotlb_force == SWIOTLB_FORCE)
72
73 if (swiotlb_force)
74 swiotlb = 1; 72 swiotlb = 1;
75 73
76 return use_swiotlb; 74 return swiotlb;
77} 75}
78IOMMU_INIT_FINISH(pci_swiotlb_detect_override, 76IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
79 pci_xen_swiotlb_detect, 77 pci_xen_swiotlb_detect,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 46732dc3b73c..99b920d0e516 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -433,9 +433,15 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
433 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; 433 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
434 434
435 if (c->phys_proc_id == o->phys_proc_id && 435 if (c->phys_proc_id == o->phys_proc_id &&
436 per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) && 436 per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
437 c->cpu_core_id == o->cpu_core_id) 437 if (c->cpu_core_id == o->cpu_core_id)
438 return topology_sane(c, o, "smt"); 438 return topology_sane(c, o, "smt");
439
440 if ((c->cu_id != 0xff) &&
441 (o->cu_id != 0xff) &&
442 (c->cu_id == o->cu_id))
443 return topology_sane(c, o, "smt");
444 }
439 445
440 } else if (c->phys_proc_id == o->phys_proc_id && 446 } else if (c->phys_proc_id == o->phys_proc_id &&
441 c->cpu_core_id == o->cpu_core_id) { 447 c->cpu_core_id == o->cpu_core_id) {
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index be3a49ee0356..37e7cf544e51 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -694,6 +694,7 @@ unsigned long native_calibrate_tsc(void)
694 crystal_khz = 24000; /* 24.0 MHz */ 694 crystal_khz = 24000; /* 24.0 MHz */
695 break; 695 break;
696 case INTEL_FAM6_SKYLAKE_X: 696 case INTEL_FAM6_SKYLAKE_X:
697 case INTEL_FAM6_ATOM_DENVERTON:
697 crystal_khz = 25000; /* 25.0 MHz */ 698 crystal_khz = 25000; /* 25.0 MHz */
698 break; 699 break;
699 case INTEL_FAM6_ATOM_GOLDMONT: 700 case INTEL_FAM6_ATOM_GOLDMONT:
@@ -1355,6 +1356,9 @@ void __init tsc_init(void)
1355 (unsigned long)cpu_khz / 1000, 1356 (unsigned long)cpu_khz / 1000,
1356 (unsigned long)cpu_khz % 1000); 1357 (unsigned long)cpu_khz % 1000);
1357 1358
1359 /* Sanitize TSC ADJUST before cyc2ns gets initialized */
1360 tsc_store_and_check_tsc_adjust(true);
1361
1358 /* 1362 /*
1359 * Secondary CPUs do not run through tsc_init(), so set up 1363 * Secondary CPUs do not run through tsc_init(), so set up
1360 * all the scale factors for all CPUs, assuming the same 1364 * all the scale factors for all CPUs, assuming the same
@@ -1385,8 +1389,6 @@ void __init tsc_init(void)
1385 1389
1386 if (unsynchronized_tsc()) 1390 if (unsynchronized_tsc())
1387 mark_tsc_unstable("TSCs unsynchronized"); 1391 mark_tsc_unstable("TSCs unsynchronized");
1388 else
1389 tsc_store_and_check_tsc_adjust(true);
1390 1392
1391 check_system_tsc_reliable(); 1393 check_system_tsc_reliable();
1392 1394
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index d0db011051a5..728f75378475 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -286,13 +286,6 @@ void check_tsc_sync_source(int cpu)
286 if (unsynchronized_tsc()) 286 if (unsynchronized_tsc())
287 return; 287 return;
288 288
289 if (tsc_clocksource_reliable) {
290 if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
291 pr_info(
292 "Skipped synchronization checks as TSC is reliable.\n");
293 return;
294 }
295
296 /* 289 /*
297 * Set the maximum number of test runs to 290 * Set the maximum number of test runs to
298 * 1 if the CPU does not provide the TSC_ADJUST MSR 291 * 1 if the CPU does not provide the TSC_ADJUST MSR
@@ -380,14 +373,19 @@ void check_tsc_sync_target(void)
380 int cpus = 2; 373 int cpus = 2;
381 374
382 /* Also aborts if there is no TSC. */ 375 /* Also aborts if there is no TSC. */
383 if (unsynchronized_tsc() || tsc_clocksource_reliable) 376 if (unsynchronized_tsc())
384 return; 377 return;
385 378
386 /* 379 /*
387 * Store, verify and sanitize the TSC adjust register. If 380 * Store, verify and sanitize the TSC adjust register. If
388 * successful skip the test. 381 * successful skip the test.
382 *
383 * The test is also skipped when the TSC is marked reliable. This
384 * is true for SoCs which have no fallback clocksource. On these
385 * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
386 * register might have been wrecked by the BIOS.
389 */ 387 */
390 if (tsc_store_and_check_tsc_adjust(false)) { 388 if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
391 atomic_inc(&skip_test); 389 atomic_inc(&skip_test);
392 return; 390 return;
393 } 391 }
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 4443e499f279..23d15565d02a 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -6,6 +6,21 @@
6 6
7#define FRAME_HEADER_SIZE (sizeof(long) * 2) 7#define FRAME_HEADER_SIZE (sizeof(long) * 2)
8 8
9/*
10 * This disables KASAN checking when reading a value from another task's stack,
11 * since the other task could be running on another CPU and could have poisoned
12 * the stack in the meantime.
13 */
14#define READ_ONCE_TASK_STACK(task, x) \
15({ \
16 unsigned long val; \
17 if (task == current) \
18 val = READ_ONCE(x); \
19 else \
20 val = READ_ONCE_NOCHECK(x); \
21 val; \
22})
23
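
READ_ONCE_NOCHECK() emits the same single load as READ_ONCE() but through a non-instrumented accessor, so KASAN will not flag reads of memory that another CPU may legitimately be rewriting. Every cross-task stack dereference in this file then goes through the wrapper, e.g.:

	/* instead of a bare: addr = *addr_p; */
	addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
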
9static void unwind_dump(struct unwind_state *state, unsigned long *sp) 24static void unwind_dump(struct unwind_state *state, unsigned long *sp)
10{ 25{
11 static bool dumped_before = false; 26 static bool dumped_before = false;
@@ -48,7 +63,8 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
48 if (state->regs && user_mode(state->regs)) 63 if (state->regs && user_mode(state->regs))
49 return 0; 64 return 0;
50 65
51 addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p, 66 addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
67 addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
52 addr_p); 68 addr_p);
53 69
54 return __kernel_text_address(addr) ? addr : 0; 70 return __kernel_text_address(addr) ? addr : 0;
@@ -162,7 +178,7 @@ bool unwind_next_frame(struct unwind_state *state)
162 if (state->regs) 178 if (state->regs)
163 next_bp = (unsigned long *)state->regs->bp; 179 next_bp = (unsigned long *)state->regs->bp;
164 else 180 else
165 next_bp = (unsigned long *)*state->bp; 181 next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
166 182
167 /* is the next frame pointer an encoded pointer to pt_regs? */ 183 /* is the next frame pointer an encoded pointer to pt_regs? */
168 regs = decode_frame_pointer(next_bp); 184 regs = decode_frame_pointer(next_bp);
@@ -207,6 +223,16 @@ bool unwind_next_frame(struct unwind_state *state)
207 return true; 223 return true;
208 224
209bad_address: 225bad_address:
226 /*
227 * When unwinding a non-current task, the task might actually be
228 * running on another CPU, in which case it could be modifying its
229 * stack while we're reading it. This is generally not a problem and
230 * can be ignored as long as the caller understands that unwinding
231 * another task will not always succeed.
232 */
233 if (state->task != current)
234 goto the_end;
235
210 if (state->regs) { 236 if (state->regs) {
211 printk_deferred_once(KERN_WARNING 237 printk_deferred_once(KERN_WARNING
212 "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n", 238 "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index ec5d7545e6dc..0442d98367ae 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -160,11 +160,12 @@ void save_v86_state(struct kernel_vm86_regs *regs, int retval)
160 160
161static void mark_screen_rdonly(struct mm_struct *mm) 161static void mark_screen_rdonly(struct mm_struct *mm)
162{ 162{
163 struct vm_area_struct *vma;
164 spinlock_t *ptl;
163 pgd_t *pgd; 165 pgd_t *pgd;
164 pud_t *pud; 166 pud_t *pud;
165 pmd_t *pmd; 167 pmd_t *pmd;
166 pte_t *pte; 168 pte_t *pte;
167 spinlock_t *ptl;
168 int i; 169 int i;
169 170
170 down_write(&mm->mmap_sem); 171 down_write(&mm->mmap_sem);
@@ -177,7 +178,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
177 pmd = pmd_offset(pud, 0xA0000); 178 pmd = pmd_offset(pud, 0xA0000);
178 179
179 if (pmd_trans_huge(*pmd)) { 180 if (pmd_trans_huge(*pmd)) {
180 struct vm_area_struct *vma = find_vma(mm, 0xA0000); 181 vma = find_vma(mm, 0xA0000);
181 split_huge_pmd(vma, pmd, 0xA0000); 182 split_huge_pmd(vma, pmd, 0xA0000);
182 } 183 }
183 if (pmd_none_or_clear_bad(pmd)) 184 if (pmd_none_or_clear_bad(pmd))
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 56628a44668b..cedbba0f3402 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -818,6 +818,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
818 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); 818 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
819} 819}
820 820
821static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
822 struct segmented_address addr,
823 void *data,
824 unsigned int size)
825{
826 int rc;
827 ulong linear;
828
829 rc = linearize(ctxt, addr, size, true, &linear);
830 if (rc != X86EMUL_CONTINUE)
831 return rc;
832 return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
833}
834
821/* 835/*
822 * Prefetch the remaining bytes of the instruction without crossing page 836 * Prefetch the remaining bytes of the instruction without crossing page
823 * boundary if they are not in fetch_cache yet. 837 * boundary if they are not in fetch_cache yet.
@@ -1571,7 +1585,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1571 &ctxt->exception); 1585 &ctxt->exception);
1572} 1586}
1573 1587
1574/* Does not support long mode */
1575static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, 1588static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1576 u16 selector, int seg, u8 cpl, 1589 u16 selector, int seg, u8 cpl,
1577 enum x86_transfer_type transfer, 1590 enum x86_transfer_type transfer,
@@ -1608,20 +1621,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1608 1621
1609 rpl = selector & 3; 1622 rpl = selector & 3;
1610 1623
1611 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1612 if ((seg == VCPU_SREG_CS
1613 || (seg == VCPU_SREG_SS
1614 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1615 || seg == VCPU_SREG_TR)
1616 && null_selector)
1617 goto exception;
1618
1619 /* TR should be in GDT only */ 1624 /* TR should be in GDT only */
1620 if (seg == VCPU_SREG_TR && (selector & (1 << 2))) 1625 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1621 goto exception; 1626 goto exception;
1622 1627
1623 if (null_selector) /* for NULL selector skip all following checks */ 1628 /* NULL selector is not valid for TR, CS and (except for long mode) SS */
1629 if (null_selector) {
1630 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1631 goto exception;
1632
1633 if (seg == VCPU_SREG_SS) {
1634 if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1635 goto exception;
1636
1637 /*
1638 * ctxt->ops->set_segment expects the CPL to be in
1639 * SS.DPL, so fake an expand-up 32-bit data segment.
1640 */
1641 seg_desc.type = 3;
1642 seg_desc.p = 1;
1643 seg_desc.s = 1;
1644 seg_desc.dpl = cpl;
1645 seg_desc.d = 1;
1646 seg_desc.g = 1;
1647 }
1648
1649 /* Skip all following checks */
1624 goto load; 1650 goto load;
1651 }
1625 1652
1626 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); 1653 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1627 if (ret != X86EMUL_CONTINUE) 1654 if (ret != X86EMUL_CONTINUE)
@@ -1737,6 +1764,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1737 u16 selector, int seg) 1764 u16 selector, int seg)
1738{ 1765{
1739 u8 cpl = ctxt->ops->cpl(ctxt); 1766 u8 cpl = ctxt->ops->cpl(ctxt);
1767
1768 /*
1769 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1770 * they can load it at CPL<3 (Intel's manual says only LSS can,
1771 * but it's wrong).
1772 *
1773 * However, the Intel manual says that putting IST=1/DPL=3 in
1774 * an interrupt gate will result in SS=3 (the AMD manual instead
1775 * says it doesn't), so allow SS=3 in __load_segment_descriptor
1776 * and only forbid it here.
1777 */
1778 if (seg == VCPU_SREG_SS && selector == 3 &&
1779 ctxt->mode == X86EMUL_MODE_PROT64)
1780 return emulate_exception(ctxt, GP_VECTOR, 0, true);
1781
1740 return __load_segment_descriptor(ctxt, selector, seg, cpl, 1782 return __load_segment_descriptor(ctxt, selector, seg, cpl,
1741 X86_TRANSFER_NONE, NULL); 1783 X86_TRANSFER_NONE, NULL);
1742} 1784}
@@ -3685,8 +3727,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3685 } 3727 }
3686 /* Disable writeback. */ 3728 /* Disable writeback. */
3687 ctxt->dst.type = OP_NONE; 3729 ctxt->dst.type = OP_NONE;
3688 return segmented_write(ctxt, ctxt->dst.addr.mem, 3730 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3689 &desc_ptr, 2 + ctxt->op_bytes); 3731 &desc_ptr, 2 + ctxt->op_bytes);
3690} 3732}
3691 3733
3692static int em_sgdt(struct x86_emulate_ctxt *ctxt) 3734static int em_sgdt(struct x86_emulate_ctxt *ctxt)
@@ -3932,7 +3974,7 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3932 else 3974 else
3933 size = offsetof(struct fxregs_state, xmm_space[0]); 3975 size = offsetof(struct fxregs_state, xmm_space[0]);
3934 3976
3935 return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size); 3977 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
3936} 3978}
3937 3979
3938static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt, 3980static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
@@ -3974,7 +4016,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
3974 if (rc != X86EMUL_CONTINUE) 4016 if (rc != X86EMUL_CONTINUE)
3975 return rc; 4017 return rc;
3976 4018
3977 rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512); 4019 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
3978 if (rc != X86EMUL_CONTINUE) 4020 if (rc != X86EMUL_CONTINUE)
3979 return rc; 4021 return rc;
3980 4022
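
The two hunks above split the NULL-selector policy across
__load_segment_descriptor() and load_segment_descriptor(). A condensed
standalone restatement of the combined rules (the enum names and the
predicate are invented for this sketch):

#include <stdbool.h>
#include <stdio.h>

enum seg { SEG_CS, SEG_SS, SEG_TR, SEG_DS };
enum mode { MODE_PROT32, MODE_PROT64 };

/*
 * A NULL selector (index 0, TI=0) is never legal for CS or TR; for SS it
 * is legal only in long mode when RPL matches CPL, and selector 3 (NULL
 * with RPL=3) is additionally rejected on the MOV/POP/LSS path.
 */
static bool null_selector_ok(enum seg seg, enum mode mode,
			     unsigned rpl, unsigned cpl, unsigned selector)
{
	if (seg == SEG_CS || seg == SEG_TR)
		return false;
	if (seg == SEG_SS) {
		if (mode != MODE_PROT64 || rpl != cpl)
			return false;
		if (selector == 3)
			return false; /* forbidden in load_segment_descriptor() */
	}
	return true;
}

int main(void)
{
	/* SS = 0 at CPL 0 in long mode: allowed (faked expand-up segment). */
	printf("%d\n", null_selector_ok(SEG_SS, MODE_PROT64, 0, 0, 0));
	/* SS = 3 at CPL 3 in long mode: rejected with #GP. */
	printf("%d\n", null_selector_ok(SEG_SS, MODE_PROT64, 3, 3, 3));
	return 0;
}
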
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5fe290c1b7d8..2f6ef5121a4c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2426,3 +2426,9 @@ void kvm_lapic_init(void)
2426 jump_label_rate_limit(&apic_hw_disabled, HZ); 2426 jump_label_rate_limit(&apic_hw_disabled, HZ);
2427 jump_label_rate_limit(&apic_sw_disabled, HZ); 2427 jump_label_rate_limit(&apic_sw_disabled, HZ);
2428} 2428}
2429
2430void kvm_lapic_exit(void)
2431{
2432 static_key_deferred_flush(&apic_hw_disabled);
2433 static_key_deferred_flush(&apic_sw_disabled);
2434}
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index e0c80233b3e1..ff8039d61672 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -110,6 +110,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
110 110
111int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data); 111int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
112void kvm_lapic_init(void); 112void kvm_lapic_init(void);
113void kvm_lapic_exit(void);
113 114
114#define VEC_POS(v) ((v) & (32 - 1)) 115#define VEC_POS(v) ((v) & (32 - 1))
115#define REG_POS(v) (((v) >> 5) << 4) 116#define REG_POS(v) (((v) >> 5) << 4)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 24db5fb6f575..a236decb81e4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -132,12 +132,6 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
132 132
133#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5 133#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
134 134
135#define VMX_VPID_EXTENT_SUPPORTED_MASK \
136 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
137 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
138 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
139 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
140
141/* 135/*
142 * Hyper-V requires all of these, so mark them as supported even though 136 * Hyper-V requires all of these, so mark them as supported even though
143 * they are just treated the same as all-context. 137 * they are just treated the same as all-context.
@@ -10473,12 +10467,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
10473 !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) { 10467 !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
10474 nested_vmx_entry_failure(vcpu, vmcs12, 10468 nested_vmx_entry_failure(vcpu, vmcs12,
10475 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); 10469 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
10476 goto out; 10470 return 1;
10477 } 10471 }
10478 if (vmcs12->vmcs_link_pointer != -1ull) { 10472 if (vmcs12->vmcs_link_pointer != -1ull) {
10479 nested_vmx_entry_failure(vcpu, vmcs12, 10473 nested_vmx_entry_failure(vcpu, vmcs12,
10480 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR); 10474 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
10481 goto out; 10475 return 1;
10482 } 10476 }
10483 10477
10484 /* 10478 /*
@@ -10498,7 +10492,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
10498 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) { 10492 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
10499 nested_vmx_entry_failure(vcpu, vmcs12, 10493 nested_vmx_entry_failure(vcpu, vmcs12,
10500 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); 10494 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
10501 goto out; 10495 return 1;
10502 } 10496 }
10503 } 10497 }
10504 10498
@@ -10516,7 +10510,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
10516 ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) { 10510 ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
10517 nested_vmx_entry_failure(vcpu, vmcs12, 10511 nested_vmx_entry_failure(vcpu, vmcs12,
10518 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); 10512 EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
10519 goto out; 10513 return 1;
10520 } 10514 }
10521 } 10515 }
10522 10516
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 51ccfe08e32f..e52c9088660f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3070,6 +3070,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
3070 memset(&events->reserved, 0, sizeof(events->reserved)); 3070 memset(&events->reserved, 0, sizeof(events->reserved));
3071} 3071}
3072 3072
3073static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
3074
3073static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 3075static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
3074 struct kvm_vcpu_events *events) 3076 struct kvm_vcpu_events *events)
3075{ 3077{
@@ -3106,10 +3108,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
3106 vcpu->arch.apic->sipi_vector = events->sipi_vector; 3108 vcpu->arch.apic->sipi_vector = events->sipi_vector;
3107 3109
3108 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { 3110 if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
3111 u32 hflags = vcpu->arch.hflags;
3109 if (events->smi.smm) 3112 if (events->smi.smm)
3110 vcpu->arch.hflags |= HF_SMM_MASK; 3113 hflags |= HF_SMM_MASK;
3111 else 3114 else
3112 vcpu->arch.hflags &= ~HF_SMM_MASK; 3115 hflags &= ~HF_SMM_MASK;
3116 kvm_set_hflags(vcpu, hflags);
3117
3113 vcpu->arch.smi_pending = events->smi.pending; 3118 vcpu->arch.smi_pending = events->smi.pending;
3114 if (events->smi.smm_inside_nmi) 3119 if (events->smi.smm_inside_nmi)
3115 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; 3120 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
@@ -3177,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
3177 memcpy(dest, xsave, XSAVE_HDR_OFFSET); 3182 memcpy(dest, xsave, XSAVE_HDR_OFFSET);
3178 3183
3179 /* Set XSTATE_BV */ 3184 /* Set XSTATE_BV */
3185 xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
3180 *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv; 3186 *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
3181 3187
3182 /* 3188 /*
@@ -3337,6 +3343,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3337 3343
3338 switch (cap->cap) { 3344 switch (cap->cap) {
3339 case KVM_CAP_HYPERV_SYNIC: 3345 case KVM_CAP_HYPERV_SYNIC:
3346 if (!irqchip_in_kernel(vcpu->kvm))
3347 return -EINVAL;
3340 return kvm_hv_activate_synic(vcpu); 3348 return kvm_hv_activate_synic(vcpu);
3341 default: 3349 default:
3342 return -EINVAL; 3350 return -EINVAL;
@@ -6040,6 +6048,7 @@ out:
6040 6048
6041void kvm_arch_exit(void) 6049void kvm_arch_exit(void)
6042{ 6050{
6051 kvm_lapic_exit();
6043 perf_unregister_guest_info_callbacks(&kvm_guest_cbs); 6052 perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
6044 6053
6045 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 6054 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -6163,7 +6172,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
6163 6172
6164 kvm_x86_ops->patch_hypercall(vcpu, instruction); 6173 kvm_x86_ops->patch_hypercall(vcpu, instruction);
6165 6174
6166 return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); 6175 return emulator_write_emulated(ctxt, rip, instruction, 3,
6176 &ctxt->exception);
6167} 6177}
6168 6178
6169static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) 6179static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
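
The fill_xsave() hunk clamps the XSTATE_BV it reports to the features the
guest may actually use. The bit arithmetic as a standalone sketch; the
feature bit positions are the architectural ones, but the surrounding
values are invented:

#include <stdint.h>
#include <stdio.h>

#define XFEATURE_MASK_FP      (1ull << 0)
#define XFEATURE_MASK_SSE     (1ull << 1)
#define XFEATURE_MASK_YMM     (1ull << 2)
#define XFEATURE_MASK_FPSSE   (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
#define XFEATURE_MASK_AVX512  (7ull << 5)

int main(void)
{
	/* Invented guest: FP/SSE/AVX supported, no AVX-512. */
	uint64_t guest_supported_xcr0 = XFEATURE_MASK_FPSSE | XFEATURE_MASK_YMM;
	/* Host-side save area claims AVX-512 state as well. */
	uint64_t xstate_bv = XFEATURE_MASK_FPSSE | XFEATURE_MASK_AVX512;

	/* The hunk's clamp: never report state the guest cannot use. */
	xstate_bv &= guest_supported_xcr0 | XFEATURE_MASK_FPSSE;

	printf("sanitized XSTATE_BV = %#llx\n", (unsigned long long)xstate_bv);
	return 0;
}
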
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index ea9c49adaa1f..8aa6bea1cd6c 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -15,6 +15,7 @@
15#include <linux/debugfs.h> 15#include <linux/debugfs.h>
16#include <linux/mm.h> 16#include <linux/mm.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/sched.h>
18#include <linux/seq_file.h> 19#include <linux/seq_file.h>
19 20
20#include <asm/pgtable.h> 21#include <asm/pgtable.h>
@@ -406,6 +407,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
406 } else 407 } else
407 note_page(m, &st, __pgprot(0), 1); 408 note_page(m, &st, __pgprot(0), 1);
408 409
410 cond_resched();
409 start++; 411 start++;
410 } 412 }
411 413
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 324e5713d386..af59f808742f 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
293 * We were not able to extract an address from the instruction, 293 * We were not able to extract an address from the instruction,
294 * probably because there was something invalid in it. 294 * probably because there was something invalid in it.
295 */ 295 */
296 if (info->si_addr == (void *)-1) { 296 if (info->si_addr == (void __user *)-1) {
297 err = -EINVAL; 297 err = -EINVAL;
298 goto err_out; 298 goto err_out;
299 } 299 }
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index e76d1af60f7a..bb660e53cbd6 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1172,6 +1172,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1172 set_memory_ro((unsigned long)header, header->pages); 1172 set_memory_ro((unsigned long)header, header->pages);
1173 prog->bpf_func = (void *)image; 1173 prog->bpf_func = (void *)image;
1174 prog->jited = 1; 1174 prog->jited = 1;
1175 } else {
1176 prog = orig_prog;
1175 } 1177 }
1176 1178
1177out_addrs: 1179out_addrs:
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 3cd69832d7f4..3961103e9176 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
114 DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"), 114 DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
115 }, 115 },
116 }, 116 },
117 /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
118 {
119 .callback = set_nouse_crs,
120 .ident = "Supermicro X8DTH",
121 .matches = {
122 DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
123 DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
124 DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
125 },
126 },
117 127
118 /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */ 128 /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
119 { 129 {
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 936a488d6cf6..274dfc481849 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -210,6 +210,70 @@ int __init efi_memblock_x86_reserve_range(void)
210 return 0; 210 return 0;
211} 211}
212 212
213#define OVERFLOW_ADDR_SHIFT (64 - EFI_PAGE_SHIFT)
214#define OVERFLOW_ADDR_MASK (U64_MAX << OVERFLOW_ADDR_SHIFT)
215#define U64_HIGH_BIT (~(U64_MAX >> 1))
216
217static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
218{
219 u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
220 u64 end_hi = 0;
221 char buf[64];
222
223 if (md->num_pages == 0) {
224 end = 0;
225 } else if (md->num_pages > EFI_PAGES_MAX ||
226 EFI_PAGES_MAX - md->num_pages <
227 (md->phys_addr >> EFI_PAGE_SHIFT)) {
228 end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
229 >> OVERFLOW_ADDR_SHIFT;
230
231 if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
232 end_hi += 1;
233 } else {
234 return true;
235 }
236
237 pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
238
239 if (end_hi) {
240 pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
241 i, efi_md_typeattr_format(buf, sizeof(buf), md),
242 md->phys_addr, end_hi, end);
243 } else {
244 pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
245 i, efi_md_typeattr_format(buf, sizeof(buf), md),
246 md->phys_addr, end);
247 }
248 return false;
249}
250
251static void __init efi_clean_memmap(void)
252{
253 efi_memory_desc_t *out = efi.memmap.map;
254 const efi_memory_desc_t *in = out;
255 const efi_memory_desc_t *end = efi.memmap.map_end;
256 int i, n_removal;
257
258 for (i = n_removal = 0; in < end; i++) {
259 if (efi_memmap_entry_valid(in, i)) {
260 if (out != in)
261 memcpy(out, in, efi.memmap.desc_size);
262 out = (void *)out + efi.memmap.desc_size;
263 } else {
264 n_removal++;
265 }
266 in = (void *)in + efi.memmap.desc_size;
267 }
268
269 if (n_removal > 0) {
270 u64 size = efi.memmap.nr_map - n_removal;
271
272 pr_warn("Removing %d invalid memory map entries.\n", n_removal);
273 efi_memmap_install(efi.memmap.phys_map, size);
274 }
275}
276
213void __init efi_print_memmap(void) 277void __init efi_print_memmap(void)
214{ 278{
215 efi_memory_desc_t *md; 279 efi_memory_desc_t *md;
@@ -472,6 +536,8 @@ void __init efi_init(void)
472 } 536 }
473 } 537 }
474 538
539 efi_clean_memmap();
540
475 if (efi_enabled(EFI_DBG)) 541 if (efi_enabled(EFI_DBG))
476 efi_print_memmap(); 542 efi_print_memmap();
477} 543}
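
efi_memmap_entry_valid() above rejects descriptors whose page run wraps the
64-bit address space. The core overflow test as a standalone check (the
descriptor struct and the end_hi pretty-printing are omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12
#define EFI_PAGES_MAX  (UINT64_MAX >> EFI_PAGE_SHIFT)

/* Does phys_addr plus num_pages pages stay below 2^64? */
static bool entry_valid(uint64_t phys_addr, uint64_t num_pages)
{
	if (num_pages == 0)
		return false;
	if (num_pages > EFI_PAGES_MAX ||
	    EFI_PAGES_MAX - num_pages < (phys_addr >> EFI_PAGE_SHIFT))
		return false;
	return true;
}

int main(void)
{
	/* A sane 1 MiB entry at 1 GiB. */
	printf("%d\n", entry_valid(1ull << 30, 256));
	/* A firmware bug: the range wraps past the top of memory. */
	printf("%d\n", entry_valid(0xfffffffffffff000ull, 2));
	return 0;
}
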
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 319148bd4b05..2f25a363068c 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -269,6 +269,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
269 efi_scratch.use_pgd = true; 269 efi_scratch.use_pgd = true;
270 270
271 /* 271 /*
 272 * Certain firmware versions are way too sentimental and still believe
273 * they are exclusive and unquestionable owners of the first physical page,
274 * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
275 * (but then write-access it later during SetVirtualAddressMap()).
276 *
277 * Create a 1:1 mapping for this page, to avoid triple faults during early
278 * boot with such firmware. We are free to hand this page to the BIOS,
279 * as trim_bios_range() will reserve the first page and isolate it away
280 * from memory allocators anyway.
281 */
282 if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
283 pr_err("Failed to create 1:1 mapping for the first page!\n");
284 return 1;
285 }
286
287 /*
272 * When making calls to the firmware everything needs to be 1:1 288 * When making calls to the firmware everything needs to be 1:1
273 * mapped and addressable with 32-bit pointers. Map the kernel 289 * mapped and addressable with 32-bit pointers. Map the kernel
274 * text and allocate a new stack because we can't rely on the 290 * text and allocate a new stack because we can't rely on the
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 10aca63a50d7..30031d5293c4 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -214,7 +214,7 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
214 214
215 new_size = efi.memmap.desc_size * num_entries; 215 new_size = efi.memmap.desc_size * num_entries;
216 216
217 new_phys = memblock_alloc(new_size, 0); 217 new_phys = efi_memmap_alloc(num_entries);
218 if (!new_phys) { 218 if (!new_phys) {
219 pr_err("Could not allocate boot services memmap\n"); 219 pr_err("Could not allocate boot services memmap\n");
220 return; 220 return;
@@ -355,7 +355,7 @@ void __init efi_free_boot_services(void)
355 } 355 }
356 356
357 new_size = efi.memmap.desc_size * num_entries; 357 new_size = efi.memmap.desc_size * num_entries;
358 new_phys = memblock_alloc(new_size, 0); 358 new_phys = efi_memmap_alloc(num_entries);
359 if (!new_phys) { 359 if (!new_phys) {
360 pr_err("Failed to allocate new EFI memmap\n"); 360 pr_err("Failed to allocate new EFI memmap\n");
361 return; 361 return;
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index 61b5ed2b7d40..90e4f2a6625b 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -15,7 +15,7 @@ obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
15obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o 15obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
16obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o 16obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
17# SPI Devices 17# SPI Devices
18obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o 18obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_mrfld_spidev.o
19# I2C Devices 19# I2C Devices
20obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o 20obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
21obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o 21obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
index 30c601b399ee..27186ad654c9 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
@@ -11,6 +11,7 @@
11 * of the License. 11 * of the License.
12 */ 12 */
13 13
14#include <linux/err.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/sfi.h> 16#include <linux/sfi.h>
16#include <linux/spi/pxa2xx_spi.h> 17#include <linux/spi/pxa2xx_spi.h>
@@ -34,6 +35,9 @@ static void __init *spidev_platform_data(void *info)
34{ 35{
35 struct spi_board_info *spi_info = info; 36 struct spi_board_info *spi_info = info;
36 37
38 if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
39 return ERR_PTR(-ENODEV);
40
37 spi_info->mode = SPI_MODE_0; 41 spi_info->mode = SPI_MODE_0;
38 spi_info->controller_data = &spidev_spi_chip; 42 spi_info->controller_data = &spidev_spi_chip;
39 43
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index a9fafb5c8738..a0b36a9d5df1 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -48,7 +48,7 @@ int __init pci_xen_swiotlb_detect(void)
48 * activate this IOMMU. If running as PV privileged, activate it 48 * activate this IOMMU. If running as PV privileged, activate it
 49 * regardless. 49 * regardless.
50 */ 50 */
51 if ((xen_initial_domain() || swiotlb || swiotlb_force)) 51 if (xen_initial_domain() || swiotlb || swiotlb_force == SWIOTLB_FORCE)
52 xen_swiotlb = 1; 52 xen_swiotlb = 1;
53 53
54 /* If we are running under Xen, we MUST disable the native SWIOTLB. 54 /* If we are running under Xen, we MUST disable the native SWIOTLB.
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 8c394e30e5fe..f3f7b41116f7 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -713,10 +713,9 @@ static void __init xen_reserve_xen_mfnlist(void)
713 size = PFN_PHYS(xen_start_info->nr_p2m_frames); 713 size = PFN_PHYS(xen_start_info->nr_p2m_frames);
714 } 714 }
715 715
716 if (!xen_is_e820_reserved(start, size)) { 716 memblock_reserve(start, size);
717 memblock_reserve(start, size); 717 if (!xen_is_e820_reserved(start, size))
718 return; 718 return;
719 }
720 719
721#ifdef CONFIG_X86_32 720#ifdef CONFIG_X86_32
722 /* 721 /*
@@ -727,6 +726,7 @@ static void __init xen_reserve_xen_mfnlist(void)
727 BUG(); 726 BUG();
728#else 727#else
729 xen_relocate_p2m(); 728 xen_relocate_p2m();
729 memblock_free(start, size);
730#endif 730#endif
731} 731}
732 732
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 848e8568fb3c..8fd4be610607 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -419,7 +419,7 @@ subsys_initcall(topology_init);
419 419
420void cpu_reset(void) 420void cpu_reset(void)
421{ 421{
422#if XCHAL_HAVE_PTP_MMU 422#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
423 local_irq_disable(); 423 local_irq_disable();
424 /* 424 /*
425 * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must 425 * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
diff --git a/block/blk-lib.c b/block/blk-lib.c
index ed89c8f4b2a0..ed1e78e24db0 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -301,23 +301,11 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
301 if ((sector | nr_sects) & bs_mask) 301 if ((sector | nr_sects) & bs_mask)
302 return -EINVAL; 302 return -EINVAL;
303 303
304 if (discard) {
305 ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
306 BLKDEV_DISCARD_ZERO, biop);
307 if (ret == 0 || (ret && ret != -EOPNOTSUPP))
308 goto out;
309 }
310
311 ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, 304 ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
312 biop); 305 biop);
313 if (ret == 0 || (ret && ret != -EOPNOTSUPP)) 306 if (ret == 0 || (ret && ret != -EOPNOTSUPP))
314 goto out; 307 goto out;
315 308
316 ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
317 ZERO_PAGE(0), biop);
318 if (ret == 0 || (ret && ret != -EOPNOTSUPP))
319 goto out;
320
321 ret = 0; 309 ret = 0;
322 while (nr_sects != 0) { 310 while (nr_sects != 0) {
323 bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES), 311 bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
@@ -370,6 +358,16 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
370 struct bio *bio = NULL; 358 struct bio *bio = NULL;
371 struct blk_plug plug; 359 struct blk_plug plug;
372 360
361 if (discard) {
362 if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
363 BLKDEV_DISCARD_ZERO))
364 return 0;
365 }
366
367 if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
368 ZERO_PAGE(0)))
369 return 0;
370
373 blk_start_plug(&plug); 371 blk_start_plug(&plug);
374 ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, 372 ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
375 &bio, discard); 373 &bio, discard);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a8e67a155d04..c3400b5444a7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -912,7 +912,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
912static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx) 912static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
913{ 913{
914 LIST_HEAD(rq_list); 914 LIST_HEAD(rq_list);
915 LIST_HEAD(driver_list);
916 915
917 if (unlikely(blk_mq_hctx_stopped(hctx))) 916 if (unlikely(blk_mq_hctx_stopped(hctx)))
918 return; 917 return;
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 6e82769f4042..f0a9c07b4c7a 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -544,6 +544,8 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
544 * the timer to kick off queuing again. 544 * the timer to kick off queuing again.
545 */ 545 */
546static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock) 546static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
547 __releases(lock)
548 __acquires(lock)
547{ 549{
548 struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd()); 550 struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
549 DEFINE_WAIT(wait); 551 DEFINE_WAIT(wait);
@@ -558,13 +560,12 @@ static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
558 if (may_queue(rwb, rqw, &wait, rw)) 560 if (may_queue(rwb, rqw, &wait, rw))
559 break; 561 break;
560 562
561 if (lock) 563 if (lock) {
562 spin_unlock_irq(lock); 564 spin_unlock_irq(lock);
563 565 io_schedule();
564 io_schedule();
565
566 if (lock)
567 spin_lock_irq(lock); 566 spin_lock_irq(lock);
567 } else
568 io_schedule();
568 } while (1); 569 } while (1);
569 570
570 finish_wait(&rqw->wait, &wait); 571 finish_wait(&rqw->wait, &wait);
@@ -595,7 +596,7 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
595 * in an irq held spinlock, if it holds one when calling this function. 596 * in an irq held spinlock, if it holds one when calling this function.
596 * If we do sleep, we'll release and re-grab it. 597 * If we do sleep, we'll release and re-grab it.
597 */ 598 */
598unsigned int wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock) 599enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
599{ 600{
600 unsigned int ret = 0; 601 unsigned int ret = 0;
601 602
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 472211fa183a..3bd15d8095b1 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -16,7 +16,7 @@
16static inline sector_t blk_zone_start(struct request_queue *q, 16static inline sector_t blk_zone_start(struct request_queue *q,
17 sector_t sector) 17 sector_t sector)
18{ 18{
19 sector_t zone_mask = blk_queue_zone_size(q) - 1; 19 sector_t zone_mask = blk_queue_zone_sectors(q) - 1;
20 20
21 return sector & ~zone_mask; 21 return sector & ~zone_mask;
22} 22}
@@ -222,7 +222,7 @@ int blkdev_reset_zones(struct block_device *bdev,
222 return -EINVAL; 222 return -EINVAL;
223 223
224 /* Check alignment (handle eventual smaller last zone) */ 224 /* Check alignment (handle eventual smaller last zone) */
225 zone_sectors = blk_queue_zone_size(q); 225 zone_sectors = blk_queue_zone_sectors(q);
226 if (sector & (zone_sectors - 1)) 226 if (sector & (zone_sectors - 1))
227 return -EINVAL; 227 return -EINVAL;
228 228
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c73a6fcaeb9d..838f07e2b64a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3758,7 +3758,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3758} 3758}
3759 3759
3760#ifdef CONFIG_CFQ_GROUP_IOSCHED 3760#ifdef CONFIG_CFQ_GROUP_IOSCHED
3761static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) 3761static bool check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3762{ 3762{
3763 struct cfq_data *cfqd = cic_to_cfqd(cic); 3763 struct cfq_data *cfqd = cic_to_cfqd(cic);
3764 struct cfq_queue *cfqq; 3764 struct cfq_queue *cfqq;
@@ -3775,15 +3775,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3775 * spuriously on a newly created cic but there's no harm. 3775 * spuriously on a newly created cic but there's no harm.
3776 */ 3776 */
3777 if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr)) 3777 if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
3778 return; 3778 return nonroot_cg;
3779
3780 /*
3781 * If we have a non-root cgroup, we can depend on that to
3782 * do proper throttling of writes. Turn off wbt for that
3783 * case, if it was enabled by default.
3784 */
3785 if (nonroot_cg)
3786 wbt_disable_default(cfqd->queue);
3787 3779
3788 /* 3780 /*
3789 * Drop reference to queues. New queues will be assigned in new 3781 * Drop reference to queues. New queues will be assigned in new
@@ -3804,9 +3796,13 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3804 } 3796 }
3805 3797
3806 cic->blkcg_serial_nr = serial_nr; 3798 cic->blkcg_serial_nr = serial_nr;
3799 return nonroot_cg;
3807} 3800}
3808#else 3801#else
3809static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { } 3802static inline bool check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
3803{
3804 return false;
3805}
3810#endif /* CONFIG_CFQ_GROUP_IOSCHED */ 3806#endif /* CONFIG_CFQ_GROUP_IOSCHED */
3811 3807
3812static struct cfq_queue ** 3808static struct cfq_queue **
@@ -4448,11 +4444,12 @@ cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4448 const int rw = rq_data_dir(rq); 4444 const int rw = rq_data_dir(rq);
4449 const bool is_sync = rq_is_sync(rq); 4445 const bool is_sync = rq_is_sync(rq);
4450 struct cfq_queue *cfqq; 4446 struct cfq_queue *cfqq;
4447 bool disable_wbt;
4451 4448
4452 spin_lock_irq(q->queue_lock); 4449 spin_lock_irq(q->queue_lock);
4453 4450
4454 check_ioprio_changed(cic, bio); 4451 check_ioprio_changed(cic, bio);
4455 check_blkcg_changed(cic, bio); 4452 disable_wbt = check_blkcg_changed(cic, bio);
4456new_queue: 4453new_queue:
4457 cfqq = cic_to_cfqq(cic, is_sync); 4454 cfqq = cic_to_cfqq(cic, is_sync);
4458 if (!cfqq || cfqq == &cfqd->oom_cfqq) { 4455 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
@@ -4488,6 +4485,10 @@ new_queue:
4488 rq->elv.priv[0] = cfqq; 4485 rq->elv.priv[0] = cfqq;
4489 rq->elv.priv[1] = cfqq->cfqg; 4486 rq->elv.priv[1] = cfqq->cfqg;
4490 spin_unlock_irq(q->queue_lock); 4487 spin_unlock_irq(q->queue_lock);
4488
4489 if (disable_wbt)
4490 wbt_disable_default(q);
4491
4491 return 0; 4492 return 0;
4492} 4493}
4493 4494
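
check_blkcg_changed() now only reports whether writeback throttling should
go off, so wbt_disable_default() runs after queue_lock is dropped. The
"decide under the lock, act outside it" shape, sketched standalone with a
pthread mutex standing in for the queue lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static bool check_under_lock(void)
{
	/* Only inspect state here; defer heavyweight side effects. */
	return true;
}

static void disable_wbt(void)
{
	/* Stand-in for wbt_disable_default(), which must not run
	 * with the queue lock held. */
	puts("wbt disabled");
}

int main(void)
{
	bool do_disable;

	pthread_mutex_lock(&queue_lock);
	do_disable = check_under_lock();  /* decision made under the lock */
	pthread_mutex_unlock(&queue_lock);

	if (do_disable)
		disable_wbt();            /* side effect performed outside */
	return 0;
}
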
diff --git a/block/partition-generic.c b/block/partition-generic.c
index d7beb6bbbf66..7afb9907821f 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -434,7 +434,7 @@ static bool part_zone_aligned(struct gendisk *disk,
434 struct block_device *bdev, 434 struct block_device *bdev,
435 sector_t from, sector_t size) 435 sector_t from, sector_t size)
436{ 436{
437 unsigned int zone_size = bdev_zone_size(bdev); 437 unsigned int zone_sectors = bdev_zone_sectors(bdev);
438 438
439 /* 439 /*
440 * If this function is called, then the disk is a zoned block device 440 * If this function is called, then the disk is a zoned block device
@@ -446,7 +446,7 @@ static bool part_zone_aligned(struct gendisk *disk,
446 * regular block devices (no zone operation) and their zone size will 446 * regular block devices (no zone operation) and their zone size will
447 * be reported as 0. Allow this case. 447 * be reported as 0. Allow this case.
448 */ 448 */
449 if (!zone_size) 449 if (!zone_sectors)
450 return true; 450 return true;
451 451
452 /* 452 /*
@@ -455,24 +455,24 @@ static bool part_zone_aligned(struct gendisk *disk,
455 * use it. Check the zone size too: it should be a power of 2 number 455 * use it. Check the zone size too: it should be a power of 2 number
456 * of sectors. 456 * of sectors.
457 */ 457 */
458 if (WARN_ON_ONCE(!is_power_of_2(zone_size))) { 458 if (WARN_ON_ONCE(!is_power_of_2(zone_sectors))) {
459 u32 rem; 459 u32 rem;
460 460
461 div_u64_rem(from, zone_size, &rem); 461 div_u64_rem(from, zone_sectors, &rem);
462 if (rem) 462 if (rem)
463 return false; 463 return false;
464 if ((from + size) < get_capacity(disk)) { 464 if ((from + size) < get_capacity(disk)) {
465 div_u64_rem(size, zone_size, &rem); 465 div_u64_rem(size, zone_sectors, &rem);
466 if (rem) 466 if (rem)
467 return false; 467 return false;
468 } 468 }
469 469
470 } else { 470 } else {
471 471
472 if (from & (zone_size - 1)) 472 if (from & (zone_sectors - 1))
473 return false; 473 return false;
474 if ((from + size) < get_capacity(disk) && 474 if ((from + size) < get_capacity(disk) &&
475 (size & (zone_size - 1))) 475 (size & (zone_sectors - 1)))
476 return false; 476 return false;
477 477
478 } 478 }
diff --git a/crypto/algapi.c b/crypto/algapi.c
index df939b54b09f..1fad2a6b3bbb 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
356 struct crypto_larval *larval; 356 struct crypto_larval *larval;
357 int err; 357 int err;
358 358
359 alg->cra_flags &= ~CRYPTO_ALG_DEAD;
359 err = crypto_check_alg(alg); 360 err = crypto_check_alg(alg);
360 if (err) 361 if (err)
361 return err; 362 return err;
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index f849311e9fd4..533265f110e0 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -661,9 +661,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
661unlock: 661unlock:
662 list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) { 662 list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
663 af_alg_free_sg(&rsgl->sgl); 663 af_alg_free_sg(&rsgl->sgl);
664 list_del(&rsgl->list);
664 if (rsgl != &ctx->first_rsgl) 665 if (rsgl != &ctx->first_rsgl)
665 sock_kfree_s(sk, rsgl, sizeof(*rsgl)); 666 sock_kfree_s(sk, rsgl, sizeof(*rsgl));
666 list_del(&rsgl->list);
667 } 667 }
668 INIT_LIST_HEAD(&ctx->list); 668 INIT_LIST_HEAD(&ctx->list);
669 aead_wmem_wakeup(sk); 669 aead_wmem_wakeup(sk);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index f616ad74cce7..44e888b0b041 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1461,16 +1461,25 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
1461 for (i = 0; i < ctcount; i++) { 1461 for (i = 0; i < ctcount; i++) {
1462 unsigned int dlen = COMP_BUF_SIZE; 1462 unsigned int dlen = COMP_BUF_SIZE;
1463 int ilen = ctemplate[i].inlen; 1463 int ilen = ctemplate[i].inlen;
1464 void *input_vec;
1464 1465
1466 input_vec = kmalloc(ilen, GFP_KERNEL);
1467 if (!input_vec) {
1468 ret = -ENOMEM;
1469 goto out;
1470 }
1471
1472 memcpy(input_vec, ctemplate[i].input, ilen);
1465 memset(output, 0, dlen); 1473 memset(output, 0, dlen);
1466 init_completion(&result.completion); 1474 init_completion(&result.completion);
1467 sg_init_one(&src, ctemplate[i].input, ilen); 1475 sg_init_one(&src, input_vec, ilen);
1468 sg_init_one(&dst, output, dlen); 1476 sg_init_one(&dst, output, dlen);
1469 1477
1470 req = acomp_request_alloc(tfm); 1478 req = acomp_request_alloc(tfm);
1471 if (!req) { 1479 if (!req) {
1472 pr_err("alg: acomp: request alloc failed for %s\n", 1480 pr_err("alg: acomp: request alloc failed for %s\n",
1473 algo); 1481 algo);
1482 kfree(input_vec);
1474 ret = -ENOMEM; 1483 ret = -ENOMEM;
1475 goto out; 1484 goto out;
1476 } 1485 }
@@ -1483,6 +1492,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
1483 if (ret) { 1492 if (ret) {
1484 pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", 1493 pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
1485 i + 1, algo, -ret); 1494 i + 1, algo, -ret);
1495 kfree(input_vec);
1486 acomp_request_free(req); 1496 acomp_request_free(req);
1487 goto out; 1497 goto out;
1488 } 1498 }
@@ -1491,6 +1501,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
1491 pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n", 1501 pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
1492 i + 1, algo, req->dlen); 1502 i + 1, algo, req->dlen);
1493 ret = -EINVAL; 1503 ret = -EINVAL;
1504 kfree(input_vec);
1494 acomp_request_free(req); 1505 acomp_request_free(req);
1495 goto out; 1506 goto out;
1496 } 1507 }
@@ -1500,26 +1511,37 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
1500 i + 1, algo); 1511 i + 1, algo);
1501 hexdump(output, req->dlen); 1512 hexdump(output, req->dlen);
1502 ret = -EINVAL; 1513 ret = -EINVAL;
1514 kfree(input_vec);
1503 acomp_request_free(req); 1515 acomp_request_free(req);
1504 goto out; 1516 goto out;
1505 } 1517 }
1506 1518
1519 kfree(input_vec);
1507 acomp_request_free(req); 1520 acomp_request_free(req);
1508 } 1521 }
1509 1522
1510 for (i = 0; i < dtcount; i++) { 1523 for (i = 0; i < dtcount; i++) {
1511 unsigned int dlen = COMP_BUF_SIZE; 1524 unsigned int dlen = COMP_BUF_SIZE;
1512 int ilen = dtemplate[i].inlen; 1525 int ilen = dtemplate[i].inlen;
1526 void *input_vec;
1527
1528 input_vec = kmalloc(ilen, GFP_KERNEL);
1529 if (!input_vec) {
1530 ret = -ENOMEM;
1531 goto out;
1532 }
1513 1533
1534 memcpy(input_vec, dtemplate[i].input, ilen);
1514 memset(output, 0, dlen); 1535 memset(output, 0, dlen);
1515 init_completion(&result.completion); 1536 init_completion(&result.completion);
1516 sg_init_one(&src, dtemplate[i].input, ilen); 1537 sg_init_one(&src, input_vec, ilen);
1517 sg_init_one(&dst, output, dlen); 1538 sg_init_one(&dst, output, dlen);
1518 1539
1519 req = acomp_request_alloc(tfm); 1540 req = acomp_request_alloc(tfm);
1520 if (!req) { 1541 if (!req) {
1521 pr_err("alg: acomp: request alloc failed for %s\n", 1542 pr_err("alg: acomp: request alloc failed for %s\n",
1522 algo); 1543 algo);
1544 kfree(input_vec);
1523 ret = -ENOMEM; 1545 ret = -ENOMEM;
1524 goto out; 1546 goto out;
1525 } 1547 }
@@ -1532,6 +1554,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
1532 if (ret) { 1554 if (ret) {
1533 pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n", 1555 pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
1534 i + 1, algo, -ret); 1556 i + 1, algo, -ret);
1557 kfree(input_vec);
1535 acomp_request_free(req); 1558 acomp_request_free(req);
1536 goto out; 1559 goto out;
1537 } 1560 }
@@ -1540,6 +1563,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
1540 pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n", 1563 pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
1541 i + 1, algo, req->dlen); 1564 i + 1, algo, req->dlen);
1542 ret = -EINVAL; 1565 ret = -EINVAL;
1566 kfree(input_vec);
1543 acomp_request_free(req); 1567 acomp_request_free(req);
1544 goto out; 1568 goto out;
1545 } 1569 }
@@ -1549,10 +1573,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
1549 i + 1, algo); 1573 i + 1, algo);
1550 hexdump(output, req->dlen); 1574 hexdump(output, req->dlen);
1551 ret = -EINVAL; 1575 ret = -EINVAL;
1576 kfree(input_vec);
1552 acomp_request_free(req); 1577 acomp_request_free(req);
1553 goto out; 1578 goto out;
1554 } 1579 }
1555 1580
1581 kfree(input_vec);
1556 acomp_request_free(req); 1582 acomp_request_free(req);
1557 } 1583 }
1558 1584
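
The testmgr hunks copy every vector so sg_init_one() points at kmalloc'd
buffers rather than the static test vectors, which forces a
kfree(input_vec) on each error path. A standalone sketch of the goto-based
cleanup idiom that keeps such paths down to one free per resource
(malloc/free stand in for kmalloc/kfree; the request stub is invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int run_one_test(const char *vec, size_t len)
{
	int ret = 0;
	void *input, *req;

	input = malloc(len);
	if (!input)
		return -1;
	memcpy(input, vec, len);

	req = malloc(64);	/* stand-in for acomp_request_alloc() */
	if (!req) {
		ret = -1;
		goto free_input;
	}

	/* ... exercise the algorithm here ... */

	free(req);
free_input:
	free(input);
	return ret;
}

int main(void)
{
	return run_one_test("example", 7);
}
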
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 13caebd679f5..8c4e0a18460a 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -114,7 +114,7 @@ void __init acpi_watchdog_init(void)
114 pdev = platform_device_register_simple("wdat_wdt", PLATFORM_DEVID_NONE, 114 pdev = platform_device_register_simple("wdat_wdt", PLATFORM_DEVID_NONE,
115 resources, nresources); 115 resources, nresources);
116 if (IS_ERR(pdev)) 116 if (IS_ERR(pdev))
117 pr_err("Failed to create platform device\n"); 117 pr_err("Device creation failed: %ld\n", PTR_ERR(pdev));
118 118
119 kfree(resources); 119 kfree(resources);
120 120
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 82b0b5710979..b0399e8f6d27 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
852 852
853 ACPI_FUNCTION_TRACE(tb_install_and_load_table); 853 ACPI_FUNCTION_TRACE(tb_install_and_load_table);
854 854
855 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
856
857 /* Install the table and load it into the namespace */ 855 /* Install the table and load it into the namespace */
858 856
859 status = acpi_tb_install_standard_table(address, flags, TRUE, 857 status = acpi_tb_install_standard_table(address, flags, TRUE,
860 override, &i); 858 override, &i);
861 if (ACPI_FAILURE(status)) { 859 if (ACPI_FAILURE(status)) {
862 goto unlock_and_exit; 860 goto exit;
863 } 861 }
864 862
865 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
866 status = acpi_tb_load_table(i, acpi_gbl_root_node); 863 status = acpi_tb_load_table(i, acpi_gbl_root_node);
867 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
868 864
869unlock_and_exit: 865exit:
870 *table_index = i; 866 *table_index = i;
871 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
872 return_ACPI_STATUS(status); 867 return_ACPI_STATUS(status);
873} 868}
874 869
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 5fdf251a9f97..01e1b3d63fc0 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
217 goto release_and_exit; 217 goto release_and_exit;
218 } 218 }
219 219
220 /* Acquire the table lock */
221
222 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
223
220 if (reload) { 224 if (reload) {
221 /* 225 /*
222 * Validate the incoming table signature. 226 * Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
244 new_table_desc.signature.integer)); 248 new_table_desc.signature.integer));
245 249
246 status = AE_BAD_SIGNATURE; 250 status = AE_BAD_SIGNATURE;
247 goto release_and_exit; 251 goto unlock_and_exit;
248 } 252 }
249 253
250 /* Check if table is already registered */ 254 /* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
279 /* Table is still loaded, this is an error */ 283 /* Table is still loaded, this is an error */
280 284
281 status = AE_ALREADY_EXISTS; 285 status = AE_ALREADY_EXISTS;
282 goto release_and_exit; 286 goto unlock_and_exit;
283 } else { 287 } else {
284 /* 288 /*
285 * Table was unloaded, allow it to be reloaded. 289 * Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
290 * indicate the re-installation. 294 * indicate the re-installation.
291 */ 295 */
292 acpi_tb_uninstall_table(&new_table_desc); 296 acpi_tb_uninstall_table(&new_table_desc);
297 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
293 *table_index = i; 298 *table_index = i;
294 return_ACPI_STATUS(AE_OK); 299 return_ACPI_STATUS(AE_OK);
295 } 300 }
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,
303 308
304 /* Invoke table handler if present */ 309 /* Invoke table handler if present */
305 310
311 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
306 if (acpi_gbl_table_handler) { 312 if (acpi_gbl_table_handler) {
307 (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL, 313 (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
308 new_table_desc.pointer, 314 new_table_desc.pointer,
309 acpi_gbl_table_handler_context); 315 acpi_gbl_table_handler_context);
310 } 316 }
317 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
318
319unlock_and_exit:
320
321 /* Release the table lock */
322
323 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
311 324
312release_and_exit: 325release_and_exit:
313 326
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index f8d65647ea79..fb19e1cdb641 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -98,7 +98,15 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
98 if (check_children && list_empty(&adev->children)) 98 if (check_children && list_empty(&adev->children))
99 return -ENODEV; 99 return -ENODEV;
100 100
101 return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; 101 /*
102 * If the device has a _HID (or _CID) returning a valid ACPI/PNP
103 * device ID, it is better to make it look less attractive here, so that
104 * the other device with the same _ADR value (that may not have a valid
105 * device ID) can be matched going forward. [This means a second spec
106 * violation in a row, so whatever we do here is best effort anyway.]
107 */
108 return sta_present && list_empty(&adev->pnp.ids) ?
109 FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
102} 110}
103 111
104struct acpi_device *acpi_find_child_device(struct acpi_device *parent, 112struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
@@ -250,7 +258,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
250 return 0; 258 return 0;
251 259
252 err: 260 err:
253 acpi_dma_deconfigure(dev);
254 ACPI_COMPANION_SET(dev, NULL); 261 ACPI_COMPANION_SET(dev, NULL);
255 put_device(dev); 262 put_device(dev);
256 put_device(&acpi_dev->dev); 263 put_device(&acpi_dev->dev);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 1b41a2739dac..0c452265c111 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -37,6 +37,7 @@ void acpi_amba_init(void);
37static inline void acpi_amba_init(void) {} 37static inline void acpi_amba_init(void) {}
38#endif 38#endif
39int acpi_sysfs_init(void); 39int acpi_sysfs_init(void);
40void acpi_gpe_apply_masked_gpes(void);
40void acpi_container_init(void); 41void acpi_container_init(void);
41void acpi_memory_hotplug_init(void); 42void acpi_memory_hotplug_init(void);
42#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC 43#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 2f82b8eba360..7361d00818e2 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2704,6 +2704,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
2704 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 2704 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
2705 struct device *dev = acpi_desc->dev; 2705 struct device *dev = acpi_desc->dev;
2706 struct acpi_nfit_flush_work flush; 2706 struct acpi_nfit_flush_work flush;
2707 int rc;
2707 2708
2708 /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ 2709 /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
2709 device_lock(dev); 2710 device_lock(dev);
@@ -2716,7 +2717,10 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
2716 INIT_WORK_ONSTACK(&flush.work, flush_probe); 2717 INIT_WORK_ONSTACK(&flush.work, flush_probe);
2717 COMPLETION_INITIALIZER_ONSTACK(flush.cmp); 2718 COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
2718 queue_work(nfit_wq, &flush.work); 2719 queue_work(nfit_wq, &flush.work);
2719 return wait_for_completion_interruptible(&flush.cmp); 2720
2721 rc = wait_for_completion_interruptible(&flush.cmp);
2722 cancel_work_sync(&flush.work);
2723 return rc;
2720} 2724}
2721 2725
2722static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 2726static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 45dec874ea55..192691880d55 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2074,6 +2074,7 @@ int __init acpi_scan_init(void)
2074 } 2074 }
2075 } 2075 }
2076 2076
2077 acpi_gpe_apply_masked_gpes();
2077 acpi_update_all_gpes(); 2078 acpi_update_all_gpes();
2078 acpi_ec_ecdt_start(); 2079 acpi_ec_ecdt_start();
2079 2080
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 9b6cebe227a0..54abb26b7366 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void)
674 if (acpi_sleep_state_supported(i)) 674 if (acpi_sleep_state_supported(i))
675 sleep_states[i] = 1; 675 sleep_states[i] = 1;
676 676
677 /*
678 * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
679 * the default suspend mode was not selected from the command line.
680 */
681 if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
682 mem_sleep_default > PM_SUSPEND_MEM)
683 mem_sleep_default = PM_SUSPEND_FREEZE;
684
685 suspend_set_ops(old_suspend_ordering ? 677 suspend_set_ops(old_suspend_ordering ?
686 &acpi_suspend_ops_old : &acpi_suspend_ops); 678 &acpi_suspend_ops_old : &acpi_suspend_ops);
687 freeze_set_ops(&acpi_freeze_ops); 679 freeze_set_ops(&acpi_freeze_ops);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 703c26e7022c..cf05ae973381 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -708,6 +708,62 @@ end:
708 return result ? result : size; 708 return result ? result : size;
709} 709}
710 710
711/*
712 * A Quirk Mechanism for GPE Flooding Prevention:
713 *
714 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
715 * flooding typically cannot be detected and automatically prevented by
716 * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
717 * the AML tables. This normally indicates a feature gap in Linux, thus
718 * instead of providing endless quirk tables, we provide a boot parameter
719 * for those who want this quirk. For example, if the users want to prevent
720 * the GPE flooding for GPE 00, they need to specify the following boot
721 * parameter:
722 * acpi_mask_gpe=0x00
723 * The masking status can be modified by the following runtime controlling
724 * interface:
725 * echo unmask > /sys/firmware/acpi/interrupts/gpe00
726 */
727
728/*
729 * Currently, the GPE flooding prevention only supports to mask the GPEs
730 * numbered from 00 to 7f.
731 */
732#define ACPI_MASKABLE_GPE_MAX 0x80
733
734static u64 __initdata acpi_masked_gpes;
735
736static int __init acpi_gpe_set_masked_gpes(char *val)
737{
738 u8 gpe;
739
 740 if (kstrtou8(val, 0, &gpe) || gpe >= ACPI_MASKABLE_GPE_MAX)
741 return -EINVAL;
742 acpi_masked_gpes |= ((u64)1<<gpe);
743
744 return 1;
745}
746__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
747
748void __init acpi_gpe_apply_masked_gpes(void)
749{
750 acpi_handle handle;
751 acpi_status status;
752 u8 gpe;
753
754 for (gpe = 0;
755 gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
756 gpe++) {
757 if (acpi_masked_gpes & ((u64)1<<gpe)) {
758 status = acpi_get_gpe_device(gpe, &handle);
759 if (ACPI_SUCCESS(status)) {
760 pr_info("Masking GPE 0x%x.\n", gpe);
761 (void)acpi_mask_gpe(handle, gpe, TRUE);
762 }
763 }
764 }
765}
766
711void acpi_irq_stats_init(void) 767void acpi_irq_stats_init(void)
712{ 768{
713 acpi_status status; 769 acpi_status status;
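
A userspace analogue of the parse-and-apply pair above (strtoul stands in
for kstrtou8; the bound uses >= so that only GPEs 0x00-0x7f, the bits that
exist in the u64 mask, are accepted):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ACPI_MASKABLE_GPE_MAX 0x80

static uint64_t masked_gpes;

/* Record one "acpi_mask_gpe=" value in the bitmap. */
static int set_masked_gpe(const char *val)
{
	unsigned long gpe = strtoul(val, NULL, 0);

	if (gpe >= ACPI_MASKABLE_GPE_MAX)
		return -1;
	masked_gpes |= 1ull << gpe;
	return 0;
}

int main(void)
{
	unsigned gpe;

	set_masked_gpe("0x00");
	set_masked_gpe("0x6f");

	for (gpe = 0; gpe < ACPI_MASKABLE_GPE_MAX; gpe++)
		if (masked_gpes & (1ull << gpe))
			printf("Masking GPE 0x%02x\n", gpe);
	return 0;
}
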
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 02ded25c82e4..7f48156cbc0c 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
305 DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"), 305 DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
306 }, 306 },
307 }, 307 },
308 {
309 /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
310 /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
311 .callback = video_detect_force_native,
312 .ident = "HP Pavilion dv6",
313 .matches = {
314 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
315 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
316 },
317 },
318
319 { }, 308 { },
320}; 309};
321 310
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 9cd0a2d41816..c2d3785ec227 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
1702 1702
1703 if (qc->err_mask & ~AC_ERR_OTHER) 1703 if (qc->err_mask & ~AC_ERR_OTHER)
1704 qc->err_mask &= ~AC_ERR_OTHER; 1704 qc->err_mask &= ~AC_ERR_OTHER;
1705 } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1706 qc->result_tf.command |= ATA_SENSE;
1705 } 1707 }
1706 1708
1707 /* finish up */ 1709 /* finish up */
@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4356 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, 4358 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
4357 4359
4358 /* 4360 /*
4359 * Device times out with higher max sects. 4361 * These devices time out with higher max sects.
4360 * https://bugzilla.kernel.org/show_bug.cgi?id=121671 4362 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4361 */ 4363 */
4362 { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, 4364 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
4363 4365
4364 /* Devices we expect to fail diagnostics */ 4366 /* Devices we expect to fail diagnostics */
4365 4367
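The blacklist entry is widened from the literal "CX1-JB256-HP" to the pattern "CX1-JB*-HP", so every capacity variant of the LITEON CX1 family inherits ATA_HORKAGE_MAX_SEC_1024. libata compares model strings against these entries with a glob-style matcher; a self-contained sketch of such a matcher (an illustration, not the kernel's glob_match()):

#include <stdbool.h>
#include <stdio.h>

/* Minimal recursive glob supporting only '*'. */
static bool globish(const char *pat, const char *str)
{
        if (*pat == '\0')
                return *str == '\0';
        if (*pat == '*')
                return globish(pat + 1, str) ||
                       (*str != '\0' && globish(pat, str + 1));
        return *pat == *str && globish(pat + 1, str + 1);
}

int main(void)
{
        printf("%d\n", globish("CX1-JB*-HP", "CX1-JB256-HP")); /* prints 1 */
        printf("%d\n", globish("CX1-JB*-HP", "CX1-JB512-HP")); /* prints 1 */
        return 0;
}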
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 823e938c9a78..2f32782cea6d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
4132 host->iomap = NULL; 4132 host->iomap = NULL;
4133 hpriv->base = devm_ioremap(&pdev->dev, res->start, 4133 hpriv->base = devm_ioremap(&pdev->dev, res->start,
4134 resource_size(res)); 4134 resource_size(res));
4135 if (!hpriv->base)
4136 return -ENOMEM;
4137
4135 hpriv->base -= SATAHC0_REG_BASE; 4138 hpriv->base -= SATAHC0_REG_BASE;
4136 4139
4137 hpriv->clk = clk_get(&pdev->dev, NULL); 4140 hpriv->clk = clk_get(&pdev->dev, NULL);
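Note the error convention here: devm_ioremap() reports failure with a plain NULL (no ERR_PTR encoding), so the bare pointer test added above is the right check, and it must run before the SATAHC0_REG_BASE adjustment turns a NULL into a non-NULL garbage pointer. A hypothetical probe fragment using the same pattern:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>

/* demo_map() is a hypothetical helper, not sata_mv code. */
static int demo_map(struct device *dev, struct resource *res,
                    void __iomem **base)
{
        *base = devm_ioremap(dev, res->start, resource_size(res));
        if (!*base)                     /* NULL, not IS_ERR() */
                return -ENOMEM;
        return 0;
}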
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 4ef4c5caed4f..8a8e403644d6 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -132,9 +132,9 @@ config HT16K33
132 tristate "Holtek Ht16K33 LED controller with keyscan" 132 tristate "Holtek Ht16K33 LED controller with keyscan"
133 depends on FB && OF && I2C && INPUT 133 depends on FB && OF && I2C && INPUT
134 select FB_SYS_FOPS 134 select FB_SYS_FOPS
135 select FB_CFB_FILLRECT 135 select FB_SYS_FILLRECT
136 select FB_CFB_COPYAREA 136 select FB_SYS_COPYAREA
137 select FB_CFB_IMAGEBLIT 137 select FB_SYS_IMAGEBLIT
138 select INPUT_MATRIXKMAP 138 select INPUT_MATRIXKMAP
139 select FB_BACKLIGHT 139 select FB_BACKLIGHT
140 help 140 help
diff --git a/drivers/base/base.h b/drivers/base/base.h
index ada9dce34e6d..e19b1008e5fb 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -141,8 +141,6 @@ extern void device_unblock_probing(void);
141extern struct kset *devices_kset; 141extern struct kset *devices_kset;
142extern void devices_kset_move_last(struct device *dev); 142extern void devices_kset_move_last(struct device *dev);
143 143
144extern struct device_attribute dev_attr_deferred_probe;
145
146#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS) 144#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
147extern void module_add_driver(struct module *mod, struct device_driver *drv); 145extern void module_add_driver(struct module *mod, struct device_driver *drv);
148extern void module_remove_driver(struct device_driver *drv); 146extern void module_remove_driver(struct device_driver *drv);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 020ea7f05520..8c25e68e67d7 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1060,14 +1060,8 @@ static int device_add_attrs(struct device *dev)
1060 goto err_remove_dev_groups; 1060 goto err_remove_dev_groups;
1061 } 1061 }
1062 1062
1063 error = device_create_file(dev, &dev_attr_deferred_probe);
1064 if (error)
1065 goto err_remove_online;
1066
1067 return 0; 1063 return 0;
1068 1064
1069 err_remove_online:
1070 device_remove_file(dev, &dev_attr_online);
1071 err_remove_dev_groups: 1065 err_remove_dev_groups:
1072 device_remove_groups(dev, dev->groups); 1066 device_remove_groups(dev, dev->groups);
1073 err_remove_type_groups: 1067 err_remove_type_groups:
@@ -1085,7 +1079,6 @@ static void device_remove_attrs(struct device *dev)
1085 struct class *class = dev->class; 1079 struct class *class = dev->class;
1086 const struct device_type *type = dev->type; 1080 const struct device_type *type = dev->type;
1087 1081
1088 device_remove_file(dev, &dev_attr_deferred_probe);
1089 device_remove_file(dev, &dev_attr_online); 1082 device_remove_file(dev, &dev_attr_online);
1090 device_remove_groups(dev, dev->groups); 1083 device_remove_groups(dev, dev->groups);
1091 1084
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a8b258e5407b..a1fbf55c4d3a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -53,19 +53,6 @@ static LIST_HEAD(deferred_probe_pending_list);
53static LIST_HEAD(deferred_probe_active_list); 53static LIST_HEAD(deferred_probe_active_list);
54static atomic_t deferred_trigger_count = ATOMIC_INIT(0); 54static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
55 55
56static ssize_t deferred_probe_show(struct device *dev,
57 struct device_attribute *attr, char *buf)
58{
59 bool value;
60
61 mutex_lock(&deferred_probe_mutex);
62 value = !list_empty(&dev->p->deferred_probe);
63 mutex_unlock(&deferred_probe_mutex);
64
65 return sprintf(buf, "%d\n", value);
66}
67DEVICE_ATTR_RO(deferred_probe);
68
69/* 56/*
70 * In some cases, like suspend to RAM or hibernation, it might be reasonable 57 * In some cases, like suspend to RAM or hibernation, it might be reasonable
71 * to prohibit probing of devices as it could be unsafe. 58 * to prohibit probing of devices as it could be unsafe.
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4497d263209f..ac350c518e0c 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
558 struct firmware_buf *buf = fw_priv->buf; 558 struct firmware_buf *buf = fw_priv->buf;
559 559
560 __fw_load_abort(buf); 560 __fw_load_abort(buf);
561
562 /* avoid user action after loading abort */
563 fw_priv->buf = NULL;
564} 561}
565 562
566static LIST_HEAD(pending_fw_head); 563static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
713 710
714 mutex_lock(&fw_lock); 711 mutex_lock(&fw_lock);
715 fw_buf = fw_priv->buf; 712 fw_buf = fw_priv->buf;
716 if (!fw_buf) 713 if (fw_state_is_aborted(&fw_buf->fw_st))
717 goto out; 714 goto out;
718 715
719 switch (loading) { 716 switch (loading) {
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8ab8ea1253e6..fa26ffd25fa6 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
389{ 389{
390 struct memory_block *mem = to_memory_block(dev); 390 struct memory_block *mem = to_memory_block(dev);
391 unsigned long start_pfn, end_pfn; 391 unsigned long start_pfn, end_pfn;
392 unsigned long valid_start, valid_end, valid_pages;
392 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; 393 unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
393 struct page *first_page;
394 struct zone *zone; 394 struct zone *zone;
395 int zone_shift = 0; 395 int zone_shift = 0;
396 396
397 start_pfn = section_nr_to_pfn(mem->start_section_nr); 397 start_pfn = section_nr_to_pfn(mem->start_section_nr);
398 end_pfn = start_pfn + nr_pages; 398 end_pfn = start_pfn + nr_pages;
399 first_page = pfn_to_page(start_pfn);
400 399
401 /* A block that contains more than one zone cannot be offlined. */ 400 /* A block that contains more than one zone cannot be offlined. */
402 if (!test_pages_in_a_zone(start_pfn, end_pfn)) 401 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
403 return sprintf(buf, "none\n"); 402 return sprintf(buf, "none\n");
404 403
405 zone = page_zone(first_page); 404 zone = page_zone(pfn_to_page(valid_start));
405 valid_pages = valid_end - valid_start;
406 406
407 /* MMOP_ONLINE_KEEP */ 407 /* MMOP_ONLINE_KEEP */
408 sprintf(buf, "%s", zone->name); 408 sprintf(buf, "%s", zone->name);
409 409
410 /* MMOP_ONLINE_KERNEL */ 410 /* MMOP_ONLINE_KERNEL */
411 zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL); 411 zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
412 if (zone_shift) { 412 if (zone_shift) {
413 strcat(buf, " "); 413 strcat(buf, " ");
414 strcat(buf, (zone + zone_shift)->name); 414 strcat(buf, (zone + zone_shift)->name);
415 } 415 }
416 416
417 /* MMOP_ONLINE_MOVABLE */ 417 /* MMOP_ONLINE_MOVABLE */
418 zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE); 418 zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
419 if (zone_shift) { 419 if (zone_shift) {
420 strcat(buf, " "); 420 strcat(buf, " ");
421 strcat(buf, (zone + zone_shift)->name); 421 strcat(buf, (zone + zone_shift)->name);
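zone_can_shift() changes from returning the shift to reporting success as the return value and the shift through a pointer. The shift's whole value range is meaningful (zero and negative included, since a block may already sit in the target zone or shift downward), so it is awkward to overload the return value as a status; splitting status from result removes the ambiguity. The generic shape of that out-parameter pattern, with hypothetical names:

#include <stdbool.h>

/* compute_shift() is illustrative, not the kernel's zone_can_shift(). */
static bool compute_shift(int from_zone, int to_zone, int *shift)
{
        if (from_zone < 0 || to_zone < 0)
                return false;           /* failure reported out of band */
        *shift = to_zone - from_zone;   /* 0 and negatives are valid */
        return true;
}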
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index a5e1262b964b..2997026b4dfb 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -626,6 +626,7 @@ static int genpd_runtime_resume(struct device *dev)
626 626
627 out: 627 out:
628 /* Measure resume latency. */ 628 /* Measure resume latency. */
629 time_start = 0;
629 if (timed && runtime_pm) 630 if (timed && runtime_pm)
630 time_start = ktime_get(); 631 time_start = ktime_get();
631 632
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 872eac4cb1df..a14fac6a01d3 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -966,13 +966,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
966 unsigned long flags; 966 unsigned long flags;
967 int retval; 967 int retval;
968 968
969 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
970
971 if (rpmflags & RPM_GET_PUT) { 969 if (rpmflags & RPM_GET_PUT) {
972 if (!atomic_dec_and_test(&dev->power.usage_count)) 970 if (!atomic_dec_and_test(&dev->power.usage_count))
973 return 0; 971 return 0;
974 } 972 }
975 973
974 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
975
976 spin_lock_irqsave(&dev->power.lock, flags); 976 spin_lock_irqsave(&dev->power.lock, flags);
977 retval = rpm_idle(dev, rpmflags); 977 retval = rpm_idle(dev, rpmflags);
978 spin_unlock_irqrestore(&dev->power.lock, flags); 978 spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -998,13 +998,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
998 unsigned long flags; 998 unsigned long flags;
999 int retval; 999 int retval;
1000 1000
1001 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1002
1003 if (rpmflags & RPM_GET_PUT) { 1001 if (rpmflags & RPM_GET_PUT) {
1004 if (!atomic_dec_and_test(&dev->power.usage_count)) 1002 if (!atomic_dec_and_test(&dev->power.usage_count))
1005 return 0; 1003 return 0;
1006 } 1004 }
1007 1005
1006 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1007
1008 spin_lock_irqsave(&dev->power.lock, flags); 1008 spin_lock_irqsave(&dev->power.lock, flags);
1009 retval = rpm_suspend(dev, rpmflags); 1009 retval = rpm_suspend(dev, rpmflags);
1010 spin_unlock_irqrestore(&dev->power.lock, flags); 1010 spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1029,7 +1029,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
1029 unsigned long flags; 1029 unsigned long flags;
1030 int retval; 1030 int retval;
1031 1031
1032 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); 1032 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1033 dev->power.runtime_status != RPM_ACTIVE);
1033 1034
1034 if (rpmflags & RPM_GET_PUT) 1035 if (rpmflags & RPM_GET_PUT)
1035 atomic_inc(&dev->power.usage_count); 1036 atomic_inc(&dev->power.usage_count);
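All three hunks adjust the might_sleep_if() debug annotation rather than any locking: the RPM_GET_PUT fast path only decrements an atomic counter and returns, so a caller that merely drops a reference never sleeps and should not trip the atomic-sleep warning; the resume path additionally skips the warning when the device is already RPM_ACTIVE. The annotation itself is a thin conditional wrapper, roughly:

/*
 * Shape of the annotation being reordered above (the real definition in
 * <linux/kernel.h> compiles away unless atomic-sleep debugging is on).
 */
#define might_sleep_if(cond)            \
        do {                            \
                if (cond)               \
                        might_sleep();  \
        } while (0)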
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index f642c4264c27..168fa175d65a 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
45void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc); 45void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
46void bcma_core_chipcommon_init(struct bcma_drv_cc *cc); 46void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
47void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable); 47void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
48#ifdef CONFIG_BCMA_DRIVER_MIPS
49void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
50#endif /* CONFIG_BCMA_DRIVER_MIPS */
48 51
49/* driver_chipcommon_b.c */ 52/* driver_chipcommon_b.c */
50int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb); 53int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index b4f6520e74f0..62f5bfa5065d 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -15,8 +15,6 @@
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/bcma/bcma.h> 16#include <linux/bcma/bcma.h>
17 17
18static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
19
20static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset, 18static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
21 u32 mask, u32 value) 19 u32 mask, u32 value)
22{ 20{
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
186 if (cc->capabilities & BCMA_CC_CAP_PMU) 184 if (cc->capabilities & BCMA_CC_CAP_PMU)
187 bcma_pmu_early_init(cc); 185 bcma_pmu_early_init(cc);
188 186
189 if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
190 bcma_chipco_serial_init(cc);
191
192 if (bus->hosttype == BCMA_HOSTTYPE_SOC) 187 if (bus->hosttype == BCMA_HOSTTYPE_SOC)
193 bcma_core_chipcommon_flash_detect(cc); 188 bcma_core_chipcommon_flash_detect(cc);
194 189
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
378 return res; 373 return res;
379} 374}
380 375
381static void bcma_chipco_serial_init(struct bcma_drv_cc *cc) 376#ifdef CONFIG_BCMA_DRIVER_MIPS
377void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
382{ 378{
383#if IS_BUILTIN(CONFIG_BCM47XX)
384 unsigned int irq; 379 unsigned int irq;
385 u32 baud_base; 380 u32 baud_base;
386 u32 i; 381 u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
422 ports[i].baud_base = baud_base; 417 ports[i].baud_base = baud_base;
423 ports[i].reg_shift = 0; 418 ports[i].reg_shift = 0;
424 } 419 }
425#endif /* CONFIG_BCM47XX */
426} 420}
421#endif /* CONFIG_BCMA_DRIVER_MIPS */
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 96f171328200..89af807cf29c 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
278 278
279void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) 279void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
280{ 280{
281 struct bcma_bus *bus = mcore->core->bus;
282
281 if (mcore->early_setup_done) 283 if (mcore->early_setup_done)
282 return; 284 return;
283 285
286 bcma_chipco_serial_init(&bus->drv_cc);
284 bcma_core_mips_nvram_init(mcore); 287 bcma_core_mips_nvram_init(mcore);
285 288
286 mcore->early_setup_done = true; 289 mcore->early_setup_done = true;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 38c576f76d36..9fd06eeb1a17 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,7 +271,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index,
271static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) 271static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
272{ 272{
273 struct request *req = blk_mq_rq_from_pdu(cmd); 273 struct request *req = blk_mq_rq_from_pdu(cmd);
274 int result, flags; 274 int result;
275 struct nbd_request request; 275 struct nbd_request request;
276 unsigned long size = blk_rq_bytes(req); 276 unsigned long size = blk_rq_bytes(req);
277 struct bio *bio; 277 struct bio *bio;
@@ -310,7 +310,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
310 if (type != NBD_CMD_WRITE) 310 if (type != NBD_CMD_WRITE)
311 return 0; 311 return 0;
312 312
313 flags = 0;
314 bio = req->bio; 313 bio = req->bio;
315 while (bio) { 314 while (bio) {
316 struct bio *next = bio->bi_next; 315 struct bio *next = bio->bi_next;
@@ -319,9 +318,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
319 318
320 bio_for_each_segment(bvec, bio, iter) { 319 bio_for_each_segment(bvec, bio, iter) {
321 bool is_last = !next && bio_iter_last(bvec, iter); 320 bool is_last = !next && bio_iter_last(bvec, iter);
321 int flags = is_last ? 0 : MSG_MORE;
322 322
323 if (is_last)
324 flags = MSG_MORE;
325 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", 323 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
326 cmd, bvec.bv_len); 324 cmd, bvec.bv_len);
327 result = sock_send_bvec(nbd, index, &bvec, flags); 325 result = sock_send_bvec(nbd, index, &bvec, flags);
@@ -1042,6 +1040,7 @@ static int __init nbd_init(void)
1042 return -ENOMEM; 1040 return -ENOMEM;
1043 1041
1044 for (i = 0; i < nbds_max; i++) { 1042 for (i = 0; i < nbds_max; i++) {
1043 struct request_queue *q;
1045 struct gendisk *disk = alloc_disk(1 << part_shift); 1044 struct gendisk *disk = alloc_disk(1 << part_shift);
1046 if (!disk) 1045 if (!disk)
1047 goto out; 1046 goto out;
@@ -1067,12 +1066,13 @@ static int __init nbd_init(void)
1067 * every gendisk to have its very own request_queue struct. 1066 * every gendisk to have its very own request_queue struct.
1068 * These structs are big so we dynamically allocate them. 1067 * These structs are big so we dynamically allocate them.
1069 */ 1068 */
1070 disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set); 1069 q = blk_mq_init_queue(&nbd_dev[i].tag_set);
1071 if (!disk->queue) { 1070 if (IS_ERR(q)) {
1072 blk_mq_free_tag_set(&nbd_dev[i].tag_set); 1071 blk_mq_free_tag_set(&nbd_dev[i].tag_set);
1073 put_disk(disk); 1072 put_disk(disk);
1074 goto out; 1073 goto out;
1075 } 1074 }
1075 disk->queue = q;
1076 1076
1077 /* 1077 /*
1078 * Tell the block layer that we are not a rotational device 1078 * Tell the block layer that we are not a rotational device
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5545a679abd8..10332c24f961 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -56,6 +56,7 @@ struct virtblk_req {
56 struct virtio_blk_outhdr out_hdr; 56 struct virtio_blk_outhdr out_hdr;
57 struct virtio_scsi_inhdr in_hdr; 57 struct virtio_scsi_inhdr in_hdr;
58 u8 status; 58 u8 status;
59 u8 sense[SCSI_SENSE_BUFFERSIZE];
59 struct scatterlist sg[]; 60 struct scatterlist sg[];
60}; 61};
61 62
@@ -102,7 +103,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
102 } 103 }
103 104
104 if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) { 105 if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
105 sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE); 106 memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
107 sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
106 sgs[num_out + num_in++] = &sense; 108 sgs[num_out + num_in++] = &sense;
107 sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr)); 109 sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
108 sgs[num_out + num_in++] = &inhdr; 110 sgs[num_out + num_in++] = &inhdr;
@@ -628,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev)
628 if (err) 630 if (err)
629 goto out_put_disk; 631 goto out_put_disk;
630 632
631 q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); 633 q = blk_mq_init_queue(&vblk->tag_set);
632 if (IS_ERR(q)) { 634 if (IS_ERR(q)) {
633 err = -ENOMEM; 635 err = -ENOMEM;
634 goto out_free_tags; 636 goto out_free_tags;
635 } 637 }
638 vblk->disk->queue = q;
636 639
637 q->queuedata = vblk; 640 q->queuedata = vblk;
638 641
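The nbd and virtio_blk hunks fix the same class of bug: blk_mq_init_queue() reports failure through an encoded error pointer, so a NULL test like the old `if (!disk->queue)` never fires and the bogus pointer is later used. Both fixes also assign disk->queue only after the check, so the gendisk never holds an error pointer. The convention, from <linux/err.h>, packs a small negative errno into an invalid pointer value:

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int x; };

/* foo_create() is a hypothetical constructor following the same rules. */
static struct foo *foo_create(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return ERR_PTR(-ENOMEM);        /* not NULL */
        return f;
}

static int foo_use(void)
{
        struct foo *f = foo_create();

        if (IS_ERR(f))          /* a NULL check would miss this */
                return PTR_ERR(f);
        kfree(f);
        return 0;
}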
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b2bdfa81f929..265f1a7072e9 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -197,13 +197,13 @@ struct blkfront_info
197 /* Number of pages per ring buffer. */ 197 /* Number of pages per ring buffer. */
198 unsigned int nr_ring_pages; 198 unsigned int nr_ring_pages;
199 struct request_queue *rq; 199 struct request_queue *rq;
200 unsigned int feature_flush; 200 unsigned int feature_flush:1;
201 unsigned int feature_fua; 201 unsigned int feature_fua:1;
202 unsigned int feature_discard:1; 202 unsigned int feature_discard:1;
203 unsigned int feature_secdiscard:1; 203 unsigned int feature_secdiscard:1;
204 unsigned int feature_persistent:1;
204 unsigned int discard_granularity; 205 unsigned int discard_granularity;
205 unsigned int discard_alignment; 206 unsigned int discard_alignment;
206 unsigned int feature_persistent:1;
207 /* Number of 4KB segments handled */ 207 /* Number of 4KB segments handled */
208 unsigned int max_indirect_segments; 208 unsigned int max_indirect_segments;
209 int is_ready; 209 int is_ready;
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
2223 } 2223 }
2224 else 2224 else
2225 grants = info->max_indirect_segments; 2225 grants = info->max_indirect_segments;
2226 psegs = grants / GRANTS_PER_PSEG; 2226 psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
2227 2227
2228 err = fill_grant_buffer(rinfo, 2228 err = fill_grant_buffer(rinfo,
2229 (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info)); 2229 (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
2323 blkfront_setup_discard(info); 2323 blkfront_setup_discard(info);
2324 2324
2325 info->feature_persistent = 2325 info->feature_persistent =
2326 xenbus_read_unsigned(info->xbdev->otherend, 2326 !!xenbus_read_unsigned(info->xbdev->otherend,
2327 "feature-persistent", 0); 2327 "feature-persistent", 0);
2328 2328
2329 indirect_segments = xenbus_read_unsigned(info->xbdev->otherend, 2329 indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
2330 "feature-max-indirect-segments", 0); 2330 "feature-max-indirect-segments", 0);
2331 info->max_indirect_segments = min(indirect_segments, 2331 if (indirect_segments > xen_blkif_max_segments)
2332 xen_blkif_max_segments); 2332 indirect_segments = xen_blkif_max_segments;
2333 if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
2334 indirect_segments = 0;
2335 info->max_indirect_segments = indirect_segments;
2333} 2336}
2334 2337
2335/* 2338/*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
2652 if (!xen_domain()) 2655 if (!xen_domain())
2653 return -ENODEV; 2656 return -ENODEV;
2654 2657
2658 if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
2659 xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2660
2655 if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) { 2661 if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
2656 pr_info("Invalid max_ring_order (%d), will use default max: %d.\n", 2662 pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
2657 xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER); 2663 xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
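Two robustness fixes ride along here: psegs is now computed with ceiling division, so a grant count that is not a multiple of GRANTS_PER_PSEG still gets its final, partially filled segment, and the module parameter is clamped to at least BLKIF_MAX_SEGMENTS_PER_REQUEST. DIV_ROUND_UP() is the usual <linux/kernel.h> idiom:

/* Ceiling division, as defined in <linux/kernel.h>: */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/*
 * With hypothetical numbers: 10 grants at 4 grants per segment need
 * DIV_ROUND_UP(10, 4) == 3 segments, where 10 / 4 would allot only 2.
 */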
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 15f58ab44d0b..e5ab7d9e8c45 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -25,6 +25,7 @@
25#include <linux/genhd.h> 25#include <linux/genhd.h>
26#include <linux/highmem.h> 26#include <linux/highmem.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/backing-dev.h>
28#include <linux/string.h> 29#include <linux/string.h>
29#include <linux/vmalloc.h> 30#include <linux/vmalloc.h>
30#include <linux/err.h> 31#include <linux/err.h>
@@ -112,6 +113,14 @@ static inline bool is_partial_io(struct bio_vec *bvec)
112 return bvec->bv_len != PAGE_SIZE; 113 return bvec->bv_len != PAGE_SIZE;
113} 114}
114 115
116static void zram_revalidate_disk(struct zram *zram)
117{
118 revalidate_disk(zram->disk);
119 /* revalidate_disk() resets BDI_CAP_STABLE_WRITES, so set it again */
120 zram->disk->queue->backing_dev_info.capabilities |=
121 BDI_CAP_STABLE_WRITES;
122}
123
115/* 124/*
116 * Check if request is within bounds and aligned on zram logical blocks. 125 * Check if request is within bounds and aligned on zram logical blocks.
117 */ 126 */
@@ -1095,15 +1104,9 @@ static ssize_t disksize_store(struct device *dev,
1095 zram->comp = comp; 1104 zram->comp = comp;
1096 zram->disksize = disksize; 1105 zram->disksize = disksize;
1097 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); 1106 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
1107 zram_revalidate_disk(zram);
1098 up_write(&zram->init_lock); 1108 up_write(&zram->init_lock);
1099 1109
1100 /*
1101 * Revalidate disk out of the init_lock to avoid lockdep splat.
1102 * It's okay because disk's capacity is protected by init_lock
1103 * so that revalidate_disk always sees up-to-date capacity.
1104 */
1105 revalidate_disk(zram->disk);
1106
1107 return len; 1110 return len;
1108 1111
1109out_destroy_comp: 1112out_destroy_comp:
@@ -1149,7 +1152,7 @@ static ssize_t reset_store(struct device *dev,
1149 /* Make sure all the pending I/O are finished */ 1152 /* Make sure all the pending I/O are finished */
1150 fsync_bdev(bdev); 1153 fsync_bdev(bdev);
1151 zram_reset_device(zram); 1154 zram_reset_device(zram);
1152 revalidate_disk(zram->disk); 1155 zram_revalidate_disk(zram);
1153 bdput(bdev); 1156 bdput(bdev);
1154 1157
1155 mutex_lock(&bdev->bd_mutex); 1158 mutex_lock(&bdev->bd_mutex);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 6ce5ce8be2f2..87fba424817e 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -92,7 +92,6 @@ static void add_early_randomness(struct hwrng *rng)
92 mutex_unlock(&reading_mutex); 92 mutex_unlock(&reading_mutex);
93 if (bytes_read > 0) 93 if (bytes_read > 0)
94 add_device_randomness(rng_buffer, bytes_read); 94 add_device_randomness(rng_buffer, bytes_read);
95 memset(rng_buffer, 0, size);
96} 95}
97 96
98static inline void cleanup_rng(struct kref *kref) 97static inline void cleanup_rng(struct kref *kref)
@@ -288,7 +287,6 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
288 } 287 }
289 } 288 }
290out: 289out:
291 memset(rng_buffer, 0, rng_buffer_size());
292 return ret ? : err; 290 return ret ? : err;
293 291
294out_unlock_reading: 292out_unlock_reading:
@@ -427,7 +425,6 @@ static int hwrng_fillfn(void *unused)
427 /* Outside lock, sure, but y'know: randomness. */ 425 /* Outside lock, sure, but y'know: randomness. */
428 add_hwgenerator_randomness((void *)rng_fillbuf, rc, 426 add_hwgenerator_randomness((void *)rng_fillbuf, rc,
429 rc * current_quality * 8 >> 10); 427 rc * current_quality * 8 >> 10);
430 memset(rng_fillbuf, 0, rng_buffer_size());
431 } 428 }
432 hwrng_fill = NULL; 429 hwrng_fill = NULL;
433 return 0; 430 return 0;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 5bb1985ec484..6d9cc2d39d22 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -381,9 +381,6 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
381 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ 381 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
382 int err = 0; 382 int err = 0;
383 383
384 if (!pfn_valid(PFN_DOWN(p)))
385 return -EIO;
386
387 read = 0; 384 read = 0;
388 if (p < (unsigned long) high_memory) { 385 if (p < (unsigned long) high_memory) {
389 low_count = count; 386 low_count = count;
@@ -412,6 +409,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
412 * by the kernel or data corruption may occur 409 * by the kernel or data corruption may occur
413 */ 410 */
414 kbuf = xlate_dev_kmem_ptr((void *)p); 411 kbuf = xlate_dev_kmem_ptr((void *)p);
412 if (!virt_addr_valid(kbuf))
413 return -ENXIO;
415 414
416 if (copy_to_user(buf, kbuf, sz)) 415 if (copy_to_user(buf, kbuf, sz))
417 return -EFAULT; 416 return -EFAULT;
@@ -482,6 +481,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
482 * corruption may occur. 481 * corruption may occur.
483 */ 482 */
484 ptr = xlate_dev_kmem_ptr((void *)p); 483 ptr = xlate_dev_kmem_ptr((void *)p);
484 if (!virt_addr_valid(ptr))
485 return -ENXIO;
485 486
486 copied = copy_from_user(ptr, buf, sz); 487 copied = copy_from_user(ptr, buf, sz);
487 if (copied) { 488 if (copied) {
@@ -512,9 +513,6 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
512 char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ 513 char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
513 int err = 0; 514 int err = 0;
514 515
515 if (!pfn_valid(PFN_DOWN(p)))
516 return -EIO;
517
518 if (p < (unsigned long) high_memory) { 516 if (p < (unsigned long) high_memory) {
519 unsigned long to_write = min_t(unsigned long, count, 517 unsigned long to_write = min_t(unsigned long, count,
520 (unsigned long)high_memory - p); 518 (unsigned long)high_memory - p);
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 02819e0703c8..87885d146dbb 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -290,6 +290,7 @@ static int register_device(int minor, struct pp_struct *pp)
290 struct pardevice *pdev = NULL; 290 struct pardevice *pdev = NULL;
291 char *name; 291 char *name;
292 struct pardev_cb ppdev_cb; 292 struct pardev_cb ppdev_cb;
293 int rc = 0;
293 294
294 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); 295 name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
295 if (name == NULL) 296 if (name == NULL)
@@ -298,8 +299,8 @@ static int register_device(int minor, struct pp_struct *pp)
298 port = parport_find_number(minor); 299 port = parport_find_number(minor);
299 if (!port) { 300 if (!port) {
300 pr_warn("%s: no associated port!\n", name); 301 pr_warn("%s: no associated port!\n", name);
301 kfree(name); 302 rc = -ENXIO;
302 return -ENXIO; 303 goto err;
303 } 304 }
304 305
305 memset(&ppdev_cb, 0, sizeof(ppdev_cb)); 306 memset(&ppdev_cb, 0, sizeof(ppdev_cb));
@@ -308,16 +309,18 @@ static int register_device(int minor, struct pp_struct *pp)
308 ppdev_cb.private = pp; 309 ppdev_cb.private = pp;
309 pdev = parport_register_dev_model(port, name, &ppdev_cb, minor); 310 pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
310 parport_put_port(port); 311 parport_put_port(port);
311 kfree(name);
312 312
313 if (!pdev) { 313 if (!pdev) {
314 pr_warn("%s: failed to register device!\n", name); 314 pr_warn("%s: failed to register device!\n", name);
315 return -ENXIO; 315 rc = -ENXIO;
316 goto err;
316 } 317 }
317 318
318 pp->pdev = pdev; 319 pp->pdev = pdev;
319 dev_dbg(&pdev->dev, "registered pardevice\n"); 320 dev_dbg(&pdev->dev, "registered pardevice\n");
320 return 0; 321err:
322 kfree(name);
323 return rc;
321} 324}
322 325
323static enum ieee1284_phase init_phase(int mode) 326static enum ieee1284_phase init_phase(int mode)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8b00e79c2683..17857beb4892 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1862,7 +1862,7 @@ static void config_work_handler(struct work_struct *work)
1862{ 1862{
1863 struct ports_device *portdev; 1863 struct ports_device *portdev;
1864 1864
1865 portdev = container_of(work, struct ports_device, control_work); 1865 portdev = container_of(work, struct ports_device, config_work);
1866 if (!use_multiport(portdev)) { 1866 if (!use_multiport(portdev)) {
1867 struct virtio_device *vdev; 1867 struct virtio_device *vdev;
1868 struct port *port; 1868 struct port *port;
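A one-member fix with real consequences: container_of() subtracts the offset of the named member, so naming control_work while the work item is actually embedded as config_work compiles cleanly but hands the handler a pointer off by the distance between the two members. A reduced sketch of the pattern (illustrative layout, not the real ports_device):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct pdev_demo {
        struct work_struct config_work;
        struct work_struct control_work;
};

static void config_handler_demo(struct work_struct *work)
{
        /* Correct: this handler's work item is embedded as config_work. */
        struct pdev_demo *pd =
                container_of(work, struct pdev_demo, config_work);

        /*
         * container_of(work, struct pdev_demo, control_work) would also
         * compile, but would yield a pointer displaced by the gap
         * between the two members whenever this runs for config_work.
         */
        (void)pd;
}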
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 5eb05dbf59b8..fc585f370549 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -768,5 +768,5 @@ fail:
768 kfree(clks); 768 kfree(clks);
769 iounmap(base); 769 iounmap(base);
770} 770}
771CLK_OF_DECLARE(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init); 771CLK_OF_DECLARE_DRIVER(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
772CLK_OF_DECLARE(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init); 772CLK_OF_DECLARE_DRIVER(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 9375777776d9..b533f99550e1 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -37,12 +37,14 @@
37 * @smstpcr: module stop control register 37 * @smstpcr: module stop control register
38 * @mstpsr: module stop status register (optional) 38 * @mstpsr: module stop status register (optional)
39 * @lock: protects writes to SMSTPCR 39 * @lock: protects writes to SMSTPCR
40 * @width_8bit: registers are 8-bit, not 32-bit
40 */ 41 */
41struct mstp_clock_group { 42struct mstp_clock_group {
42 struct clk_onecell_data data; 43 struct clk_onecell_data data;
43 void __iomem *smstpcr; 44 void __iomem *smstpcr;
44 void __iomem *mstpsr; 45 void __iomem *mstpsr;
45 spinlock_t lock; 46 spinlock_t lock;
47 bool width_8bit;
46}; 48};
47 49
48/** 50/**
@@ -59,6 +61,18 @@ struct mstp_clock {
59 61
60#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw) 62#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)
61 63
64static inline u32 cpg_mstp_read(struct mstp_clock_group *group,
65 u32 __iomem *reg)
66{
67 return group->width_8bit ? readb(reg) : clk_readl(reg);
68}
69
70static inline void cpg_mstp_write(struct mstp_clock_group *group, u32 val,
71 u32 __iomem *reg)
72{
73 group->width_8bit ? writeb(val, reg) : clk_writel(val, reg);
74}
75
62static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable) 76static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
63{ 77{
64 struct mstp_clock *clock = to_mstp_clock(hw); 78 struct mstp_clock *clock = to_mstp_clock(hw);
@@ -70,12 +84,12 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
70 84
71 spin_lock_irqsave(&group->lock, flags); 85 spin_lock_irqsave(&group->lock, flags);
72 86
73 value = clk_readl(group->smstpcr); 87 value = cpg_mstp_read(group, group->smstpcr);
74 if (enable) 88 if (enable)
75 value &= ~bitmask; 89 value &= ~bitmask;
76 else 90 else
77 value |= bitmask; 91 value |= bitmask;
78 clk_writel(value, group->smstpcr); 92 cpg_mstp_write(group, value, group->smstpcr);
79 93
80 spin_unlock_irqrestore(&group->lock, flags); 94 spin_unlock_irqrestore(&group->lock, flags);
81 95
@@ -83,7 +97,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
83 return 0; 97 return 0;
84 98
85 for (i = 1000; i > 0; --i) { 99 for (i = 1000; i > 0; --i) {
86 if (!(clk_readl(group->mstpsr) & bitmask)) 100 if (!(cpg_mstp_read(group, group->mstpsr) & bitmask))
87 break; 101 break;
88 cpu_relax(); 102 cpu_relax();
89 } 103 }
@@ -114,9 +128,9 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
114 u32 value; 128 u32 value;
115 129
116 if (group->mstpsr) 130 if (group->mstpsr)
117 value = clk_readl(group->mstpsr); 131 value = cpg_mstp_read(group, group->mstpsr);
118 else 132 else
119 value = clk_readl(group->smstpcr); 133 value = cpg_mstp_read(group, group->smstpcr);
120 134
121 return !(value & BIT(clock->bit_index)); 135 return !(value & BIT(clock->bit_index));
122} 136}
@@ -188,6 +202,9 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
188 return; 202 return;
189 } 203 }
190 204
205 if (of_device_is_compatible(np, "renesas,r7s72100-mstp-clocks"))
206 group->width_8bit = true;
207
191 for (i = 0; i < MSTP_MAX_CLOCKS; ++i) 208 for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
192 clks[i] = ERR_PTR(-ENOENT); 209 clks[i] = ERR_PTR(-ENOENT);
193 210
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 8c8b495cbf0d..cdc092a1d9ef 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
586 GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam", 586 GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
587 GATE_BUS_TOP, 24, 0, 0), 587 GATE_BUS_TOP, 24, 0, 0),
588 GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", 588 GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
589 GATE_BUS_TOP, 27, 0, 0), 589 GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
590}; 590};
591 591
592static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = { 592static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
956 GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0), 956 GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
957 957
958 GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys", 958 GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
959 GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0), 959 GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
960 GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2", 960 GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
961 GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0), 961 GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
962 962
963 GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d", 963 GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
964 GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0), 964 GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
965 GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d", 965 GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
966 GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0), 966 GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
967 GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg", 967 GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
968 GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0), 968 GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
969 GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0", 969 GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
970 GATE_BUS_TOP, 5, 0, 0), 970 GATE_BUS_TOP, 5, 0, 0),
971 GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl", 971 GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
972 GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0), 972 GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
973 GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl", 973 GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
974 GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0), 974 GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
975 GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp", 975 GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
@@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
983 GATE(0, "aclk166", "mout_user_aclk166", 983 GATE(0, "aclk166", "mout_user_aclk166",
984 GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0), 984 GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
985 GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333", 985 GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
986 GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0), 986 GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
987 GATE(0, "aclk400_isp", "mout_user_aclk400_isp", 987 GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
988 GATE_BUS_TOP, 16, 0, 0), 988 GATE_BUS_TOP, 16, 0, 0),
989 GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl", 989 GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
990 GATE_BUS_TOP, 17, 0, 0), 990 GATE_BUS_TOP, 17, 0, 0),
991 GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1", 991 GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
992 GATE_BUS_TOP, 18, 0, 0), 992 GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
993 GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24", 993 GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
994 GATE_BUS_TOP, 28, 0, 0), 994 GATE_BUS_TOP, 28, 0, 0),
995 GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m", 995 GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
996 GATE_BUS_TOP, 29, 0, 0), 996 GATE_BUS_TOP, 29, 0, 0),
997 997
998 GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1", 998 GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
999 SRC_MASK_TOP2, 24, 0, 0), 999 SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
1000 1000
1001 GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", 1001 GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
1002 SRC_MASK_TOP7, 20, 0, 0), 1002 SRC_MASK_TOP7, 20, 0, 0),
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 4da1dc2278bd..670ff0f25b67 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
495 if (mct_int_type == MCT_INT_SPI) { 495 if (mct_int_type == MCT_INT_SPI) {
496 if (evt->irq != -1) 496 if (evt->irq != -1)
497 disable_irq_nosync(evt->irq); 497 disable_irq_nosync(evt->irq);
498 exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
498 } else { 499 } else {
499 disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); 500 disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
500 } 501 }
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 4fda623e55bb..c94360671f41 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -784,8 +784,19 @@ static int brcm_avs_target_index(struct cpufreq_policy *policy,
784static int brcm_avs_suspend(struct cpufreq_policy *policy) 784static int brcm_avs_suspend(struct cpufreq_policy *policy)
785{ 785{
786 struct private_data *priv = policy->driver_data; 786 struct private_data *priv = policy->driver_data;
787 int ret;
788
789 ret = brcm_avs_get_pmap(priv, &priv->pmap);
790 if (ret)
791 return ret;
787 792
788 return brcm_avs_get_pmap(priv, &priv->pmap); 793 /*
794 * We can't use the P-state returned by brcm_avs_get_pmap(), since
795 * that's the initial P-state from when the P-map was downloaded to the
796 * AVS co-processor, not necessarily the P-state we are running at now.
797 * So, we get the current P-state explicitly.
798 */
799 return brcm_avs_get_pstate(priv, &priv->pmap.state);
789} 800}
790 801
791static int brcm_avs_resume(struct cpufreq_policy *policy) 802static int brcm_avs_resume(struct cpufreq_policy *policy)
@@ -954,9 +965,9 @@ static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf)
954 brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv); 965 brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
955 brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4); 966 brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
956 967
957 return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n", 968 return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u %u %u\n",
958 pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2, 969 pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
959 mdiv_p3, mdiv_p4); 970 mdiv_p3, mdiv_p4, pmap.mode, pmap.state);
960} 971}
961 972
962static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf) 973static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index bc97b6a4b1cf..7fcaf26e8f81 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -26,6 +26,8 @@ static const struct of_device_id machines[] __initconst = {
26 { .compatible = "allwinner,sun8i-a83t", }, 26 { .compatible = "allwinner,sun8i-a83t", },
27 { .compatible = "allwinner,sun8i-h3", }, 27 { .compatible = "allwinner,sun8i-h3", },
28 28
29 { .compatible = "apm,xgene-shadowcat", },
30
29 { .compatible = "arm,integrator-ap", }, 31 { .compatible = "arm,integrator-ap", },
30 { .compatible = "arm,integrator-cp", }, 32 { .compatible = "arm,integrator-cp", },
31 33
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6acbd4af632e..50bd6d987fc3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -857,13 +857,13 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
857 NULL, 857 NULL,
858}; 858};
859 859
860static void intel_pstate_hwp_set(const struct cpumask *cpumask) 860static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
861{ 861{
862 int min, hw_min, max, hw_max, cpu, range, adj_range; 862 int min, hw_min, max, hw_max, cpu, range, adj_range;
863 struct perf_limits *perf_limits = limits; 863 struct perf_limits *perf_limits = limits;
864 u64 value, cap; 864 u64 value, cap;
865 865
866 for_each_cpu(cpu, cpumask) { 866 for_each_cpu(cpu, policy->cpus) {
867 int max_perf_pct, min_perf_pct; 867 int max_perf_pct, min_perf_pct;
868 struct cpudata *cpu_data = all_cpu_data[cpu]; 868 struct cpudata *cpu_data = all_cpu_data[cpu];
869 s16 epp; 869 s16 epp;
@@ -949,7 +949,7 @@ skip_epp:
949static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy) 949static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
950{ 950{
951 if (hwp_active) 951 if (hwp_active)
952 intel_pstate_hwp_set(policy->cpus); 952 intel_pstate_hwp_set(policy);
953 953
954 return 0; 954 return 0;
955} 955}
@@ -968,19 +968,28 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
968 968
969static int intel_pstate_resume(struct cpufreq_policy *policy) 969static int intel_pstate_resume(struct cpufreq_policy *policy)
970{ 970{
971 int ret;
972
971 if (!hwp_active) 973 if (!hwp_active)
972 return 0; 974 return 0;
973 975
976 mutex_lock(&intel_pstate_limits_lock);
977
974 all_cpu_data[policy->cpu]->epp_policy = 0; 978 all_cpu_data[policy->cpu]->epp_policy = 0;
975 979
976 return intel_pstate_hwp_set_policy(policy); 980 ret = intel_pstate_hwp_set_policy(policy);
981
982 mutex_unlock(&intel_pstate_limits_lock);
983
984 return ret;
977} 985}
978 986
979static void intel_pstate_hwp_set_online_cpus(void) 987static void intel_pstate_update_policies(void)
980{ 988{
981 get_online_cpus(); 989 int cpu;
982 intel_pstate_hwp_set(cpu_online_mask); 990
983 put_online_cpus(); 991 for_each_possible_cpu(cpu)
992 cpufreq_update_policy(cpu);
984} 993}
985 994
986/************************** debugfs begin ************************/ 995/************************** debugfs begin ************************/
@@ -1018,10 +1027,6 @@ static void __init intel_pstate_debug_expose_params(void)
1018 struct dentry *debugfs_parent; 1027 struct dentry *debugfs_parent;
1019 int i = 0; 1028 int i = 0;
1020 1029
1021 if (hwp_active ||
1022 pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load)
1023 return;
1024
1025 debugfs_parent = debugfs_create_dir("pstate_snb", NULL); 1030 debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
1026 if (IS_ERR_OR_NULL(debugfs_parent)) 1031 if (IS_ERR_OR_NULL(debugfs_parent))
1027 return; 1032 return;
@@ -1105,11 +1110,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
1105 1110
1106 limits->no_turbo = clamp_t(int, input, 0, 1); 1111 limits->no_turbo = clamp_t(int, input, 0, 1);
1107 1112
1108 if (hwp_active)
1109 intel_pstate_hwp_set_online_cpus();
1110
1111 mutex_unlock(&intel_pstate_limits_lock); 1113 mutex_unlock(&intel_pstate_limits_lock);
1112 1114
1115 intel_pstate_update_policies();
1116
1113 return count; 1117 return count;
1114} 1118}
1115 1119
@@ -1134,11 +1138,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
1134 limits->max_perf_pct); 1138 limits->max_perf_pct);
1135 limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); 1139 limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
1136 1140
1137 if (hwp_active)
1138 intel_pstate_hwp_set_online_cpus();
1139
1140 mutex_unlock(&intel_pstate_limits_lock); 1141 mutex_unlock(&intel_pstate_limits_lock);
1141 1142
1143 intel_pstate_update_policies();
1144
1142 return count; 1145 return count;
1143} 1146}
1144 1147
@@ -1163,11 +1166,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
1163 limits->min_perf_pct); 1166 limits->min_perf_pct);
1164 limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); 1167 limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
1165 1168
1166 if (hwp_active)
1167 intel_pstate_hwp_set_online_cpus();
1168
1169 mutex_unlock(&intel_pstate_limits_lock); 1169 mutex_unlock(&intel_pstate_limits_lock);
1170 1170
1171 intel_pstate_update_policies();
1172
1171 return count; 1173 return count;
1172} 1174}
1173 1175
@@ -1233,6 +1235,25 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
1233 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); 1235 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
1234} 1236}
1235 1237
1238#define MSR_IA32_POWER_CTL_BIT_EE 19
1239
1240/* Disable energy efficiency optimization */
1241static void intel_pstate_disable_ee(int cpu)
1242{
1243 u64 power_ctl;
1244 int ret;
1245
1246 ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
1247 if (ret)
1248 return;
1249
1250 if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
1251 pr_info("Disabling energy efficiency optimization\n");
1252 power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
1253 wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
1254 }
1255}
1256
1236static int atom_get_min_pstate(void) 1257static int atom_get_min_pstate(void)
1237{ 1258{
1238 u64 value; 1259 u64 value;
@@ -1843,6 +1864,11 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
1843 {} 1864 {}
1844}; 1865};
1845 1866
1867static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
1868 ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
1869 {}
1870};
1871
1846static int intel_pstate_init_cpu(unsigned int cpunum) 1872static int intel_pstate_init_cpu(unsigned int cpunum)
1847{ 1873{
1848 struct cpudata *cpu; 1874 struct cpudata *cpu;
@@ -1873,6 +1899,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
1873 cpu->cpu = cpunum; 1899 cpu->cpu = cpunum;
1874 1900
1875 if (hwp_active) { 1901 if (hwp_active) {
1902 const struct x86_cpu_id *id;
1903
1904 id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
1905 if (id)
1906 intel_pstate_disable_ee(cpunum);
1907
1876 intel_pstate_hwp_enable(cpu); 1908 intel_pstate_hwp_enable(cpu);
1877 pid_params.sample_rate_ms = 50; 1909 pid_params.sample_rate_ms = 50;
1878 pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC; 1910 pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
@@ -2003,7 +2035,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2003 limits = &performance_limits; 2035 limits = &performance_limits;
2004 perf_limits = limits; 2036 perf_limits = limits;
2005 } 2037 }
2006 if (policy->max >= policy->cpuinfo.max_freq) { 2038 if (policy->max >= policy->cpuinfo.max_freq &&
2039 !limits->no_turbo) {
2007 pr_debug("set performance\n"); 2040 pr_debug("set performance\n");
2008 intel_pstate_set_performance_limits(perf_limits); 2041 intel_pstate_set_performance_limits(perf_limits);
2009 goto out; 2042 goto out;
@@ -2045,6 +2078,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
2045 policy->policy != CPUFREQ_POLICY_PERFORMANCE) 2078 policy->policy != CPUFREQ_POLICY_PERFORMANCE)
2046 return -EINVAL; 2079 return -EINVAL;
2047 2080
2081 /* When per-CPU limits are used, sysfs limits are not used */
2082 if (!per_cpu_limits) {
2083 unsigned int max_freq, min_freq;
2084
2085 max_freq = policy->cpuinfo.max_freq *
2086 limits->max_sysfs_pct / 100;
2087 min_freq = policy->cpuinfo.max_freq *
2088 limits->min_sysfs_pct / 100;
2089 cpufreq_verify_within_limits(policy, min_freq, max_freq);
2090 }
2091
2048 return 0; 2092 return 0;
2049} 2093}
2050 2094
@@ -2153,8 +2197,12 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
2153 if (per_cpu_limits) 2197 if (per_cpu_limits)
2154 perf_limits = cpu->perf_limits; 2198 perf_limits = cpu->perf_limits;
2155 2199
2200 mutex_lock(&intel_pstate_limits_lock);
2201
2156 intel_pstate_update_perf_limits(policy, perf_limits); 2202 intel_pstate_update_perf_limits(policy, perf_limits);
2157 2203
2204 mutex_unlock(&intel_pstate_limits_lock);
2205
2158 return 0; 2206 return 0;
2159} 2207}
2160 2208
@@ -2487,7 +2535,10 @@ hwp_cpu_matched:
2487 if (rc) 2535 if (rc)
2488 goto out; 2536 goto out;
2489 2537
2490 intel_pstate_debug_expose_params(); 2538 if (intel_pstate_driver == &intel_pstate && !hwp_active &&
2539 pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
2540 intel_pstate_debug_expose_params();
2541
2491 intel_pstate_sysfs_expose_params(); 2542 intel_pstate_sysfs_expose_params();
2492 2543
2493 if (hwp_active) 2544 if (hwp_active)
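Note the ordering in the three sysfs store paths: intel_pstate_limits_lock is dropped before intel_pstate_update_policies() runs. Since cpufreq_update_policy() re-enters the driver's callbacks, and intel_cpufreq_verify_policy() now takes the same mutex (see the hunk above), triggering the update while still holding the lock would presumably self-deadlock; kernel mutexes are not recursive. A sketch of the pattern with illustrative names:

#include <linux/mutex.h>

static DEFINE_MUTEX(limits_lock);  /* stand-in for intel_pstate_limits_lock */

static void verify_cb(void)
{
        mutex_lock(&limits_lock);  /* as intel_cpufreq_verify_policy() now does */
        /* ... validate limits ... */
        mutex_unlock(&limits_lock);
}

static void store_fixed(void)
{
        mutex_lock(&limits_lock);
        /* ... update limits ... */
        mutex_unlock(&limits_lock);
        verify_cb();    /* safe: calling this with the lock held would deadlock */
}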
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index e2ce8190ecc9..612898b4aaad 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -959,7 +959,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
959static void ccp5_config(struct ccp_device *ccp) 959static void ccp5_config(struct ccp_device *ccp)
960{ 960{
961 /* Public side */ 961 /* Public side */
962 iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); 962 iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
963} 963}
964 964
965static void ccp5other_config(struct ccp_device *ccp) 965static void ccp5other_config(struct ccp_device *ccp)
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 830f35e6005f..649e5610a5ce 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -238,6 +238,7 @@ struct ccp_dma_chan {
238 struct ccp_device *ccp; 238 struct ccp_device *ccp;
239 239
240 spinlock_t lock; 240 spinlock_t lock;
241 struct list_head created;
241 struct list_head pending; 242 struct list_head pending;
242 struct list_head active; 243 struct list_head active;
243 struct list_head complete; 244 struct list_head complete;
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 6553912804f7..e5d9278f4019 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan)
63 ccp_free_desc_resources(chan->ccp, &chan->complete); 63 ccp_free_desc_resources(chan->ccp, &chan->complete);
64 ccp_free_desc_resources(chan->ccp, &chan->active); 64 ccp_free_desc_resources(chan->ccp, &chan->active);
65 ccp_free_desc_resources(chan->ccp, &chan->pending); 65 ccp_free_desc_resources(chan->ccp, &chan->pending);
66 ccp_free_desc_resources(chan->ccp, &chan->created);
66 67
67 spin_unlock_irqrestore(&chan->lock, flags); 68 spin_unlock_irqrestore(&chan->lock, flags);
68} 69}
@@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
273 spin_lock_irqsave(&chan->lock, flags); 274 spin_lock_irqsave(&chan->lock, flags);
274 275
275 cookie = dma_cookie_assign(tx_desc); 276 cookie = dma_cookie_assign(tx_desc);
277 list_del(&desc->entry);
276 list_add_tail(&desc->entry, &chan->pending); 278 list_add_tail(&desc->entry, &chan->pending);
277 279
278 spin_unlock_irqrestore(&chan->lock, flags); 280 spin_unlock_irqrestore(&chan->lock, flags);
@@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
426 428
427 spin_lock_irqsave(&chan->lock, sflags); 429 spin_lock_irqsave(&chan->lock, sflags);
428 430
429 list_add_tail(&desc->entry, &chan->pending); 431 list_add_tail(&desc->entry, &chan->created);
430 432
431 spin_unlock_irqrestore(&chan->lock, sflags); 433 spin_unlock_irqrestore(&chan->lock, sflags);
432 434
@@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
610 /* TODO: Purge the complete list? */ 612 /* TODO: Purge the complete list? */
611 ccp_free_desc_resources(chan->ccp, &chan->active); 613 ccp_free_desc_resources(chan->ccp, &chan->active);
612 ccp_free_desc_resources(chan->ccp, &chan->pending); 614 ccp_free_desc_resources(chan->ccp, &chan->pending);
615 ccp_free_desc_resources(chan->ccp, &chan->created);
613 616
614 spin_unlock_irqrestore(&chan->lock, flags); 617 spin_unlock_irqrestore(&chan->lock, flags);
615 618
@@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
679 chan->ccp = ccp; 682 chan->ccp = ccp;
680 683
681 spin_lock_init(&chan->lock); 684 spin_lock_init(&chan->lock);
685 INIT_LIST_HEAD(&chan->created);
682 INIT_LIST_HEAD(&chan->pending); 686 INIT_LIST_HEAD(&chan->pending);
683 INIT_LIST_HEAD(&chan->active); 687 INIT_LIST_HEAD(&chan->active);
684 INIT_LIST_HEAD(&chan->complete); 688 INIT_LIST_HEAD(&chan->complete);
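With the new list, every descriptor sits on exactly one list for its whole life: created at allocation, pending after submit, then active and complete; both ccp_free_chan_resources() and ccp_terminate_all() now also purge created, so descriptors that were prepared but never submitted no longer leak. The list_del() + list_add_tail() pair added in ccp_tx_submit() is the standard move idiom, which <linux/list.h> also offers as a single call; a sketch with a hypothetical item type:

#include <linux/list.h>

struct item_demo {
        struct list_head entry;         /* assumed to be on some list */
};

static void submit_item_demo(struct item_demo *it, struct list_head *pending)
{
        /* Equivalent to list_del(&it->entry) + list_add_tail(...). */
        list_move_tail(&it->entry, pending);
}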
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 2ed1e24b44a8..b4b78b37f8a6 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -158,7 +158,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
158 case CRYPTO_ALG_TYPE_AEAD: 158 case CRYPTO_ALG_TYPE_AEAD:
159 ctx_req.req.aead_req = (struct aead_request *)req; 159 ctx_req.req.aead_req = (struct aead_request *)req;
160 ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req); 160 ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
161 dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst, 161 dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
162 ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE); 162 ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
163 if (ctx_req.ctx.reqctx->skb) { 163 if (ctx_req.ctx.reqctx->skb) {
164 kfree_skb(ctx_req.ctx.reqctx->skb); 164 kfree_skb(ctx_req.ctx.reqctx->skb);
@@ -1362,8 +1362,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
1362 struct chcr_wr *chcr_req; 1362 struct chcr_wr *chcr_req;
1363 struct cpl_rx_phys_dsgl *phys_cpl; 1363 struct cpl_rx_phys_dsgl *phys_cpl;
1364 struct phys_sge_parm sg_param; 1364 struct phys_sge_parm sg_param;
1365 struct scatterlist *src, *dst; 1365 struct scatterlist *src;
1366 struct scatterlist src_sg[2], dst_sg[2];
1367 unsigned int frags = 0, transhdr_len; 1366 unsigned int frags = 0, transhdr_len;
1368 unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0; 1367 unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
1369 unsigned int kctx_len = 0; 1368 unsigned int kctx_len = 0;
@@ -1383,19 +1382,21 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
1383 1382
1384 if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) 1383 if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
1385 goto err; 1384 goto err;
1386 src = scatterwalk_ffwd(src_sg, req->src, req->assoclen); 1385 src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1387 dst = src; 1386 reqctx->dst = src;
1387
1388 if (req->src != req->dst) { 1388 if (req->src != req->dst) {
1389 err = chcr_copy_assoc(req, aeadctx); 1389 err = chcr_copy_assoc(req, aeadctx);
1390 if (err) 1390 if (err)
1391 return ERR_PTR(err); 1391 return ERR_PTR(err);
1392 dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen); 1392 reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1393 req->assoclen);
1393 } 1394 }
1394 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) { 1395 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
1395 null = 1; 1396 null = 1;
1396 assoclen = 0; 1397 assoclen = 0;
1397 } 1398 }
1398 reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen + 1399 reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1399 (op_type ? -authsize : authsize)); 1400 (op_type ? -authsize : authsize));
1400 if (reqctx->dst_nents <= 0) { 1401 if (reqctx->dst_nents <= 0) {
1401 pr_err("AUTHENC:Invalid Destination sg entries\n"); 1402 pr_err("AUTHENC:Invalid Destination sg entries\n");
@@ -1460,7 +1461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
1460 sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); 1461 sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1461 sg_param.qid = qid; 1462 sg_param.qid = qid;
1462 sg_param.align = 0; 1463 sg_param.align = 0;
1463 if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst, 1464 if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1464 &sg_param)) 1465 &sg_param))
1465 goto dstmap_fail; 1466 goto dstmap_fail;
1466 1467
@@ -1711,8 +1712,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
1711 struct chcr_wr *chcr_req; 1712 struct chcr_wr *chcr_req;
1712 struct cpl_rx_phys_dsgl *phys_cpl; 1713 struct cpl_rx_phys_dsgl *phys_cpl;
1713 struct phys_sge_parm sg_param; 1714 struct phys_sge_parm sg_param;
1714 struct scatterlist *src, *dst; 1715 struct scatterlist *src;
1715 struct scatterlist src_sg[2], dst_sg[2];
1716 unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE; 1716 unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
1717 unsigned int dst_size = 0, kctx_len; 1717 unsigned int dst_size = 0, kctx_len;
1718 unsigned int sub_type; 1718 unsigned int sub_type;
@@ -1728,17 +1728,19 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
1728 if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) 1728 if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
1729 goto err; 1729 goto err;
1730 sub_type = get_aead_subtype(tfm); 1730 sub_type = get_aead_subtype(tfm);
1731 src = scatterwalk_ffwd(src_sg, req->src, req->assoclen); 1731 src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1732 dst = src; 1732 reqctx->dst = src;
1733
1733 if (req->src != req->dst) { 1734 if (req->src != req->dst) {
1734 err = chcr_copy_assoc(req, aeadctx); 1735 err = chcr_copy_assoc(req, aeadctx);
1735 if (err) { 1736 if (err) {
1736 pr_err("AAD copy to destination buffer fails\n"); 1737 pr_err("AAD copy to destination buffer fails\n");
1737 return ERR_PTR(err); 1738 return ERR_PTR(err);
1738 } 1739 }
1739 dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen); 1740 reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1741 req->assoclen);
1740 } 1742 }
1741 reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen + 1743 reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1742 (op_type ? -authsize : authsize)); 1744 (op_type ? -authsize : authsize));
1743 if (reqctx->dst_nents <= 0) { 1745 if (reqctx->dst_nents <= 0) {
1744 pr_err("CCM:Invalid Destination sg entries\n"); 1746 pr_err("CCM:Invalid Destination sg entries\n");
@@ -1777,7 +1779,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
1777 sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); 1779 sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1778 sg_param.qid = qid; 1780 sg_param.qid = qid;
1779 sg_param.align = 0; 1781 sg_param.align = 0;
1780 if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst, 1782 if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1781 &sg_param)) 1783 &sg_param))
1782 goto dstmap_fail; 1784 goto dstmap_fail;
1783 1785
@@ -1809,8 +1811,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
1809 struct chcr_wr *chcr_req; 1811 struct chcr_wr *chcr_req;
1810 struct cpl_rx_phys_dsgl *phys_cpl; 1812 struct cpl_rx_phys_dsgl *phys_cpl;
1811 struct phys_sge_parm sg_param; 1813 struct phys_sge_parm sg_param;
1812 struct scatterlist *src, *dst; 1814 struct scatterlist *src;
1813 struct scatterlist src_sg[2], dst_sg[2];
1814 unsigned int frags = 0, transhdr_len; 1815 unsigned int frags = 0, transhdr_len;
1815 unsigned int ivsize = AES_BLOCK_SIZE; 1816 unsigned int ivsize = AES_BLOCK_SIZE;
1816 unsigned int dst_size = 0, kctx_len; 1817 unsigned int dst_size = 0, kctx_len;
@@ -1832,13 +1833,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
1832 if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0) 1833 if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
1833 goto err; 1834 goto err;
1834 1835
1835 src = scatterwalk_ffwd(src_sg, req->src, req->assoclen); 1836 src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
1836 dst = src; 1837 reqctx->dst = src;
1837 if (req->src != req->dst) { 1838 if (req->src != req->dst) {
1838 err = chcr_copy_assoc(req, aeadctx); 1839 err = chcr_copy_assoc(req, aeadctx);
1839 if (err) 1840 if (err)
1840 return ERR_PTR(err); 1841 return ERR_PTR(err);
1841 dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen); 1842 reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
1843 req->assoclen);
1842 } 1844 }
1843 1845
1844 if (!req->cryptlen) 1846 if (!req->cryptlen)
@@ -1848,7 +1850,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
1848 crypt_len = AES_BLOCK_SIZE; 1850 crypt_len = AES_BLOCK_SIZE;
1849 else 1851 else
1850 crypt_len = req->cryptlen; 1852 crypt_len = req->cryptlen;
1851 reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen + 1853 reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
1852 (op_type ? -authsize : authsize)); 1854 (op_type ? -authsize : authsize));
1853 if (reqctx->dst_nents <= 0) { 1855 if (reqctx->dst_nents <= 0) {
1854 pr_err("GCM:Invalid Destination sg entries\n"); 1856 pr_err("GCM:Invalid Destination sg entries\n");
@@ -1923,7 +1925,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
1923 sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize); 1925 sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
1924 sg_param.qid = qid; 1926 sg_param.qid = qid;
1925 sg_param.align = 0; 1927 sg_param.align = 0;
1926 if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst, 1928 if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
1927 &sg_param)) 1929 &sg_param))
1928 goto dstmap_fail; 1930 goto dstmap_fail;
1929 1931
@@ -1937,7 +1939,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
1937 write_sg_to_skb(skb, &frags, src, req->cryptlen); 1939 write_sg_to_skb(skb, &frags, src, req->cryptlen);
1938 } else { 1940 } else {
1939 aes_gcm_empty_pld_pad(req->dst, authsize - 1); 1941 aes_gcm_empty_pld_pad(req->dst, authsize - 1);
1940 write_sg_to_skb(skb, &frags, dst, crypt_len); 1942 write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
1943
1941 } 1944 }
1942 1945
1943 create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1, 1946 create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
@@ -2189,8 +2192,8 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
2189 unsigned int ck_size; 2192 unsigned int ck_size;
2190 int ret = 0, key_ctx_size = 0; 2193 int ret = 0, key_ctx_size = 0;
2191 2194
2192 if (get_aead_subtype(aead) == 2195 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
2193 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { 2196 keylen > 3) {
2194 keylen -= 4; /* nonce/salt is present in the last 4 bytes */ 2197 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
2195 memcpy(aeadctx->salt, key + keylen, 4); 2198 memcpy(aeadctx->salt, key + keylen, 4);
2196 } 2199 }
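A use-after-scope fix runs through all three create_*_wr() hunks above: src_sg[2]/dst_sg[2] were stack arrays, yet the scatterlist that scatterwalk_ffwd() built in them was still referenced after the function returned — chcr_handle_resp() even DMA-unmaps it at completion time. Moving the arrays into struct chcr_aead_reqctx (see the chcr_crypto.h hunk further down) ties their lifetime to the request. A standalone C sketch of the shape of the fix; the names mimic the driver but the types are simplified stand-ins:

#include <stdio.h>

struct sg { unsigned long addr; unsigned int len; };

/* Lives as long as the request, like chcr_aead_reqctx. */
struct req_ctx {
    struct sg dstffwd[2];
    struct sg *dst;
};

/*
 * Before the fix this storage was a local array in the prepare
 * function, so 'dst' dangled once it returned; storing it in the
 * request context keeps it valid until completion.
 */
static void prepare(struct req_ctx *ctx)
{
    ctx->dstffwd[0] = (struct sg){ 0x1000, 64 };
    ctx->dstffwd[1] = (struct sg){ 0, 0 };
    ctx->dst = ctx->dstffwd;
}

int main(void)
{
    struct req_ctx ctx;

    prepare(&ctx);
    /* The completion path can still use ctx.dst safely. */
    printf("dst[0].len=%u\n", ctx.dst[0].len);
    return 0;
}
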
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 918da8e6e2d8..1c65f07e1cc9 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -52,6 +52,7 @@ static struct cxgb4_uld_info chcr_uld_info = {
52int assign_chcr_device(struct chcr_dev **dev) 52int assign_chcr_device(struct chcr_dev **dev)
53{ 53{
54 struct uld_ctx *u_ctx; 54 struct uld_ctx *u_ctx;
55 int ret = -ENXIO;
55 56
56 /* 57 /*
57 * Which device to use if multiple devices are available TODO 58 * Which device to use if multiple devices are available TODO
@@ -59,15 +60,14 @@ int assign_chcr_device(struct chcr_dev **dev)
59 * must go to the same device to maintain the ordering. 60 * must go to the same device to maintain the ordering.
60 */ 61 */
61 mutex_lock(&dev_mutex); /* TODO ? */ 62 mutex_lock(&dev_mutex); /* TODO ? */
62 u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry); 63 list_for_each_entry(u_ctx, &uld_ctx_list, entry)
63 if (!u_ctx) { 64 if (u_ctx && u_ctx->dev) {
64 mutex_unlock(&dev_mutex); 65 *dev = u_ctx->dev;
65 return -ENXIO; 66 ret = 0;
67 break;
66 } 68 }
67
68 *dev = u_ctx->dev;
69 mutex_unlock(&dev_mutex); 69 mutex_unlock(&dev_mutex);
70 return 0; 70 return ret;
71} 71}
72 72
73static int chcr_dev_add(struct uld_ctx *u_ctx) 73static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -202,10 +202,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
202 202
203static int __init chcr_crypto_init(void) 203static int __init chcr_crypto_init(void)
204{ 204{
205 if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) { 205 if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
206 pr_err("ULD register fail: No chcr crypto support in cxgb4"); 206 pr_err("ULD register fail: No chcr crypto support in cxgb4");
207 return -1;
208 }
209 207
210 return 0; 208 return 0;
211} 209}
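assign_chcr_device() is also made robust: list_first_entry() never returns NULL, even on an empty list, so the old NULL check was dead code and an empty uld_ctx_list could be dereferenced. The rewrite walks the list under dev_mutex and succeeds only when an entry actually carries a device. A minimal userspace model of that selection loop, with a pthread mutex and a singly linked list standing in for the kernel types:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct uld_ctx { struct uld_ctx *next; void *dev; };

static struct uld_ctx *uld_list;
static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

static int assign_device(void **dev)
{
    struct uld_ctx *u;
    int ret = -ENXIO;               /* default: nothing usable */

    pthread_mutex_lock(&dev_mutex);
    for (u = uld_list; u; u = u->next) {
        if (u->dev) {               /* first ctx with a real device */
            *dev = u->dev;
            ret = 0;
            break;
        }
    }
    pthread_mutex_unlock(&dev_mutex);
    return ret;
}

int main(void)
{
    void *dev = NULL;
    int x = 42;
    struct uld_ctx b = { NULL, &x };   /* has a device */
    struct uld_ctx a = { &b, NULL };   /* does not */
    int ret;

    uld_list = &a;
    ret = assign_device(&dev);
    printf("ret=%d dev=%p\n", ret, dev);
    return 0;
}
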
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d5af7d64a763..7ec0a8f12475 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -158,6 +158,9 @@ struct ablk_ctx {
158}; 158};
159struct chcr_aead_reqctx { 159struct chcr_aead_reqctx {
160 struct sk_buff *skb; 160 struct sk_buff *skb;
161 struct scatterlist *dst;
162 struct scatterlist srcffwd[2];
163 struct scatterlist dstffwd[2];
161 short int dst_nents; 164 short int dst_nents;
162 u16 verify; 165 u16 verify;
163 u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; 166 u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index a768da7138a1..b7872f62f674 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -273,7 +273,8 @@ struct mv_cesa_op_ctx {
273#define CESA_TDMA_SRC_IN_SRAM BIT(30) 273#define CESA_TDMA_SRC_IN_SRAM BIT(30)
274#define CESA_TDMA_END_OF_REQ BIT(29) 274#define CESA_TDMA_END_OF_REQ BIT(29)
275#define CESA_TDMA_BREAK_CHAIN BIT(28) 275#define CESA_TDMA_BREAK_CHAIN BIT(28)
276#define CESA_TDMA_TYPE_MSK GENMASK(27, 0) 276#define CESA_TDMA_SET_STATE BIT(27)
277#define CESA_TDMA_TYPE_MSK GENMASK(26, 0)
277#define CESA_TDMA_DUMMY 0 278#define CESA_TDMA_DUMMY 0
278#define CESA_TDMA_DATA 1 279#define CESA_TDMA_DATA 1
279#define CESA_TDMA_OP 2 280#define CESA_TDMA_OP 2
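The cesa.h hunk shrinks the TDMA type mask by one bit so that BIT(27) can serve as a CESA_TDMA_SET_STATE flag in the same 32-bit flags word. A compile-and-run sketch of that carve-out — the constants mirror the header, the decoding below is purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)          (1u << (n))
#define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))

#define CESA_TDMA_SET_STATE  BIT(27)
#define CESA_TDMA_TYPE_MSK   GENMASK(26, 0)
#define CESA_TDMA_OP         2u

int main(void)
{
    uint32_t flags = CESA_TDMA_OP | CESA_TDMA_SET_STATE;

    /* type and flag decode independently from one word */
    printf("type=%u set_state=%d\n",
           flags & CESA_TDMA_TYPE_MSK,
           !!(flags & CESA_TDMA_SET_STATE));
    return 0;
}
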
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 317cf029c0cf..77c0fb936f47 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
280 sreq->offset = 0; 280 sreq->offset = 0;
281} 281}
282 282
283static void mv_cesa_ahash_dma_step(struct ahash_request *req)
284{
285 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
286 struct mv_cesa_req *base = &creq->base;
287
288 /* We must explicitly set the digest state. */
289 if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
290 struct mv_cesa_engine *engine = base->engine;
291 int i;
292
293 /* Set the hash state in the IVDIG regs. */
294 for (i = 0; i < ARRAY_SIZE(creq->state); i++)
295 writel_relaxed(creq->state[i], engine->regs +
296 CESA_IVDIG(i));
297 }
298
299 mv_cesa_dma_step(base);
300}
301
283static void mv_cesa_ahash_step(struct crypto_async_request *req) 302static void mv_cesa_ahash_step(struct crypto_async_request *req)
284{ 303{
285 struct ahash_request *ahashreq = ahash_request_cast(req); 304 struct ahash_request *ahashreq = ahash_request_cast(req);
286 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); 305 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
287 306
288 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) 307 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
289 mv_cesa_dma_step(&creq->base); 308 mv_cesa_ahash_dma_step(ahashreq);
290 else 309 else
291 mv_cesa_ahash_std_step(ahashreq); 310 mv_cesa_ahash_std_step(ahashreq);
292} 311}
@@ -584,12 +603,16 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
584 struct mv_cesa_ahash_dma_iter iter; 603 struct mv_cesa_ahash_dma_iter iter;
585 struct mv_cesa_op_ctx *op = NULL; 604 struct mv_cesa_op_ctx *op = NULL;
586 unsigned int frag_len; 605 unsigned int frag_len;
606 bool set_state = false;
587 int ret; 607 int ret;
588 u32 type; 608 u32 type;
589 609
590 basereq->chain.first = NULL; 610 basereq->chain.first = NULL;
591 basereq->chain.last = NULL; 611 basereq->chain.last = NULL;
592 612
613 if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
614 set_state = true;
615
593 if (creq->src_nents) { 616 if (creq->src_nents) {
594 ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, 617 ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
595 DMA_TO_DEVICE); 618 DMA_TO_DEVICE);
@@ -683,6 +706,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
683 if (type != CESA_TDMA_RESULT) 706 if (type != CESA_TDMA_RESULT)
684 basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN; 707 basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
685 708
709 if (set_state) {
710 /*
711 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
712 * let the step logic know that the IVDIG registers should be
713 * explicitly set before launching a TDMA chain.
714 */
715 basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
716 }
717
686 return 0; 718 return 0;
687 719
688err_free_tdma: 720err_free_tdma:
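hash.c above and tdma.c just below use that flag as a pair: a chain that does not start at the first fragment of a hash depends on the IVDIG digest registers holding its intermediate state, so tdma.c refuses to glue it onto a running chain, and the new mv_cesa_ahash_dma_step() rewrites IVDIG from creq->state before launching. A userspace model of "restore saved state into a register file before stepping" — the five-word digest and the ivdig[] array are stand-ins, not the engine's actual register layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SET_STATE 0x1u
#define NWORDS    5              /* digest-state size, assumed here */

static uint32_t ivdig[NWORDS];   /* stand-in for the IVDIG registers */

struct req {
    unsigned int flags;
    uint32_t state[NWORDS];      /* saved partial digest */
};

static void dma_step(struct req *r)
{
    /* Restore digest state only when the chain was flagged. */
    if (r->flags & SET_STATE)
        memcpy(ivdig, r->state, sizeof(ivdig));
    /* ...then kick the engine (omitted in this sketch). */
}

int main(void)
{
    struct req r = { .flags = SET_STATE,
                     .state = { 1, 2, 3, 4, 5 } };

    dma_step(&r);
    printf("ivdig[0]=%u\n", (unsigned)ivdig[0]);
    return 0;
}
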
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 4416b88eca70..c76375ff376d 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -109,7 +109,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
109 last->next = dreq->chain.first; 109 last->next = dreq->chain.first;
110 engine->chain.last = dreq->chain.last; 110 engine->chain.last = dreq->chain.last;
111 111
112 if (!(last->flags & CESA_TDMA_BREAK_CHAIN)) 112 /*
113 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
114 * the last element of the current chain, or if the request
 115 * being queued needs the IV regs to be set before launching
116 * the request.
117 */
118 if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
119 !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
113 last->next_dma = dreq->chain.first->cur_dma; 120 last->next_dma = dreq->chain.first->cur_dma;
114 } 121 }
115} 122}
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index bc5cbc193aae..5b2d78a5b5aa 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
233 &hw_data->accel_capabilities_mask); 233 &hw_data->accel_capabilities_mask);
234 234
235 /* Find and map all the device's BARS */ 235 /* Find and map all the device's BARS */
236 i = 0; 236 i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
237 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); 237 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
238 for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask, 238 for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
239 ADF_PCI_MAX_BARS * 2) { 239 ADF_PCI_MAX_BARS * 2) {
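In the qat probe, BAR slot assignment now starts at index 1 when the FUSECTL bit is set, so fused parts keep slot 0 free instead of shifting every discovered BAR down by one. A small sketch of that indexing under a fuse test — the mask values are invented, for_each_set_bit() is modeled with a plain loop, and the slot semantics are this editor's reading of the change, not a statement about the hardware:

#include <stdint.h>
#include <stdio.h>

#define FUSECTL_MASK 0x80000000u
#define MAX_BARS 3

struct bar { unsigned long base; };

int main(void)
{
    uint32_t fuses = FUSECTL_MASK;    /* pretend fuse readout */
    unsigned long bar_mask = 0x5;     /* two memory BARs present */
    struct bar bars[MAX_BARS] = {{ 0 }};
    /* Fused parts reserve slot 0, so discovered BARs start at 1. */
    int i = (fuses & FUSECTL_MASK) ? 1 : 0;
    int bar_nr;

    for (bar_nr = 0; bar_nr < 6 && i < MAX_BARS; bar_nr++) {
        if (!(bar_mask & (1ul << bar_nr)))
            continue;
        bars[i++].base = 0x1000ul * (bar_nr + 1);  /* fake mapping */
    }
    printf("slot0=%#lx slot1=%#lx slot2=%#lx\n",
           bars[0].base, bars[1].base, bars[2].base);
    return 0;
}
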
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index e8822536530b..33f0a6251e38 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -69,6 +69,7 @@
69#define ADF_ERRSOU5 (0x3A000 + 0xD8) 69#define ADF_ERRSOU5 (0x3A000 + 0xD8)
70#define ADF_DEVICE_FUSECTL_OFFSET 0x40 70#define ADF_DEVICE_FUSECTL_OFFSET 0x40
71#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C 71#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
72#define ADF_DEVICE_FUSECTL_MASK 0x80000000
72#define ADF_PCI_MAX_BARS 3 73#define ADF_PCI_MAX_BARS 3
73#define ADF_DEVICE_NAME_LENGTH 32 74#define ADF_DEVICE_NAME_LENGTH 32
74#define ADF_ETR_MAX_RINGS_PER_BANK 16 75#define ADF_ETR_MAX_RINGS_PER_BANK 16
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 1e480f140663..8c4fd255a601 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
456 unsigned int csr_val; 456 unsigned int csr_val;
457 int times = 30; 457 int times = 30;
458 458
459 if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) 459 if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
460 return 0; 460 return 0;
461 461
462 csr_val = ADF_CSR_RD(csr_addr, 0); 462 csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
716 (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v + 716 (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
717 LOCAL_TO_XFER_REG_OFFSET); 717 LOCAL_TO_XFER_REG_OFFSET);
718 handle->pci_dev = pci_info->pci_dev; 718 handle->pci_dev = pci_info->pci_dev;
719 if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) { 719 if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
720 sram_bar = 720 sram_bar =
721 &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)]; 721 &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
722 handle->hal_sram_addr_v = sram_bar->virt_addr; 722 handle->hal_sram_addr_v = sram_bar->virt_addr;
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index a324801d6a66..47206a21bb90 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -593,11 +593,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
593 list_add(&devfreq->node, &devfreq_list); 593 list_add(&devfreq->node, &devfreq_list);
594 594
595 governor = find_devfreq_governor(devfreq->governor_name); 595 governor = find_devfreq_governor(devfreq->governor_name);
596 if (!IS_ERR(governor)) 596 if (IS_ERR(governor)) {
597 devfreq->governor = governor; 597 dev_err(dev, "%s: Unable to find governor for the device\n",
598 if (devfreq->governor) 598 __func__);
599 err = devfreq->governor->event_handler(devfreq, 599 err = PTR_ERR(governor);
600 DEVFREQ_GOV_START, NULL); 600 goto err_init;
601 }
602
603 devfreq->governor = governor;
604 err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
605 NULL);
601 if (err) { 606 if (err) {
602 dev_err(dev, "%s: Unable to start governor for the device\n", 607 dev_err(dev, "%s: Unable to start governor for the device\n",
603 __func__); 608 __func__);
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index a8ed7792ece2..9af86f46fbec 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -497,7 +497,7 @@ passive:
497 if (IS_ERR(bus->devfreq)) { 497 if (IS_ERR(bus->devfreq)) {
498 dev_err(dev, 498 dev_err(dev,
499 "failed to add devfreq dev with passive governor\n"); 499 "failed to add devfreq dev with passive governor\n");
500 ret = -EPROBE_DEFER; 500 ret = PTR_ERR(bus->devfreq);
501 goto err; 501 goto err;
502 } 502 }
503 503
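Both devfreq hunks are about honest error propagation: devfreq_add_device() now bails out when the governor lookup yields an ERR_PTR instead of carrying on with a NULL governor, and exynos-bus forwards the real PTR_ERR() instead of a hardcoded -EPROBE_DEFER. A self-contained sketch of the ERR_PTR/IS_ERR/PTR_ERR convention, reimplemented here in plain C for demonstration (in the kernel these helpers live in linux/err.h):

#include <errno.h>
#include <stdio.h>

/* Encode a small negative errno in the top of the pointer range. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *find_governor(const char *name)
{
    (void)name;
    return ERR_PTR(-ENODEV);          /* simulate "not found" */
}

static int add_device(void)
{
    void *gov = find_governor("simple_ondemand");

    if (IS_ERR(gov))
        return (int)PTR_ERR(gov);     /* propagate the real error */
    return 0;
}

int main(void)
{
    printf("add_device() = %d\n", add_device());
    return 0;
}
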
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index d5ba43a87a68..200828c60db9 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -153,6 +153,8 @@ struct cppi41_dd {
153 153
154 /* context for suspend/resume */ 154 /* context for suspend/resume */
155 unsigned int dma_tdfdq; 155 unsigned int dma_tdfdq;
156
157 bool is_suspended;
156}; 158};
157 159
158#define FIST_COMPLETION_QUEUE 93 160#define FIST_COMPLETION_QUEUE 93
@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
257 BUG_ON(desc_num >= ALLOC_DECS_NUM); 259 BUG_ON(desc_num >= ALLOC_DECS_NUM);
258 c = cdd->chan_busy[desc_num]; 260 c = cdd->chan_busy[desc_num];
259 cdd->chan_busy[desc_num] = NULL; 261 cdd->chan_busy[desc_num] = NULL;
262
263 /* Usecount for chan_busy[], paired with push_desc_queue() */
264 pm_runtime_put(cdd->ddev.dev);
265
260 return c; 266 return c;
261} 267}
262 268
@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
317 323
318 while (val) { 324 while (val) {
319 u32 desc, len; 325 u32 desc, len;
320 int error;
321 326
322 error = pm_runtime_get(cdd->ddev.dev); 327 /*
323 if (error < 0) 328 * This should never trigger, see the comments in
324 dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", 329 * push_desc_queue()
325 __func__, error); 330 */
331 WARN_ON(cdd->is_suspended);
326 332
327 q_num = __fls(val); 333 q_num = __fls(val);
328 val &= ~(1 << q_num); 334 val &= ~(1 << q_num);
@@ -343,9 +349,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
343 c->residue = pd_trans_len(c->desc->pd6) - len; 349 c->residue = pd_trans_len(c->desc->pd6) - len;
344 dma_cookie_complete(&c->txd); 350 dma_cookie_complete(&c->txd);
345 dmaengine_desc_get_callback_invoke(&c->txd, NULL); 351 dmaengine_desc_get_callback_invoke(&c->txd, NULL);
346
347 pm_runtime_mark_last_busy(cdd->ddev.dev);
348 pm_runtime_put_autosuspend(cdd->ddev.dev);
349 } 352 }
350 } 353 }
351 return IRQ_HANDLED; 354 return IRQ_HANDLED;
@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c)
447 */ 450 */
448 __iowmb(); 451 __iowmb();
449 452
453 /*
454 * DMA transfers can take at least 200ms to complete with USB mass
455 * storage connected. To prevent autosuspend timeouts, we must use
456 * pm_runtime_get/put() when chan_busy[] is modified. This will get
457 * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
458 * outcome of the transfer.
459 */
460 pm_runtime_get(cdd->ddev.dev);
461
450 desc_phys = lower_32_bits(c->desc_phys); 462 desc_phys = lower_32_bits(c->desc_phys);
451 desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); 463 desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
452 WARN_ON(cdd->chan_busy[desc_num]); 464 WARN_ON(cdd->chan_busy[desc_num]);
@@ -457,20 +469,26 @@ static void push_desc_queue(struct cppi41_channel *c)
457 cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); 469 cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
458} 470}
459 471
460static void pending_desc(struct cppi41_channel *c) 472/*
473 * Caller must hold cdd->lock to prevent push_desc_queue()
474 * getting called out of order. We have both cppi41_dma_issue_pending()
475 * and cppi41_runtime_resume() call this function.
476 */
477static void cppi41_run_queue(struct cppi41_dd *cdd)
461{ 478{
462 struct cppi41_dd *cdd = c->cdd; 479 struct cppi41_channel *c, *_c;
463 unsigned long flags;
464 480
465 spin_lock_irqsave(&cdd->lock, flags); 481 list_for_each_entry_safe(c, _c, &cdd->pending, node) {
466 list_add_tail(&c->node, &cdd->pending); 482 push_desc_queue(c);
467 spin_unlock_irqrestore(&cdd->lock, flags); 483 list_del(&c->node);
484 }
468} 485}
469 486
470static void cppi41_dma_issue_pending(struct dma_chan *chan) 487static void cppi41_dma_issue_pending(struct dma_chan *chan)
471{ 488{
472 struct cppi41_channel *c = to_cpp41_chan(chan); 489 struct cppi41_channel *c = to_cpp41_chan(chan);
473 struct cppi41_dd *cdd = c->cdd; 490 struct cppi41_dd *cdd = c->cdd;
491 unsigned long flags;
474 int error; 492 int error;
475 493
476 error = pm_runtime_get(cdd->ddev.dev); 494 error = pm_runtime_get(cdd->ddev.dev);
@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
482 return; 500 return;
483 } 501 }
484 502
485 if (likely(pm_runtime_active(cdd->ddev.dev))) 503 spin_lock_irqsave(&cdd->lock, flags);
486 push_desc_queue(c); 504 list_add_tail(&c->node, &cdd->pending);
487 else 505 if (!cdd->is_suspended)
488 pending_desc(c); 506 cppi41_run_queue(cdd);
507 spin_unlock_irqrestore(&cdd->lock, flags);
489 508
490 pm_runtime_mark_last_busy(cdd->ddev.dev); 509 pm_runtime_mark_last_busy(cdd->ddev.dev);
491 pm_runtime_put_autosuspend(cdd->ddev.dev); 510 pm_runtime_put_autosuspend(cdd->ddev.dev);
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
705 WARN_ON(!cdd->chan_busy[desc_num]); 724 WARN_ON(!cdd->chan_busy[desc_num]);
706 cdd->chan_busy[desc_num] = NULL; 725 cdd->chan_busy[desc_num] = NULL;
707 726
727 /* Usecount for chan_busy[], paired with push_desc_queue() */
728 pm_runtime_put(cdd->ddev.dev);
729
708 return 0; 730 return 0;
709} 731}
710 732
@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
1150static int __maybe_unused cppi41_runtime_suspend(struct device *dev) 1172static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
1151{ 1173{
1152 struct cppi41_dd *cdd = dev_get_drvdata(dev); 1174 struct cppi41_dd *cdd = dev_get_drvdata(dev);
1175 unsigned long flags;
1153 1176
1177 spin_lock_irqsave(&cdd->lock, flags);
1178 cdd->is_suspended = true;
1154 WARN_ON(!list_empty(&cdd->pending)); 1179 WARN_ON(!list_empty(&cdd->pending));
1180 spin_unlock_irqrestore(&cdd->lock, flags);
1155 1181
1156 return 0; 1182 return 0;
1157} 1183}
@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
1159static int __maybe_unused cppi41_runtime_resume(struct device *dev) 1185static int __maybe_unused cppi41_runtime_resume(struct device *dev)
1160{ 1186{
1161 struct cppi41_dd *cdd = dev_get_drvdata(dev); 1187 struct cppi41_dd *cdd = dev_get_drvdata(dev);
1162 struct cppi41_channel *c, *_c;
1163 unsigned long flags; 1188 unsigned long flags;
1164 1189
1165 spin_lock_irqsave(&cdd->lock, flags); 1190 spin_lock_irqsave(&cdd->lock, flags);
1166 list_for_each_entry_safe(c, _c, &cdd->pending, node) { 1191 cdd->is_suspended = false;
1167 push_desc_queue(c); 1192 cppi41_run_queue(cdd);
1168 list_del(&c->node);
1169 }
1170 spin_unlock_irqrestore(&cdd->lock, flags); 1193 spin_unlock_irqrestore(&cdd->lock, flags);
1171 1194
1172 return 0; 1195 return 0;
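The cppi41 rework replaces guessing at pm_runtime_active() with a driver-owned is_suspended flag flipped under cdd->lock, plus one runtime-PM reference held per in-flight descriptor (taken in push_desc_queue(), dropped in desc_to_chan() or cppi41_stop_chan()). issue_pending always queues; the queue is drained immediately when awake, or from runtime-resume otherwise. A userspace model of that gate — a mutex stands in for the spinlock and a plain counter for the PM usecount:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int is_suspended;
static int pm_usecount;
static int pending;               /* queued-but-not-pushed descriptors */

static void run_queue_locked(void)
{
    while (pending > 0) {
        pending--;
        pm_usecount++;            /* paired with completion/stop */
    }
}

static void issue_pending(void)
{
    pthread_mutex_lock(&lock);
    pending++;
    if (!is_suspended)            /* push now only when awake */
        run_queue_locked();
    pthread_mutex_unlock(&lock);
}

static void runtime_resume(void)
{
    pthread_mutex_lock(&lock);
    is_suspended = 0;
    run_queue_locked();           /* drain what arrived while asleep */
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    is_suspended = 1;
    issue_pending();              /* deferred */
    runtime_resume();             /* pushed here */
    printf("pending=%d usecount=%d\n", pending, pm_usecount);
    return 0;
}
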
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index e00c9b022964..5a37b9fcf40d 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -24,5 +24,5 @@ config DW_DMAC_PCI
24 select DW_DMAC_CORE 24 select DW_DMAC_CORE
25 help 25 help
26 Support the Synopsys DesignWare AHB DMA controller on the 26 Support the Synopsys DesignWare AHB DMA controller on the
27 platfroms that enumerate it as a PCI device. For example, 27 platforms that enumerate it as a PCI device. For example,
28 Intel Medfield has integrated this GPDMA controller. 28 Intel Medfield has integrated this GPDMA controller.
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 8e67895bcca3..abcc51b343ce 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -64,6 +64,8 @@
64#define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e 64#define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e
65#define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f 65#define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f
66 66
67#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021
68
67#define IOAT_VER_1_2 0x12 /* Version 1.2 */ 69#define IOAT_VER_1_2 0x12 /* Version 1.2 */
68#define IOAT_VER_2_0 0x20 /* Version 2.0 */ 70#define IOAT_VER_2_0 0x20 /* Version 2.0 */
69#define IOAT_VER_3_0 0x30 /* Version 3.0 */ 71#define IOAT_VER_3_0 0x30 /* Version 3.0 */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 90eddd9f07e4..cc5259b881d4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -106,6 +106,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
106 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) }, 106 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
107 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) }, 107 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
108 108
109 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },
110
109 /* I/OAT v3.3 platforms */ 111 /* I/OAT v3.3 platforms */
110 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, 112 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
111 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, 113 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
@@ -243,10 +245,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev)
243 } 245 }
244} 246}
245 247
248static inline bool is_skx_ioat(struct pci_dev *pdev)
249{
250 return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
251}
252
246static bool is_xeon_cb32(struct pci_dev *pdev) 253static bool is_xeon_cb32(struct pci_dev *pdev)
247{ 254{
248 return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || 255 return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
249 is_hsw_ioat(pdev) || is_bdx_ioat(pdev); 256 is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
250} 257}
251 258
252bool is_bwd_ioat(struct pci_dev *pdev) 259bool is_bwd_ioat(struct pci_dev *pdev)
@@ -693,7 +700,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
693 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ 700 /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
694 ioat_chan->completion = 701 ioat_chan->completion =
695 dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool, 702 dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
696 GFP_KERNEL, &ioat_chan->completion_dma); 703 GFP_NOWAIT, &ioat_chan->completion_dma);
697 if (!ioat_chan->completion) 704 if (!ioat_chan->completion)
698 return -ENOMEM; 705 return -ENOMEM;
699 706
@@ -703,7 +710,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
703 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); 710 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
704 711
705 order = IOAT_MAX_ORDER; 712 order = IOAT_MAX_ORDER;
706 ring = ioat_alloc_ring(c, order, GFP_KERNEL); 713 ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
707 if (!ring) 714 if (!ring)
708 return -ENOMEM; 715 return -ENOMEM;
709 716
@@ -1357,6 +1364,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1357 1364
1358 device->version = readb(device->reg_base + IOAT_VER_OFFSET); 1365 device->version = readb(device->reg_base + IOAT_VER_OFFSET);
1359 if (device->version >= IOAT_VER_3_0) { 1366 if (device->version >= IOAT_VER_3_0) {
1367 if (is_skx_ioat(pdev))
1368 device->version = IOAT_VER_3_2;
1360 err = ioat3_dma_probe(device, ioat_dca_enabled); 1369 err = ioat3_dma_probe(device, ioat_dca_enabled);
1361 1370
1362 if (device->version >= IOAT_VER_3_3) 1371 if (device->version >= IOAT_VER_3_3)
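The ioat hunks do two independent things: alloc_chan_resources switches to GFP_NOWAIT because that path can run in contexts that must not sleep, and Skylake Xeon (device 0x2021) is pinned to IOAT_VER_3_2 regardless of the register-reported version. A sketch of the quirk-table pattern behind the latter; the "register read 0x33" value is invented for the demonstration and the struct is a stand-in for the real pci_dev:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021
#define IOAT_VER_3_0 0x30
#define IOAT_VER_3_2 0x32

struct pci_dev { uint16_t device; };

static bool is_skx_ioat(const struct pci_dev *pdev)
{
    return pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX;
}

int main(void)
{
    struct pci_dev pdev = { PCI_DEVICE_ID_INTEL_IOAT_SKX };
    uint8_t version = 0x33;   /* pretend the version register read 3.3 */

    /* Force the known-good version for this device ID. */
    if (version >= IOAT_VER_3_0 && is_skx_ioat(&pdev))
        version = IOAT_VER_3_2;
    printf("version=0x%02x\n", (unsigned)version);
    return 0;
}
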
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index ac68666cd3f4..daf479cce691 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -938,21 +938,14 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
938 d->ccr |= CCR_DST_AMODE_POSTINC; 938 d->ccr |= CCR_DST_AMODE_POSTINC;
939 if (port_window) { 939 if (port_window) {
940 d->ccr |= CCR_SRC_AMODE_DBLIDX; 940 d->ccr |= CCR_SRC_AMODE_DBLIDX;
941 d->ei = 1;
942 /*
 943 * One frame covers the port_window and by configuring
944 * the source frame index to be -1 * (port_window - 1)
945 * we instruct the sDMA that after a frame is processed
946 * it should move back to the start of the window.
947 */
948 d->fi = -(port_window_bytes - 1);
949 941
950 if (port_window_bytes >= 64) 942 if (port_window_bytes >= 64)
951 d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED; 943 d->csdp |= CSDP_SRC_BURST_64;
952 else if (port_window_bytes >= 32) 944 else if (port_window_bytes >= 32)
953 d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED; 945 d->csdp |= CSDP_SRC_BURST_32;
954 else if (port_window_bytes >= 16) 946 else if (port_window_bytes >= 16)
955 d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED; 947 d->csdp |= CSDP_SRC_BURST_16;
948
956 } else { 949 } else {
957 d->ccr |= CCR_SRC_AMODE_CONSTANT; 950 d->ccr |= CCR_SRC_AMODE_CONSTANT;
958 } 951 }
@@ -962,13 +955,21 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
962 d->ccr |= CCR_SRC_AMODE_POSTINC; 955 d->ccr |= CCR_SRC_AMODE_POSTINC;
963 if (port_window) { 956 if (port_window) {
964 d->ccr |= CCR_DST_AMODE_DBLIDX; 957 d->ccr |= CCR_DST_AMODE_DBLIDX;
958 d->ei = 1;
959 /*
 960 * One frame covers the port_window and by configuring
961 * the source frame index to be -1 * (port_window - 1)
962 * we instruct the sDMA that after a frame is processed
963 * it should move back to the start of the window.
964 */
965 d->fi = -(port_window_bytes - 1);
965 966
966 if (port_window_bytes >= 64) 967 if (port_window_bytes >= 64)
967 d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED; 968 d->csdp |= CSDP_DST_BURST_64;
968 else if (port_window_bytes >= 32) 969 else if (port_window_bytes >= 32)
969 d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED; 970 d->csdp |= CSDP_DST_BURST_32;
970 else if (port_window_bytes >= 16) 971 else if (port_window_bytes >= 16)
971 d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED; 972 d->csdp |= CSDP_DST_BURST_16;
972 } else { 973 } else {
973 d->ccr |= CCR_DST_AMODE_CONSTANT; 974 d->ccr |= CCR_DST_AMODE_CONSTANT;
974 } 975 }
@@ -1017,7 +1018,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
1017 osg->addr = sg_dma_address(sgent); 1018 osg->addr = sg_dma_address(sgent);
1018 osg->en = en; 1019 osg->en = en;
1019 osg->fn = sg_dma_len(sgent) / frame_bytes; 1020 osg->fn = sg_dma_len(sgent) / frame_bytes;
1020 if (port_window && dir == DMA_MEM_TO_DEV) { 1021 if (port_window && dir == DMA_DEV_TO_MEM) {
1021 osg->ei = 1; 1022 osg->ei = 1;
1022 /* 1023 /*
1023 * One frame covers the port_window and by configuring 1024 * One frame covers the port_window and by configuring
@@ -1452,6 +1453,7 @@ static int omap_dma_probe(struct platform_device *pdev)
1452 struct omap_dmadev *od; 1453 struct omap_dmadev *od;
1453 struct resource *res; 1454 struct resource *res;
1454 int rc, i, irq; 1455 int rc, i, irq;
1456 u32 lch_count;
1455 1457
1456 od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); 1458 od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
1457 if (!od) 1459 if (!od)
@@ -1494,20 +1496,31 @@ static int omap_dma_probe(struct platform_device *pdev)
1494 spin_lock_init(&od->lock); 1496 spin_lock_init(&od->lock);
1495 spin_lock_init(&od->irq_lock); 1497 spin_lock_init(&od->irq_lock);
1496 1498
1497 if (!pdev->dev.of_node) { 1499 /* Number of DMA requests */
1498 od->dma_requests = od->plat->dma_attr->lch_count; 1500 od->dma_requests = OMAP_SDMA_REQUESTS;
1499 if (unlikely(!od->dma_requests)) 1501 if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
1500 od->dma_requests = OMAP_SDMA_REQUESTS; 1502 "dma-requests",
1501 } else if (of_property_read_u32(pdev->dev.of_node, "dma-requests", 1503 &od->dma_requests)) {
1502 &od->dma_requests)) {
1503 dev_info(&pdev->dev, 1504 dev_info(&pdev->dev,
1504 "Missing dma-requests property, using %u.\n", 1505 "Missing dma-requests property, using %u.\n",
1505 OMAP_SDMA_REQUESTS); 1506 OMAP_SDMA_REQUESTS);
1506 od->dma_requests = OMAP_SDMA_REQUESTS;
1507 } 1507 }
1508 1508
1509 od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests, 1509 /* Number of available logical channels */
1510 sizeof(*od->lch_map), GFP_KERNEL); 1510 if (!pdev->dev.of_node) {
1511 lch_count = od->plat->dma_attr->lch_count;
1512 if (unlikely(!lch_count))
1513 lch_count = OMAP_SDMA_CHANNELS;
1514 } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
1515 &lch_count)) {
1516 dev_info(&pdev->dev,
1517 "Missing dma-channels property, using %u.\n",
1518 OMAP_SDMA_CHANNELS);
1519 lch_count = OMAP_SDMA_CHANNELS;
1520 }
1521
1522 od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
1523 GFP_KERNEL);
1511 if (!od->lch_map) 1524 if (!od->lch_map)
1512 return -ENOMEM; 1525 return -ENOMEM;
1513 1526
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 87fd01539fcb..7539f73df9e0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -448,6 +448,9 @@ struct dma_pl330_chan {
448 448
449 /* for cyclic capability */ 449 /* for cyclic capability */
450 bool cyclic; 450 bool cyclic;
451
452 /* for runtime pm tracking */
453 bool active;
451}; 454};
452 455
453struct pl330_dmac { 456struct pl330_dmac {
@@ -1696,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
1696static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) 1699static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
1697{ 1700{
1698 struct pl330_thread *thrd = NULL; 1701 struct pl330_thread *thrd = NULL;
1699 unsigned long flags;
1700 int chans, i; 1702 int chans, i;
1701 1703
1702 if (pl330->state == DYING) 1704 if (pl330->state == DYING)
@@ -1704,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
1704 1706
1705 chans = pl330->pcfg.num_chan; 1707 chans = pl330->pcfg.num_chan;
1706 1708
1707 spin_lock_irqsave(&pl330->lock, flags);
1708
1709 for (i = 0; i < chans; i++) { 1709 for (i = 0; i < chans; i++) {
1710 thrd = &pl330->channels[i]; 1710 thrd = &pl330->channels[i];
1711 if ((thrd->free) && (!_manager_ns(thrd) || 1711 if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1723,8 +1723,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
1723 thrd = NULL; 1723 thrd = NULL;
1724 } 1724 }
1725 1725
1726 spin_unlock_irqrestore(&pl330->lock, flags);
1727
1728 return thrd; 1726 return thrd;
1729} 1727}
1730 1728
@@ -1742,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
1742static void pl330_release_channel(struct pl330_thread *thrd) 1740static void pl330_release_channel(struct pl330_thread *thrd)
1743{ 1741{
1744 struct pl330_dmac *pl330; 1742 struct pl330_dmac *pl330;
1745 unsigned long flags;
1746 1743
1747 if (!thrd || thrd->free) 1744 if (!thrd || thrd->free)
1748 return; 1745 return;
@@ -1754,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
1754 1751
1755 pl330 = thrd->dmac; 1752 pl330 = thrd->dmac;
1756 1753
1757 spin_lock_irqsave(&pl330->lock, flags);
1758 _free_event(thrd, thrd->ev); 1754 _free_event(thrd, thrd->ev);
1759 thrd->free = true; 1755 thrd->free = true;
1760 spin_unlock_irqrestore(&pl330->lock, flags);
1761} 1756}
1762 1757
1763/* Initialize the structure for PL330 configuration, that can be used 1758/* Initialize the structure for PL330 configuration, that can be used
@@ -2033,6 +2028,7 @@ static void pl330_tasklet(unsigned long data)
2033 _stop(pch->thread); 2028 _stop(pch->thread);
2034 spin_unlock(&pch->thread->dmac->lock); 2029 spin_unlock(&pch->thread->dmac->lock);
2035 power_down = true; 2030 power_down = true;
2031 pch->active = false;
2036 } else { 2032 } else {
2037 /* Make sure the PL330 Channel thread is active */ 2033 /* Make sure the PL330 Channel thread is active */
2038 spin_lock(&pch->thread->dmac->lock); 2034 spin_lock(&pch->thread->dmac->lock);
@@ -2052,6 +2048,7 @@ static void pl330_tasklet(unsigned long data)
2052 desc->status = PREP; 2048 desc->status = PREP;
2053 list_move_tail(&desc->node, &pch->work_list); 2049 list_move_tail(&desc->node, &pch->work_list);
2054 if (power_down) { 2050 if (power_down) {
2051 pch->active = true;
2055 spin_lock(&pch->thread->dmac->lock); 2052 spin_lock(&pch->thread->dmac->lock);
2056 _start(pch->thread); 2053 _start(pch->thread);
2057 spin_unlock(&pch->thread->dmac->lock); 2054 spin_unlock(&pch->thread->dmac->lock);
@@ -2117,20 +2114,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
2117 struct pl330_dmac *pl330 = pch->dmac; 2114 struct pl330_dmac *pl330 = pch->dmac;
2118 unsigned long flags; 2115 unsigned long flags;
2119 2116
2120 spin_lock_irqsave(&pch->lock, flags); 2117 spin_lock_irqsave(&pl330->lock, flags);
2121 2118
2122 dma_cookie_init(chan); 2119 dma_cookie_init(chan);
2123 pch->cyclic = false; 2120 pch->cyclic = false;
2124 2121
2125 pch->thread = pl330_request_channel(pl330); 2122 pch->thread = pl330_request_channel(pl330);
2126 if (!pch->thread) { 2123 if (!pch->thread) {
2127 spin_unlock_irqrestore(&pch->lock, flags); 2124 spin_unlock_irqrestore(&pl330->lock, flags);
2128 return -ENOMEM; 2125 return -ENOMEM;
2129 } 2126 }
2130 2127
2131 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); 2128 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
2132 2129
2133 spin_unlock_irqrestore(&pch->lock, flags); 2130 spin_unlock_irqrestore(&pl330->lock, flags);
2134 2131
2135 return 1; 2132 return 1;
2136} 2133}
@@ -2166,6 +2163,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
2166 unsigned long flags; 2163 unsigned long flags;
2167 struct pl330_dmac *pl330 = pch->dmac; 2164 struct pl330_dmac *pl330 = pch->dmac;
2168 LIST_HEAD(list); 2165 LIST_HEAD(list);
2166 bool power_down = false;
2169 2167
2170 pm_runtime_get_sync(pl330->ddma.dev); 2168 pm_runtime_get_sync(pl330->ddma.dev);
2171 spin_lock_irqsave(&pch->lock, flags); 2169 spin_lock_irqsave(&pch->lock, flags);
@@ -2176,6 +2174,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
2176 pch->thread->req[0].desc = NULL; 2174 pch->thread->req[0].desc = NULL;
2177 pch->thread->req[1].desc = NULL; 2175 pch->thread->req[1].desc = NULL;
2178 pch->thread->req_running = -1; 2176 pch->thread->req_running = -1;
2177 power_down = pch->active;
2178 pch->active = false;
2179 2179
2180 /* Mark all desc done */ 2180 /* Mark all desc done */
2181 list_for_each_entry(desc, &pch->submitted_list, node) { 2181 list_for_each_entry(desc, &pch->submitted_list, node) {
@@ -2193,6 +2193,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
2193 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); 2193 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
2194 spin_unlock_irqrestore(&pch->lock, flags); 2194 spin_unlock_irqrestore(&pch->lock, flags);
2195 pm_runtime_mark_last_busy(pl330->ddma.dev); 2195 pm_runtime_mark_last_busy(pl330->ddma.dev);
2196 if (power_down)
2197 pm_runtime_put_autosuspend(pl330->ddma.dev);
2196 pm_runtime_put_autosuspend(pl330->ddma.dev); 2198 pm_runtime_put_autosuspend(pl330->ddma.dev);
2197 2199
2198 return 0; 2200 return 0;
@@ -2228,12 +2230,13 @@ static int pl330_pause(struct dma_chan *chan)
2228static void pl330_free_chan_resources(struct dma_chan *chan) 2230static void pl330_free_chan_resources(struct dma_chan *chan)
2229{ 2231{
2230 struct dma_pl330_chan *pch = to_pchan(chan); 2232 struct dma_pl330_chan *pch = to_pchan(chan);
2233 struct pl330_dmac *pl330 = pch->dmac;
2231 unsigned long flags; 2234 unsigned long flags;
2232 2235
2233 tasklet_kill(&pch->task); 2236 tasklet_kill(&pch->task);
2234 2237
2235 pm_runtime_get_sync(pch->dmac->ddma.dev); 2238 pm_runtime_get_sync(pch->dmac->ddma.dev);
2236 spin_lock_irqsave(&pch->lock, flags); 2239 spin_lock_irqsave(&pl330->lock, flags);
2237 2240
2238 pl330_release_channel(pch->thread); 2241 pl330_release_channel(pch->thread);
2239 pch->thread = NULL; 2242 pch->thread = NULL;
@@ -2241,7 +2244,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
2241 if (pch->cyclic) 2244 if (pch->cyclic)
2242 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); 2245 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
2243 2246
2244 spin_unlock_irqrestore(&pch->lock, flags); 2247 spin_unlock_irqrestore(&pl330->lock, flags);
2245 pm_runtime_mark_last_busy(pch->dmac->ddma.dev); 2248 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2246 pm_runtime_put_autosuspend(pch->dmac->ddma.dev); 2249 pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
2247} 2250}
@@ -2357,6 +2360,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
2357 * updated on work_list emptiness status. 2360 * updated on work_list emptiness status.
2358 */ 2361 */
2359 WARN_ON(list_empty(&pch->submitted_list)); 2362 WARN_ON(list_empty(&pch->submitted_list));
2363 pch->active = true;
2360 pm_runtime_get_sync(pch->dmac->ddma.dev); 2364 pm_runtime_get_sync(pch->dmac->ddma.dev);
2361 } 2365 }
2362 list_splice_tail_init(&pch->submitted_list, &pch->work_list); 2366 list_splice_tail_init(&pch->submitted_list, &pch->work_list);
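The pl330 fix pairs every pm_runtime reference taken in issue_pending with exactly one put: pch->active records that the channel still owes one, the tasklet drops it when the channel idles, and terminate_all drops it for a channel killed while running (its second put balances the get_sync at the top of terminate_all itself). A bare-counter model of that invariant — the PM calls are reduced to increments and decrements:

#include <stdbool.h>
#include <stdio.h>

static int usecount;              /* models the runtime-PM usecount */
static bool active;               /* models pch->active */

static void pm_get(void)  { usecount++; }
static void pm_put(void)  { usecount--; }

static void issue_pending(void)
{
    if (!active) {                /* first work for an idle channel */
        active = true;
        pm_get();                 /* channel now owes one put */
    }
}

static void terminate_all(void)
{
    bool power_down;

    pm_get();                     /* temporary ref for the call itself */
    power_down = active;
    active = false;
    if (power_down)
        pm_put();                 /* repay the issue_pending ref */
    pm_put();                     /* repay the temporary ref */
}

int main(void)
{
    issue_pending();
    terminate_all();
    printf("usecount=%d (0 means balanced)\n", usecount);
    return 0;
}
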
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2e441d0ccd79..4c357d475465 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -986,6 +986,7 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
986{ 986{
987 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 987 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
988 struct rcar_dmac *dmac = to_rcar_dmac(chan->device); 988 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
989 struct rcar_dmac_chan_map *map = &rchan->map;
989 struct rcar_dmac_desc_page *page, *_page; 990 struct rcar_dmac_desc_page *page, *_page;
990 struct rcar_dmac_desc *desc; 991 struct rcar_dmac_desc *desc;
991 LIST_HEAD(list); 992 LIST_HEAD(list);
@@ -1019,6 +1020,13 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
1019 free_page((unsigned long)page); 1020 free_page((unsigned long)page);
1020 } 1021 }
1021 1022
1023 /* Remove slave mapping if present. */
1024 if (map->slave.xfer_size) {
1025 dma_unmap_resource(chan->device->dev, map->addr,
1026 map->slave.xfer_size, map->dir, 0);
1027 map->slave.xfer_size = 0;
1028 }
1029
1022 pm_runtime_put(chan->device->dev); 1030 pm_runtime_put(chan->device->dev);
1023} 1031}
1024 1032
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 3688d0873a3e..3056ce7f8c69 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -880,7 +880,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
880 struct virt_dma_desc *vdesc; 880 struct virt_dma_desc *vdesc;
881 enum dma_status status; 881 enum dma_status status;
882 unsigned long flags; 882 unsigned long flags;
883 u32 residue; 883 u32 residue = 0;
884 884
885 status = dma_cookie_status(c, cookie, state); 885 status = dma_cookie_status(c, cookie, state);
886 if ((status == DMA_COMPLETE) || (!state)) 886 if ((status == DMA_COMPLETE) || (!state))
@@ -888,16 +888,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
888 888
889 spin_lock_irqsave(&chan->vchan.lock, flags); 889 spin_lock_irqsave(&chan->vchan.lock, flags);
890 vdesc = vchan_find_desc(&chan->vchan, cookie); 890 vdesc = vchan_find_desc(&chan->vchan, cookie);
891 if (cookie == chan->desc->vdesc.tx.cookie) { 891 if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
892 residue = stm32_dma_desc_residue(chan, chan->desc, 892 residue = stm32_dma_desc_residue(chan, chan->desc,
893 chan->next_sg); 893 chan->next_sg);
894 } else if (vdesc) { 894 else if (vdesc)
895 residue = stm32_dma_desc_residue(chan, 895 residue = stm32_dma_desc_residue(chan,
896 to_stm32_dma_desc(vdesc), 0); 896 to_stm32_dma_desc(vdesc), 0);
897 } else {
898 residue = 0;
899 }
900
901 dma_set_residue(state, residue); 897 dma_set_residue(state, residue);
902 898
903 spin_unlock_irqrestore(&chan->vchan.lock, flags); 899 spin_unlock_irqrestore(&chan->vchan.lock, flags);
@@ -972,21 +968,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
972 struct stm32_dma_chan *chan; 968 struct stm32_dma_chan *chan;
973 struct dma_chan *c; 969 struct dma_chan *c;
974 970
975 if (dma_spec->args_count < 3) 971 if (dma_spec->args_count < 4)
976 return NULL; 972 return NULL;
977 973
978 cfg.channel_id = dma_spec->args[0]; 974 cfg.channel_id = dma_spec->args[0];
979 cfg.request_line = dma_spec->args[1]; 975 cfg.request_line = dma_spec->args[1];
980 cfg.stream_config = dma_spec->args[2]; 976 cfg.stream_config = dma_spec->args[2];
981 cfg.threshold = 0; 977 cfg.threshold = dma_spec->args[3];
982 978
983 if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >= 979 if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
984 STM32_DMA_MAX_REQUEST_ID)) 980 STM32_DMA_MAX_REQUEST_ID))
985 return NULL; 981 return NULL;
986 982
987 if (dma_spec->args_count > 3)
988 cfg.threshold = dma_spec->args[3];
989
990 chan = &dmadev->chan[cfg.channel_id]; 983 chan = &dmadev->chan[cfg.channel_id];
991 984
992 c = dma_get_slave_channel(&chan->vchan.chan); 985 c = dma_get_slave_channel(&chan->vchan.chan);
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 3f24aeb48c0e..2403475a37cf 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -149,6 +149,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
149 match = of_match_node(ti_am335x_master_match, dma_node); 149 match = of_match_node(ti_am335x_master_match, dma_node);
150 if (!match) { 150 if (!match) {
151 dev_err(&pdev->dev, "DMA master is not supported\n"); 151 dev_err(&pdev->dev, "DMA master is not supported\n");
152 of_node_put(dma_node);
152 return -EINVAL; 153 return -EINVAL;
153 } 154 }
154 155
@@ -339,6 +340,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
339 match = of_match_node(ti_dra7_master_match, dma_node); 340 match = of_match_node(ti_dra7_master_match, dma_node);
340 if (!match) { 341 if (!match) {
341 dev_err(&pdev->dev, "DMA master is not supported\n"); 342 dev_err(&pdev->dev, "DMA master is not supported\n");
343 of_node_put(dma_node);
342 return -EINVAL; 344 return -EINVAL;
343 } 345 }
344 346
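Both crossbar probes leaked a device-node reference: of_parse_phandle() returns dma_node with an elevated refcount, so the early-error return must drop it with of_node_put(). A generic sketch of that get/put discipline on error paths, with refcounting reduced to a bare counter and the node type invented for the example:

#include <errno.h>
#include <stdio.h>

struct node { int refcount; };

static struct node *node_get(struct node *n) { n->refcount++; return n; }
static void node_put(struct node *n) { n->refcount--; }

static int probe(struct node *parent, int match)
{
    struct node *dma_node = node_get(parent); /* +1, like of_parse_phandle */

    if (!match) {
        node_put(dma_node);       /* the fix: drop the ref on error */
        return -EINVAL;
    }
    /* success: the driver would keep dma_node and put it at remove
     * time; modeled here as an immediate put for balance */
    node_put(dma_node);
    return 0;
}

int main(void)
{
    struct node n = { 1 };

    probe(&n, 0);
    printf("refcount=%d (1 means no leak)\n", n.refcount);
    return 0;
}
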
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 78298460d168..7c1e3a7b14e0 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -453,7 +453,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
453 dev_err(&edev->dev, "out of memory in extcon_set_state\n"); 453 dev_err(&edev->dev, "out of memory in extcon_set_state\n");
454 kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE); 454 kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE);
455 455
456 return 0; 456 return -ENOMEM;
457 } 457 }
458 458
459 length = name_show(&edev->dev, NULL, prop_buf); 459 length = name_show(&edev->dev, NULL, prop_buf);
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 70e13230d8db..9ad0b1934be9 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -721,11 +721,17 @@ static int scpi_sensor_get_value(u16 sensor, u64 *val)
721 721
722 ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id), 722 ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
723 &buf, sizeof(buf)); 723 &buf, sizeof(buf));
724 if (!ret) 724 if (ret)
725 return ret;
726
727 if (scpi_info->is_legacy)
728 /* only 32-bits supported, hi_val can be junk */
729 *val = le32_to_cpu(buf.lo_val);
730 else
725 *val = (u64)le32_to_cpu(buf.hi_val) << 32 | 731 *val = (u64)le32_to_cpu(buf.hi_val) << 32 |
726 le32_to_cpu(buf.lo_val); 732 le32_to_cpu(buf.lo_val);
727 733
728 return ret; 734 return 0;
729} 735}
730 736
731static int scpi_device_get_power_state(u16 dev_id) 737static int scpi_device_get_power_state(u16 dev_id)
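The scpi fix inverts the error test so failures return early, and on legacy firmware keeps only the low 32 bits of the sensor reading, since hi_val is undefined there. A short sketch of assembling the value from two little-endian words, with the legacy cut-down; le32toh/htole32 from glibc's endian.h stand in for le32_to_cpu, which assumes a glibc-style libc:

#include <endian.h>      /* le32toh/htole32; glibc-style assumption */
#include <stdint.h>
#include <stdio.h>

struct sensor_value { uint32_t lo_val, hi_val; };

static uint64_t sensor_to_u64(const struct sensor_value *buf, int legacy)
{
    if (legacy)          /* hi_val may be junk on legacy firmware */
        return le32toh(buf->lo_val);
    return (uint64_t)le32toh(buf->hi_val) << 32 | le32toh(buf->lo_val);
}

int main(void)
{
    struct sensor_value v = { htole32(0x1234), htole32(0xdead) };

    printf("legacy=%#llx full=%#llx\n",
           (unsigned long long)sensor_to_u64(&v, 1),
           (unsigned long long)sensor_to_u64(&v, 0));
    return 0;
}
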
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index 520a40e5e0e4..6c7d60c239b5 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -71,8 +71,7 @@ void __init efi_fake_memmap(void)
71 } 71 }
72 72
73 /* allocate memory for new EFI memmap */ 73 /* allocate memory for new EFI memmap */
74 new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map, 74 new_memmap_phy = efi_memmap_alloc(new_nr_map);
75 PAGE_SIZE);
76 if (!new_memmap_phy) 75 if (!new_memmap_phy)
77 return; 76 return;
78 77
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index b98824e3800a..0e2a96b12cb3 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -39,14 +39,6 @@ efi_status_t efi_file_close(void *handle);
39 39
40unsigned long get_dram_base(efi_system_table_t *sys_table_arg); 40unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
41 41
42efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
43 unsigned long orig_fdt_size,
44 void *fdt, int new_fdt_size, char *cmdline_ptr,
45 u64 initrd_addr, u64 initrd_size,
46 efi_memory_desc_t *memory_map,
47 unsigned long map_size, unsigned long desc_size,
48 u32 desc_ver);
49
50efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, 42efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
51 void *handle, 43 void *handle,
52 unsigned long *new_fdt_addr, 44 unsigned long *new_fdt_addr,
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index a6a93116a8f0..260c4b4b492e 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -16,13 +16,10 @@
16 16
17#include "efistub.h" 17#include "efistub.h"
18 18
19efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, 19static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
20 unsigned long orig_fdt_size, 20 unsigned long orig_fdt_size,
21 void *fdt, int new_fdt_size, char *cmdline_ptr, 21 void *fdt, int new_fdt_size, char *cmdline_ptr,
22 u64 initrd_addr, u64 initrd_size, 22 u64 initrd_addr, u64 initrd_size)
23 efi_memory_desc_t *memory_map,
24 unsigned long map_size, unsigned long desc_size,
25 u32 desc_ver)
26{ 23{
27 int node, num_rsv; 24 int node, num_rsv;
28 int status; 25 int status;
@@ -101,25 +98,23 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
101 if (status) 98 if (status)
102 goto fdt_set_fail; 99 goto fdt_set_fail;
103 100
104 fdt_val64 = cpu_to_fdt64((u64)(unsigned long)memory_map); 101 fdt_val64 = U64_MAX; /* placeholder */
105 status = fdt_setprop(fdt, node, "linux,uefi-mmap-start", 102 status = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
106 &fdt_val64, sizeof(fdt_val64)); 103 &fdt_val64, sizeof(fdt_val64));
107 if (status) 104 if (status)
108 goto fdt_set_fail; 105 goto fdt_set_fail;
109 106
110 fdt_val32 = cpu_to_fdt32(map_size); 107 fdt_val32 = U32_MAX; /* placeholder */
111 status = fdt_setprop(fdt, node, "linux,uefi-mmap-size", 108 status = fdt_setprop(fdt, node, "linux,uefi-mmap-size",
112 &fdt_val32, sizeof(fdt_val32)); 109 &fdt_val32, sizeof(fdt_val32));
113 if (status) 110 if (status)
114 goto fdt_set_fail; 111 goto fdt_set_fail;
115 112
116 fdt_val32 = cpu_to_fdt32(desc_size);
117 status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size", 113 status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size",
118 &fdt_val32, sizeof(fdt_val32)); 114 &fdt_val32, sizeof(fdt_val32));
119 if (status) 115 if (status)
120 goto fdt_set_fail; 116 goto fdt_set_fail;
121 117
122 fdt_val32 = cpu_to_fdt32(desc_ver);
123 status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver", 118 status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver",
124 &fdt_val32, sizeof(fdt_val32)); 119 &fdt_val32, sizeof(fdt_val32));
125 if (status) 120 if (status)
@@ -148,6 +143,43 @@ fdt_set_fail:
148 return EFI_LOAD_ERROR; 143 return EFI_LOAD_ERROR;
149} 144}
150 145
146static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
147{
148 int node = fdt_path_offset(fdt, "/chosen");
149 u64 fdt_val64;
150 u32 fdt_val32;
151 int err;
152
153 if (node < 0)
154 return EFI_LOAD_ERROR;
155
156 fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
157 err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
158 &fdt_val64, sizeof(fdt_val64));
159 if (err)
160 return EFI_LOAD_ERROR;
161
162 fdt_val32 = cpu_to_fdt32(*map->map_size);
163 err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size",
164 &fdt_val32, sizeof(fdt_val32));
165 if (err)
166 return EFI_LOAD_ERROR;
167
168 fdt_val32 = cpu_to_fdt32(*map->desc_size);
169 err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size",
170 &fdt_val32, sizeof(fdt_val32));
171 if (err)
172 return EFI_LOAD_ERROR;
173
174 fdt_val32 = cpu_to_fdt32(*map->desc_ver);
175 err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver",
176 &fdt_val32, sizeof(fdt_val32));
177 if (err)
178 return EFI_LOAD_ERROR;
179
180 return EFI_SUCCESS;
181}
182
151#ifndef EFI_FDT_ALIGN 183#ifndef EFI_FDT_ALIGN
152#define EFI_FDT_ALIGN EFI_PAGE_SIZE 184#define EFI_FDT_ALIGN EFI_PAGE_SIZE
153#endif 185#endif
@@ -155,6 +187,7 @@ fdt_set_fail:
155struct exit_boot_struct { 187struct exit_boot_struct {
156 efi_memory_desc_t *runtime_map; 188 efi_memory_desc_t *runtime_map;
157 int *runtime_entry_count; 189 int *runtime_entry_count;
190 void *new_fdt_addr;
158}; 191};
159 192
160static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, 193static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -170,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
170 efi_get_virtmap(*map->map, *map->map_size, *map->desc_size, 203 efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
171 p->runtime_map, p->runtime_entry_count); 204 p->runtime_map, p->runtime_entry_count);
172 205
173 return EFI_SUCCESS; 206 return update_fdt_memmap(p->new_fdt_addr, map);
174} 207}
175 208
176/* 209/*
@@ -243,20 +276,10 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
243 goto fail; 276 goto fail;
244 } 277 }
245 278
246 /*
247 * Now that we have done our final memory allocation (and free)
248 * we can get the memory map key needed for
249 * exit_boot_services().
250 */
251 status = efi_get_memory_map(sys_table, &map);
252 if (status != EFI_SUCCESS)
253 goto fail_free_new_fdt;
254
255 status = update_fdt(sys_table, 279 status = update_fdt(sys_table,
256 (void *)fdt_addr, fdt_size, 280 (void *)fdt_addr, fdt_size,
257 (void *)*new_fdt_addr, new_fdt_size, 281 (void *)*new_fdt_addr, new_fdt_size,
258 cmdline_ptr, initrd_addr, initrd_size, 282 cmdline_ptr, initrd_addr, initrd_size);
259 memory_map, map_size, desc_size, desc_ver);
260 283
261 /* Succeeding the first time is the expected case. */ 284 /* Succeeding the first time is the expected case. */
262 if (status == EFI_SUCCESS) 285 if (status == EFI_SUCCESS)
@@ -266,22 +289,19 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
266 /* 289 /*
267 * We need to allocate more space for the new 290 * We need to allocate more space for the new
268 * device tree, so free existing buffer that is 291 * device tree, so free existing buffer that is
269 * too small. Also free memory map, as we will need 292 * too small.
270 * to get new one that reflects the free/alloc we do
271 * on the device tree buffer.
272 */ 293 */
273 efi_free(sys_table, new_fdt_size, *new_fdt_addr); 294 efi_free(sys_table, new_fdt_size, *new_fdt_addr);
274 sys_table->boottime->free_pool(memory_map);
275 new_fdt_size += EFI_PAGE_SIZE; 295 new_fdt_size += EFI_PAGE_SIZE;
276 } else { 296 } else {
277 pr_efi_err(sys_table, "Unable to construct new device tree.\n"); 297 pr_efi_err(sys_table, "Unable to construct new device tree.\n");
278 goto fail_free_mmap; 298 goto fail_free_new_fdt;
279 } 299 }
280 } 300 }
281 301
282 sys_table->boottime->free_pool(memory_map);
283 priv.runtime_map = runtime_map; 302 priv.runtime_map = runtime_map;
284 priv.runtime_entry_count = &runtime_entry_count; 303 priv.runtime_entry_count = &runtime_entry_count;
304 priv.new_fdt_addr = (void *)*new_fdt_addr;
285 status = efi_exit_boot_services(sys_table, handle, &map, &priv, 305 status = efi_exit_boot_services(sys_table, handle, &map, &priv,
286 exit_boot_func); 306 exit_boot_func);
287 307
@@ -319,9 +339,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
319 339
320 pr_efi_err(sys_table, "Exit boot services failed.\n"); 340 pr_efi_err(sys_table, "Exit boot services failed.\n");
321 341
322fail_free_mmap:
323 sys_table->boottime->free_pool(memory_map);
324
325fail_free_new_fdt: 342fail_free_new_fdt:
326 efi_free(sys_table, new_fdt_size, *new_fdt_addr); 343 efi_free(sys_table, new_fdt_size, *new_fdt_addr);
327 344
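The fdt.c rework above turns update_fdt() into a static helper that no longer takes the memory map at all; the map properties are instead handled in two phases. Before ExitBootServices(), fdt_setprop() writes fixed-width placeholders (U64_MAX/U32_MAX), because fdt_setprop() may grow the blob and resizing is only safe while boot-services allocation still works. Once the final map is known, inside the exit_boot_services() callback, update_fdt_memmap() patches the real values over the placeholders with fdt_setprop_inplace(), which never reallocates. A minimal sketch of the two-phase idiom, using libfdt directly; the helper names and property are illustrative, not the stub's exact code:

        #include <libfdt.h>

        /* Phase 1: reserve space while resizing the blob is still allowed.
         * The placeholder only has to match the final value's width. */
        static int reserve_u64_prop(void *fdt, int node, const char *name)
        {
                fdt64_t placeholder = cpu_to_fdt64(~0ULL);

                return fdt_setprop(fdt, node, name, &placeholder,
                                   sizeof(placeholder));
        }

        /* Phase 2: overwrite the reserved bytes without reallocating,
         * safe after boot services are gone. */
        static int fixup_u64_prop(void *fdt, int node, const char *name,
                                  uint64_t val)
        {
                fdt64_t v = cpu_to_fdt64(val);

                return fdt_setprop_inplace(fdt, node, name, &v, sizeof(v));
        }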
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index f03ddecd232b..78686443cb37 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -9,6 +9,44 @@
9#include <linux/efi.h> 9#include <linux/efi.h>
10#include <linux/io.h> 10#include <linux/io.h>
11#include <asm/early_ioremap.h> 11#include <asm/early_ioremap.h>
12#include <linux/memblock.h>
13#include <linux/slab.h>
14
15static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
16{
17 return memblock_alloc(size, 0);
18}
19
20static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
21{
22 unsigned int order = get_order(size);
23 struct page *p = alloc_pages(GFP_KERNEL, order);
24
25 if (!p)
26 return 0;
27
28 return PFN_PHYS(page_to_pfn(p));
29}
30
31/**
32 * efi_memmap_alloc - Allocate memory for the EFI memory map
33 * @num_entries: Number of entries in the allocated map.
34 *
35 * Depending on whether mm_init() has already been invoked or not,
36 * either memblock or "normal" page allocation is used.
37 *
38 * Returns the physical address of the allocated memory map on
39 * success, zero on failure.
40 */
41phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
42{
43 unsigned long size = num_entries * efi.memmap.desc_size;
44
45 if (slab_is_available())
46 return __efi_memmap_alloc_late(size);
47
48 return __efi_memmap_alloc_early(size);
49}
12 50
13/** 51/**
14 * __efi_memmap_init - Common code for mapping the EFI memory map 52 * __efi_memmap_init - Common code for mapping the EFI memory map
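efi_memmap_alloc() above keys off slab_is_available() to pick an allocator for the current boot phase: before mm_init() only memblock can satisfy requests, afterwards the page allocator can. Both paths hand back a physical address, with zero doubling as the failure value, so callers stay phase-agnostic; the EFI memmap code remaps the table separately. A hedged caller-side sketch, with new_nr_map borrowed from the fake_mem hunk at the top of this section:

        /* caller does not need to know which boot phase it runs in */
        phys_addr_t new_memmap_phy = efi_memmap_alloc(new_nr_map);

        if (!new_memmap_phy)
                return;         /* allocation failed, in either phase */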
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index 44bdb78f837b..29d58feaf675 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -270,8 +270,7 @@ static int suspend_test_thread(void *arg)
270 struct cpuidle_device *dev; 270 struct cpuidle_device *dev;
271 struct cpuidle_driver *drv; 271 struct cpuidle_driver *drv;
272 /* No need for an actual callback, we just want to wake up the CPU. */ 272 /* No need for an actual callback, we just want to wake up the CPU. */
273 struct timer_list wakeup_timer = 273 struct timer_list wakeup_timer;
274 TIMER_INITIALIZER(dummy_callback, 0, 0);
275 274
276 /* Wait for the main thread to give the start signal. */ 275 /* Wait for the main thread to give the start signal. */
277 wait_for_completion(&suspend_threads_started); 276 wait_for_completion(&suspend_threads_started);
@@ -287,6 +286,7 @@ static int suspend_test_thread(void *arg)
287 pr_info("CPU %d entering suspend cycles, states 1 through %d\n", 286 pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
288 cpu, drv->state_count - 1); 287 cpu, drv->state_count - 1);
289 288
289 setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
290 for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) { 290 for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
291 int index; 291 int index;
292 /* 292 /*
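The psci_checker change swaps a static TIMER_INITIALIZER for setup_timer_on_stack(): a timer living on a kthread's stack must be initialized through the *_on_stack variant so CONFIG_DEBUG_OBJECTS can track it as a legitimate on-stack object. A minimal sketch of the pattern with the 4.10-era timer API; the callback and timeout are illustrative:

        #include <linux/timer.h>
        #include <linux/jiffies.h>

        static void dummy_callback(unsigned long data) { }

        static void run_with_stack_timer(void)
        {
                struct timer_list t;

                setup_timer_on_stack(&t, dummy_callback, 0);
                mod_timer(&t, jiffies + msecs_to_jiffies(100));
                /* ... wait for whatever the timer is meant to interrupt ... */
                del_timer_sync(&t);
                destroy_timer_on_stack(&t); /* pairs with the _on_stack init */
        }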
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 1e8fde8cb803..2292742eac8f 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -205,7 +205,7 @@ static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
205 return 0; 205 return 0;
206} 206}
207 207
208static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base) 208static int mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
209{ 209{
210 struct irq_chip_generic *gc; 210 struct irq_chip_generic *gc;
211 struct irq_chip_type *ct; 211 struct irq_chip_type *ct;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f4c26c7826cd..a07ae9e37930 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1317,12 +1317,12 @@ void gpiochip_remove(struct gpio_chip *chip)
1317 1317
1318 /* FIXME: should the legacy sysfs handling be moved to gpio_device? */ 1318 /* FIXME: should the legacy sysfs handling be moved to gpio_device? */
1319 gpiochip_sysfs_unregister(gdev); 1319 gpiochip_sysfs_unregister(gdev);
1320 gpiochip_free_hogs(chip);
1320 /* Numb the device, cancelling all outstanding operations */ 1321 /* Numb the device, cancelling all outstanding operations */
1321 gdev->chip = NULL; 1322 gdev->chip = NULL;
1322 gpiochip_irqchip_remove(chip); 1323 gpiochip_irqchip_remove(chip);
1323 acpi_gpiochip_remove(chip); 1324 acpi_gpiochip_remove(chip);
1324 gpiochip_remove_pin_ranges(chip); 1325 gpiochip_remove_pin_ranges(chip);
1325 gpiochip_free_hogs(chip);
1326 of_gpiochip_remove(chip); 1326 of_gpiochip_remove(chip);
1327 /* 1327 /*
1328 * We accept no more calls into the driver from this point, so 1328 * We accept no more calls into the driver from this point, so
@@ -1723,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
1723} 1723}
1724 1724
1725/** 1725/**
1726 * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip 1726 * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
1727 * @gpiochip: the gpiochip to add the irqchip to 1727 * @gpiochip: the gpiochip to add the irqchip to
1728 * @irqchip: the irqchip to add to the gpiochip 1728 * @irqchip: the irqchip to add to the gpiochip
1729 * @first_irq: if not dynamically assigned, the base (first) IRQ to 1729 * @first_irq: if not dynamically assigned, the base (first) IRQ to
@@ -1749,13 +1749,13 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
1749 * the pins on the gpiochip can generate a unique IRQ. Everything else 1749 * the pins on the gpiochip can generate a unique IRQ. Everything else
1750 * need to be open coded. 1750 * need to be open coded.
1751 */ 1751 */
1752int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, 1752int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
1753 struct irq_chip *irqchip, 1753 struct irq_chip *irqchip,
1754 unsigned int first_irq, 1754 unsigned int first_irq,
1755 irq_flow_handler_t handler, 1755 irq_flow_handler_t handler,
1756 unsigned int type, 1756 unsigned int type,
1757 bool nested, 1757 bool nested,
1758 struct lock_class_key *lock_key) 1758 struct lock_class_key *lock_key)
1759{ 1759{
1760 struct device_node *of_node; 1760 struct device_node *of_node;
1761 bool irq_base_set = false; 1761 bool irq_base_set = false;
@@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
1840 1840
1841 return 0; 1841 return 0;
1842} 1842}
1843EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add); 1843EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
1844 1844
1845#else /* CONFIG_GPIOLIB_IRQCHIP */ 1845#else /* CONFIG_GPIOLIB_IRQCHIP */
1846 1846
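Two independent fixes land in gpiolib.c: gpiochip_free_hogs() now runs before gdev->chip is cleared, so releasing hogged descriptors can still reach a live chip, and the irqchip helper is renamed to gpiochip_irqchip_add_key() to make the lockdep key parameter explicit. Callers are expected to go through a wrapper that mints one static key per call site; a sketch of that shape, modeled on (but not copied from) <linux/gpio/driver.h>:

        #define my_gpiochip_irqchip_add(chip, irqchip, first_irq, handler, type) \
        ({                                                                       \
                static struct lock_class_key key;      /* one class per site */  \
                gpiochip_irqchip_add_key(chip, irqchip, first_irq, handler,      \
                                         type, false, &key);                     \
        })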
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 9ada56c16a58..4c851fde1e82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -840,6 +840,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
840 else if (type == CGS_UCODE_ID_SMU_SK) 840 else if (type == CGS_UCODE_ID_SMU_SK)
841 strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin"); 841 strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
842 break; 842 break;
843 case CHIP_POLARIS12:
844 strcpy(fw_name, "amdgpu/polaris12_smc.bin");
845 break;
843 default: 846 default:
844 DRM_ERROR("SMC firmware not supported\n"); 847 DRM_ERROR("SMC firmware not supported\n");
845 return -EINVAL; 848 return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 29d6d84d1c28..41e41f90265d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
83 } 83 }
84 break; 84 break;
85 } 85 }
86
87 if (!(*out_ring && (*out_ring)->adev)) {
88 DRM_ERROR("Ring %d is not initialized on IP %d\n",
89 ring, ip_type);
90 return -EINVAL;
91 }
92
86 return 0; 93 return 0;
87} 94}
88 95
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 60bd4afe45c8..fe3bb94fe58d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -73,6 +73,7 @@ static const char *amdgpu_asic_name[] = {
73 "STONEY", 73 "STONEY",
74 "POLARIS10", 74 "POLARIS10",
75 "POLARIS11", 75 "POLARIS11",
76 "POLARIS12",
76 "LAST", 77 "LAST",
77}; 78};
78 79
@@ -1277,6 +1278,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
1277 case CHIP_FIJI: 1278 case CHIP_FIJI:
1278 case CHIP_POLARIS11: 1279 case CHIP_POLARIS11:
1279 case CHIP_POLARIS10: 1280 case CHIP_POLARIS10:
1281 case CHIP_POLARIS12:
1280 case CHIP_CARRIZO: 1282 case CHIP_CARRIZO:
1281 case CHIP_STONEY: 1283 case CHIP_STONEY:
1282 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) 1284 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
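amdgpu_asic_name[] is indexed by the asic_type enum, so "POLARIS12" has to slot in just before "LAST", in the same position as the new enum value; the early-init switch then only gains a case label, since Polaris12 takes the same VI-family setup path as Polaris10/11. A one-line sketch of the pairing this relies on, assuming the usual enum-indexed table convention:

        /* enum amd_asic_type and amdgpu_asic_name[] must stay in lockstep */
        pr_info("detected ASIC: %s\n", amdgpu_asic_name[adev->asic_type]);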
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8cb937b2bfcc..2534adaebe30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -418,6 +418,13 @@ static const struct pci_device_id pciidlist[] = {
418 {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 418 {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
419 {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 419 {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
420 {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 420 {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
421 /* Polaris12 */
422 {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
423 {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
424 {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
425 {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
426 {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
427 {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
421 428
422 {0, 0, 0} 429 {0, 0, 0}
423}; 430};
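The PCI ID table is where a new ASIC becomes visible to the kernel at all: each entry's final field is driver_data, which amdgpu uses to carry the asic_type enum from the match into probe. A hedged sketch of how such an entry is consumed; the probe function name is illustrative:

        #include <linux/pci.h>

        static int probe_sketch(struct pci_dev *pdev,
                                const struct pci_device_id *ent)
        {
                /* driver_data round-trips the enum stored in the table */
                unsigned long asic_type = ent->driver_data;

                dev_info(&pdev->dev, "asic type %lu\n", asic_type);
                return 0;
        }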
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index fc592c2b0e16..95a568df8551 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -98,6 +98,7 @@ static int amdgpu_pp_early_init(void *handle)
98 switch (adev->asic_type) { 98 switch (adev->asic_type) {
99 case CHIP_POLARIS11: 99 case CHIP_POLARIS11:
100 case CHIP_POLARIS10: 100 case CHIP_POLARIS10:
101 case CHIP_POLARIS12:
101 case CHIP_TONGA: 102 case CHIP_TONGA:
102 case CHIP_FIJI: 103 case CHIP_FIJI:
103 case CHIP_TOPAZ: 104 case CHIP_TOPAZ:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index a81dfaeeb8c0..1d564beb0fde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -65,6 +65,7 @@
65#define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin" 65#define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin"
66#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin" 66#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
67#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin" 67#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
68#define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin"
68 69
69/** 70/**
70 * amdgpu_uvd_cs_ctx - Command submission parser context 71 * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -98,6 +99,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
98MODULE_FIRMWARE(FIRMWARE_STONEY); 99MODULE_FIRMWARE(FIRMWARE_STONEY);
99MODULE_FIRMWARE(FIRMWARE_POLARIS10); 100MODULE_FIRMWARE(FIRMWARE_POLARIS10);
100MODULE_FIRMWARE(FIRMWARE_POLARIS11); 101MODULE_FIRMWARE(FIRMWARE_POLARIS11);
102MODULE_FIRMWARE(FIRMWARE_POLARIS12);
101 103
102static void amdgpu_uvd_idle_work_handler(struct work_struct *work); 104static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
103 105
@@ -149,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
149 case CHIP_POLARIS11: 151 case CHIP_POLARIS11:
150 fw_name = FIRMWARE_POLARIS11; 152 fw_name = FIRMWARE_POLARIS11;
151 break; 153 break;
154 case CHIP_POLARIS12:
155 fw_name = FIRMWARE_POLARIS12;
156 break;
152 default: 157 default:
153 return -EINVAL; 158 return -EINVAL;
154 } 159 }
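This hunk shows the full three-part ritual for teaching a driver about a new firmware blob, repeated for VCE, SMC, SDMA, GFX and MC below: a #define naming the file, a MODULE_FIRMWARE() tag so initramfs tooling knows to bundle it, and a switch arm mapping asic_type to the name before request_firmware(). A compact, hedged sketch of the same shape; the blob path and helper are hypothetical:

        #include <linux/module.h>
        #include <linux/firmware.h>
        #include <linux/device.h>

        #define FIRMWARE_EXAMPLE "amdgpu/example_uvd.bin"  /* hypothetical */
        MODULE_FIRMWARE(FIRMWARE_EXAMPLE);  /* advertises the dependency */

        static int load_example_fw(struct device *dev,
                                   const struct firmware **fw)
        {
                /* looks the blob up under /lib/firmware by the tagged path */
                return request_firmware(fw, FIRMWARE_EXAMPLE, dev);
        }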
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 69b66b9e7f57..8fec802d3908 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -52,6 +52,7 @@
52#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin" 52#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"
53#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin" 53#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin"
54#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin" 54#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
55#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"
55 56
56#ifdef CONFIG_DRM_AMDGPU_CIK 57#ifdef CONFIG_DRM_AMDGPU_CIK
57MODULE_FIRMWARE(FIRMWARE_BONAIRE); 58MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -66,6 +67,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
66MODULE_FIRMWARE(FIRMWARE_STONEY); 67MODULE_FIRMWARE(FIRMWARE_STONEY);
67MODULE_FIRMWARE(FIRMWARE_POLARIS10); 68MODULE_FIRMWARE(FIRMWARE_POLARIS10);
68MODULE_FIRMWARE(FIRMWARE_POLARIS11); 69MODULE_FIRMWARE(FIRMWARE_POLARIS11);
70MODULE_FIRMWARE(FIRMWARE_POLARIS12);
69 71
70static void amdgpu_vce_idle_work_handler(struct work_struct *work); 72static void amdgpu_vce_idle_work_handler(struct work_struct *work);
71 73
@@ -121,6 +123,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
121 case CHIP_POLARIS11: 123 case CHIP_POLARIS11:
122 fw_name = FIRMWARE_POLARIS11; 124 fw_name = FIRMWARE_POLARIS11;
123 break; 125 break;
126 case CHIP_POLARIS12:
127 fw_name = FIRMWARE_POLARIS12;
128 break;
124 129
125 default: 130 default:
126 return -EINVAL; 131 return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9999dc71b998..ccb5e02e7b20 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
2512 2512
2513 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2513 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2514 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2514 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2515 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2516 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2515 2517
2516 return 0; 2518 return 0;
2517} 2519}
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
2537 int32_t hot_y) 2539 int32_t hot_y)
2538{ 2540{
2539 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2541 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2540 struct amdgpu_device *adev = crtc->dev->dev_private;
2541 struct drm_gem_object *obj; 2542 struct drm_gem_object *obj;
2542 struct amdgpu_bo *aobj; 2543 struct amdgpu_bo *aobj;
2543 int ret; 2544 int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
2578 2579
2579 dce_v10_0_lock_cursor(crtc, true); 2580 dce_v10_0_lock_cursor(crtc, true);
2580 2581
2581 if (hot_x != amdgpu_crtc->cursor_hot_x || 2582 if (width != amdgpu_crtc->cursor_width ||
2583 height != amdgpu_crtc->cursor_height ||
2584 hot_x != amdgpu_crtc->cursor_hot_x ||
2582 hot_y != amdgpu_crtc->cursor_hot_y) { 2585 hot_y != amdgpu_crtc->cursor_hot_y) {
2583 int x, y; 2586 int x, y;
2584 2587
@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
2587 2590
2588 dce_v10_0_cursor_move_locked(crtc, x, y); 2591 dce_v10_0_cursor_move_locked(crtc, x, y);
2589 2592
2590 amdgpu_crtc->cursor_hot_x = hot_x;
2591 amdgpu_crtc->cursor_hot_y = hot_y;
2592 }
2593
2594 if (width != amdgpu_crtc->cursor_width ||
2595 height != amdgpu_crtc->cursor_height) {
2596 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2597 (width - 1) << 16 | (height - 1));
2598 amdgpu_crtc->cursor_width = width; 2593 amdgpu_crtc->cursor_width = width;
2599 amdgpu_crtc->cursor_height = height; 2594 amdgpu_crtc->cursor_height = height;
2595 amdgpu_crtc->cursor_hot_x = hot_x;
2596 amdgpu_crtc->cursor_hot_y = hot_y;
2600 } 2597 }
2601 2598
2602 dce_v10_0_show_cursor(crtc); 2599 dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
2620static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) 2617static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2621{ 2618{
2622 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2619 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2623 struct amdgpu_device *adev = crtc->dev->dev_private;
2624 2620
2625 if (amdgpu_crtc->cursor_bo) { 2621 if (amdgpu_crtc->cursor_bo) {
2626 dce_v10_0_lock_cursor(crtc, true); 2622 dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2628 dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2624 dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2629 amdgpu_crtc->cursor_y); 2625 amdgpu_crtc->cursor_y);
2630 2626
2631 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2632 (amdgpu_crtc->cursor_width - 1) << 16 |
2633 (amdgpu_crtc->cursor_height - 1));
2634
2635 dce_v10_0_show_cursor(crtc); 2627 dce_v10_0_show_cursor(crtc);
2636 2628
2637 dce_v10_0_lock_cursor(crtc, false); 2629 dce_v10_0_lock_cursor(crtc, false);
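The cursor fix is the same across dce_v6_0, dce_v8_0, dce_v10_0 and dce_v11_0: CUR_SIZE used to be written only when the dimensions changed, so after a GPU reset or resume the register could be stale while the cached width/height said otherwise. Programming it in cursor_move_locked() alongside position and hotspot makes every move re-assert the full cursor state. The resulting register sequence, in sketch form, with names as in the hunks above:

        /* re-program the complete cursor state on every move so a reset
         * or resume cannot leave CUR_SIZE stale */
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset,
               (xorigin << 16) | yorigin);
        WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
               ((amdgpu_crtc->cursor_width - 1) << 16) |
               (amdgpu_crtc->cursor_height - 1));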
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index b3d62b909f43..a7af5b33a5e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -167,6 +167,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
167 (const u32)ARRAY_SIZE(stoney_golden_settings_a11)); 167 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
168 break; 168 break;
169 case CHIP_POLARIS11: 169 case CHIP_POLARIS11:
170 case CHIP_POLARIS12:
170 amdgpu_program_register_sequence(adev, 171 amdgpu_program_register_sequence(adev,
171 polaris11_golden_settings_a11, 172 polaris11_golden_settings_a11,
172 (const u32)ARRAY_SIZE(polaris11_golden_settings_a11)); 173 (const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
@@ -608,6 +609,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
608 num_crtc = 6; 609 num_crtc = 6;
609 break; 610 break;
610 case CHIP_POLARIS11: 611 case CHIP_POLARIS11:
612 case CHIP_POLARIS12:
611 num_crtc = 5; 613 num_crtc = 5;
612 break; 614 break;
613 default: 615 default:
@@ -1589,6 +1591,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
1589 adev->mode_info.audio.num_pins = 8; 1591 adev->mode_info.audio.num_pins = 8;
1590 break; 1592 break;
1591 case CHIP_POLARIS11: 1593 case CHIP_POLARIS11:
1594 case CHIP_POLARIS12:
1592 adev->mode_info.audio.num_pins = 6; 1595 adev->mode_info.audio.num_pins = 6;
1593 break; 1596 break;
1594 default: 1597 default:
@@ -2388,7 +2391,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
2388 int pll; 2391 int pll;
2389 2392
2390 if ((adev->asic_type == CHIP_POLARIS10) || 2393 if ((adev->asic_type == CHIP_POLARIS10) ||
2391 (adev->asic_type == CHIP_POLARIS11)) { 2394 (adev->asic_type == CHIP_POLARIS11) ||
2395 (adev->asic_type == CHIP_POLARIS12)) {
2392 struct amdgpu_encoder *amdgpu_encoder = 2396 struct amdgpu_encoder *amdgpu_encoder =
2393 to_amdgpu_encoder(amdgpu_crtc->encoder); 2397 to_amdgpu_encoder(amdgpu_crtc->encoder);
2394 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 2398 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -2528,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
2528 2532
2529 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2533 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2530 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2534 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2535 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2536 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2531 2537
2532 return 0; 2538 return 0;
2533} 2539}
@@ -2553,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
2553 int32_t hot_y) 2559 int32_t hot_y)
2554{ 2560{
2555 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2561 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2556 struct amdgpu_device *adev = crtc->dev->dev_private;
2557 struct drm_gem_object *obj; 2562 struct drm_gem_object *obj;
2558 struct amdgpu_bo *aobj; 2563 struct amdgpu_bo *aobj;
2559 int ret; 2564 int ret;
@@ -2594,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
2594 2599
2595 dce_v11_0_lock_cursor(crtc, true); 2600 dce_v11_0_lock_cursor(crtc, true);
2596 2601
2597 if (hot_x != amdgpu_crtc->cursor_hot_x || 2602 if (width != amdgpu_crtc->cursor_width ||
2603 height != amdgpu_crtc->cursor_height ||
2604 hot_x != amdgpu_crtc->cursor_hot_x ||
2598 hot_y != amdgpu_crtc->cursor_hot_y) { 2605 hot_y != amdgpu_crtc->cursor_hot_y) {
2599 int x, y; 2606 int x, y;
2600 2607
@@ -2603,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
2603 2610
2604 dce_v11_0_cursor_move_locked(crtc, x, y); 2611 dce_v11_0_cursor_move_locked(crtc, x, y);
2605 2612
2606 amdgpu_crtc->cursor_hot_x = hot_x;
2607 amdgpu_crtc->cursor_hot_y = hot_y;
2608 }
2609
2610 if (width != amdgpu_crtc->cursor_width ||
2611 height != amdgpu_crtc->cursor_height) {
2612 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2613 (width - 1) << 16 | (height - 1));
2614 amdgpu_crtc->cursor_width = width; 2613 amdgpu_crtc->cursor_width = width;
2615 amdgpu_crtc->cursor_height = height; 2614 amdgpu_crtc->cursor_height = height;
2615 amdgpu_crtc->cursor_hot_x = hot_x;
2616 amdgpu_crtc->cursor_hot_y = hot_y;
2616 } 2617 }
2617 2618
2618 dce_v11_0_show_cursor(crtc); 2619 dce_v11_0_show_cursor(crtc);
@@ -2636,7 +2637,6 @@ unpin:
2636static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) 2637static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
2637{ 2638{
2638 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2639 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2639 struct amdgpu_device *adev = crtc->dev->dev_private;
2640 2640
2641 if (amdgpu_crtc->cursor_bo) { 2641 if (amdgpu_crtc->cursor_bo) {
2642 dce_v11_0_lock_cursor(crtc, true); 2642 dce_v11_0_lock_cursor(crtc, true);
@@ -2644,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
2644 dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2644 dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2645 amdgpu_crtc->cursor_y); 2645 amdgpu_crtc->cursor_y);
2646 2646
2647 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2648 (amdgpu_crtc->cursor_width - 1) << 16 |
2649 (amdgpu_crtc->cursor_height - 1));
2650
2651 dce_v11_0_show_cursor(crtc); 2647 dce_v11_0_show_cursor(crtc);
2652 2648
2653 dce_v11_0_lock_cursor(crtc, false); 2649 dce_v11_0_lock_cursor(crtc, false);
@@ -2822,7 +2818,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
2822 return -EINVAL; 2818 return -EINVAL;
2823 2819
2824 if ((adev->asic_type == CHIP_POLARIS10) || 2820 if ((adev->asic_type == CHIP_POLARIS10) ||
2825 (adev->asic_type == CHIP_POLARIS11)) { 2821 (adev->asic_type == CHIP_POLARIS11) ||
2822 (adev->asic_type == CHIP_POLARIS12)) {
2826 struct amdgpu_encoder *amdgpu_encoder = 2823 struct amdgpu_encoder *amdgpu_encoder =
2827 to_amdgpu_encoder(amdgpu_crtc->encoder); 2824 to_amdgpu_encoder(amdgpu_crtc->encoder);
2828 int encoder_mode = 2825 int encoder_mode =
@@ -2992,6 +2989,7 @@ static int dce_v11_0_early_init(void *handle)
2992 adev->mode_info.num_dig = 6; 2989 adev->mode_info.num_dig = 6;
2993 break; 2990 break;
2994 case CHIP_POLARIS11: 2991 case CHIP_POLARIS11:
2992 case CHIP_POLARIS12:
2995 adev->mode_info.num_hpd = 5; 2993 adev->mode_info.num_hpd = 5;
2996 adev->mode_info.num_dig = 5; 2994 adev->mode_info.num_dig = 5;
2997 break; 2995 break;
@@ -3101,7 +3099,8 @@ static int dce_v11_0_hw_init(void *handle)
3101 amdgpu_atombios_crtc_powergate_init(adev); 3099 amdgpu_atombios_crtc_powergate_init(adev);
3102 amdgpu_atombios_encoder_init_dig(adev); 3100 amdgpu_atombios_encoder_init_dig(adev);
3103 if ((adev->asic_type == CHIP_POLARIS10) || 3101 if ((adev->asic_type == CHIP_POLARIS10) ||
3104 (adev->asic_type == CHIP_POLARIS11)) { 3102 (adev->asic_type == CHIP_POLARIS11) ||
3103 (adev->asic_type == CHIP_POLARIS12)) {
3105 amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk, 3104 amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
3106 DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS); 3105 DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
3107 amdgpu_atombios_crtc_set_dce_clock(adev, 0, 3106 amdgpu_atombios_crtc_set_dce_clock(adev, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b4e4ec630e8c..39df6a50637f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
1859 struct amdgpu_device *adev = crtc->dev->dev_private; 1859 struct amdgpu_device *adev = crtc->dev->dev_private;
1860 int xorigin = 0, yorigin = 0; 1860 int xorigin = 0, yorigin = 0;
1861 1861
1862 int w = amdgpu_crtc->cursor_width;
1863
1862 amdgpu_crtc->cursor_x = x; 1864 amdgpu_crtc->cursor_x = x;
1863 amdgpu_crtc->cursor_y = y; 1865 amdgpu_crtc->cursor_y = y;
1864 1866
@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
1878 1880
1879 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 1881 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
1880 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 1882 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
1883 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
1884 ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
1881 1885
1882 return 0; 1886 return 0;
1883} 1887}
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
1903 int32_t hot_y) 1907 int32_t hot_y)
1904{ 1908{
1905 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1909 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1906 struct amdgpu_device *adev = crtc->dev->dev_private;
1907 struct drm_gem_object *obj; 1910 struct drm_gem_object *obj;
1908 struct amdgpu_bo *aobj; 1911 struct amdgpu_bo *aobj;
1909 int ret; 1912 int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
1944 1947
1945 dce_v6_0_lock_cursor(crtc, true); 1948 dce_v6_0_lock_cursor(crtc, true);
1946 1949
1947 if (hot_x != amdgpu_crtc->cursor_hot_x || 1950 if (width != amdgpu_crtc->cursor_width ||
1951 height != amdgpu_crtc->cursor_height ||
1952 hot_x != amdgpu_crtc->cursor_hot_x ||
1948 hot_y != amdgpu_crtc->cursor_hot_y) { 1953 hot_y != amdgpu_crtc->cursor_hot_y) {
1949 int x, y; 1954 int x, y;
1950 1955
@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
1953 1958
1954 dce_v6_0_cursor_move_locked(crtc, x, y); 1959 dce_v6_0_cursor_move_locked(crtc, x, y);
1955 1960
1956 amdgpu_crtc->cursor_hot_x = hot_x;
1957 amdgpu_crtc->cursor_hot_y = hot_y;
1958 }
1959
1960 if (width != amdgpu_crtc->cursor_width ||
1961 height != amdgpu_crtc->cursor_height) {
1962 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
1963 (width - 1) << 16 | (height - 1));
1964 amdgpu_crtc->cursor_width = width; 1961 amdgpu_crtc->cursor_width = width;
1965 amdgpu_crtc->cursor_height = height; 1962 amdgpu_crtc->cursor_height = height;
1963 amdgpu_crtc->cursor_hot_x = hot_x;
1964 amdgpu_crtc->cursor_hot_y = hot_y;
1966 } 1965 }
1967 1966
1968 dce_v6_0_show_cursor(crtc); 1967 dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
1986static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) 1985static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
1987{ 1986{
1988 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1987 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1989 struct amdgpu_device *adev = crtc->dev->dev_private;
1990 1988
1991 if (amdgpu_crtc->cursor_bo) { 1989 if (amdgpu_crtc->cursor_bo) {
1992 dce_v6_0_lock_cursor(crtc, true); 1990 dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
1994 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 1992 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
1995 amdgpu_crtc->cursor_y); 1993 amdgpu_crtc->cursor_y);
1996 1994
1997 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
1998 (amdgpu_crtc->cursor_width - 1) << 16 |
1999 (amdgpu_crtc->cursor_height - 1));
2000
2001 dce_v6_0_show_cursor(crtc); 1995 dce_v6_0_show_cursor(crtc);
2002 dce_v6_0_lock_cursor(crtc, false); 1996 dce_v6_0_lock_cursor(crtc, false);
2003 } 1997 }
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 584abe834a3c..28102bb1704d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2363 2363
2364 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2364 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2365 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2365 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2366 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2367 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2366 2368
2367 return 0; 2369 return 0;
2368} 2370}
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2388 int32_t hot_y) 2390 int32_t hot_y)
2389{ 2391{
2390 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2392 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2391 struct amdgpu_device *adev = crtc->dev->dev_private;
2392 struct drm_gem_object *obj; 2393 struct drm_gem_object *obj;
2393 struct amdgpu_bo *aobj; 2394 struct amdgpu_bo *aobj;
2394 int ret; 2395 int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2429 2430
2430 dce_v8_0_lock_cursor(crtc, true); 2431 dce_v8_0_lock_cursor(crtc, true);
2431 2432
2432 if (hot_x != amdgpu_crtc->cursor_hot_x || 2433 if (width != amdgpu_crtc->cursor_width ||
2434 height != amdgpu_crtc->cursor_height ||
2435 hot_x != amdgpu_crtc->cursor_hot_x ||
2433 hot_y != amdgpu_crtc->cursor_hot_y) { 2436 hot_y != amdgpu_crtc->cursor_hot_y) {
2434 int x, y; 2437 int x, y;
2435 2438
@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2438 2441
2439 dce_v8_0_cursor_move_locked(crtc, x, y); 2442 dce_v8_0_cursor_move_locked(crtc, x, y);
2440 2443
2441 amdgpu_crtc->cursor_hot_x = hot_x;
2442 amdgpu_crtc->cursor_hot_y = hot_y;
2443 }
2444
2445 if (width != amdgpu_crtc->cursor_width ||
2446 height != amdgpu_crtc->cursor_height) {
2447 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2448 (width - 1) << 16 | (height - 1));
2449 amdgpu_crtc->cursor_width = width; 2444 amdgpu_crtc->cursor_width = width;
2450 amdgpu_crtc->cursor_height = height; 2445 amdgpu_crtc->cursor_height = height;
2446 amdgpu_crtc->cursor_hot_x = hot_x;
2447 amdgpu_crtc->cursor_hot_y = hot_y;
2451 } 2448 }
2452 2449
2453 dce_v8_0_show_cursor(crtc); 2450 dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
2471static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) 2468static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2472{ 2469{
2473 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2470 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2474 struct amdgpu_device *adev = crtc->dev->dev_private;
2475 2471
2476 if (amdgpu_crtc->cursor_bo) { 2472 if (amdgpu_crtc->cursor_bo) {
2477 dce_v8_0_lock_cursor(crtc, true); 2473 dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2479 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2475 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2480 amdgpu_crtc->cursor_y); 2476 amdgpu_crtc->cursor_y);
2481 2477
2482 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2483 (amdgpu_crtc->cursor_width - 1) << 16 |
2484 (amdgpu_crtc->cursor_height - 1));
2485
2486 dce_v8_0_show_cursor(crtc); 2478 dce_v8_0_show_cursor(crtc);
2487 2479
2488 dce_v8_0_lock_cursor(crtc, false); 2480 dce_v8_0_lock_cursor(crtc, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 762f8e82ceb7..e9a176891e13 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
627 627
628static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) 628static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
629{ 629{
630 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
631
632 kfree(amdgpu_encoder->enc_priv);
633 drm_encoder_cleanup(encoder); 630 drm_encoder_cleanup(encoder);
634 kfree(amdgpu_encoder); 631 kfree(encoder);
635} 632}
636 633
637static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { 634static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
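The dce_virtual encoder fix is a free-what-you-allocated correction: assuming, as the new code implies, that the virtual display path creates a bare struct drm_encoder, the destroy hook must not upcast it to amdgpu_encoder and free an enc_priv the allocation never contained; drm_encoder_cleanup() plus kfree(encoder) matches the actual allocation. In sketch form:

        /* destroy hook for an encoder allocated as a plain drm_encoder;
         * no amdgpu_encoder upcast, no enc_priv to free */
        static void virtual_encoder_destroy(struct drm_encoder *encoder)
        {
                drm_encoder_cleanup(encoder);
                kfree(encoder);
        }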
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index d0ec00986f38..373374164bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -139,6 +139,13 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
139MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin"); 139MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
140MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin"); 140MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
141 141
142MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
143MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
144MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
145MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
146MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
147MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
148
142static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = 149static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
143{ 150{
144 {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0}, 151 {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -689,6 +696,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
689 (const u32)ARRAY_SIZE(tonga_golden_common_all)); 696 (const u32)ARRAY_SIZE(tonga_golden_common_all));
690 break; 697 break;
691 case CHIP_POLARIS11: 698 case CHIP_POLARIS11:
699 case CHIP_POLARIS12:
692 amdgpu_program_register_sequence(adev, 700 amdgpu_program_register_sequence(adev,
693 golden_settings_polaris11_a11, 701 golden_settings_polaris11_a11,
694 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11)); 702 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -903,6 +911,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
903 case CHIP_POLARIS10: 911 case CHIP_POLARIS10:
904 chip_name = "polaris10"; 912 chip_name = "polaris10";
905 break; 913 break;
914 case CHIP_POLARIS12:
915 chip_name = "polaris12";
916 break;
906 case CHIP_STONEY: 917 case CHIP_STONEY:
907 chip_name = "stoney"; 918 chip_name = "stoney";
908 break; 919 break;
@@ -1768,6 +1779,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
1768 gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; 1779 gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
1769 break; 1780 break;
1770 case CHIP_POLARIS11: 1781 case CHIP_POLARIS11:
1782 case CHIP_POLARIS12:
1771 ret = amdgpu_atombios_get_gfx_info(adev); 1783 ret = amdgpu_atombios_get_gfx_info(adev);
1772 if (ret) 1784 if (ret)
1773 return ret; 1785 return ret;
@@ -2682,6 +2694,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
2682 2694
2683 break; 2695 break;
2684 case CHIP_POLARIS11: 2696 case CHIP_POLARIS11:
2697 case CHIP_POLARIS12:
2685 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 2698 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2686 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 2699 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2687 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 2700 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
@@ -3503,6 +3516,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
3503 *rconf1 |= 0x0; 3516 *rconf1 |= 0x0;
3504 break; 3517 break;
3505 case CHIP_POLARIS11: 3518 case CHIP_POLARIS11:
3519 case CHIP_POLARIS12:
3506 *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) | 3520 *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3507 SE_XSEL(1) | SE_YSEL(1); 3521 SE_XSEL(1) | SE_YSEL(1);
3508 *rconf1 |= 0x0; 3522 *rconf1 |= 0x0;
@@ -4021,7 +4035,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
4021 cz_enable_cp_power_gating(adev, true); 4035 cz_enable_cp_power_gating(adev, true);
4022 else 4036 else
4023 cz_enable_cp_power_gating(adev, false); 4037 cz_enable_cp_power_gating(adev, false);
4024 } else if (adev->asic_type == CHIP_POLARIS11) { 4038 } else if ((adev->asic_type == CHIP_POLARIS11) ||
4039 (adev->asic_type == CHIP_POLARIS12)) {
4025 gfx_v8_0_init_csb(adev); 4040 gfx_v8_0_init_csb(adev);
4026 gfx_v8_0_init_save_restore_list(adev); 4041 gfx_v8_0_init_save_restore_list(adev);
4027 gfx_v8_0_enable_save_restore_machine(adev); 4042 gfx_v8_0_enable_save_restore_machine(adev);
@@ -4095,7 +4110,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
4095 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); 4110 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4096 WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); 4111 WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
4097 if (adev->asic_type == CHIP_POLARIS11 || 4112 if (adev->asic_type == CHIP_POLARIS11 ||
4098 adev->asic_type == CHIP_POLARIS10) { 4113 adev->asic_type == CHIP_POLARIS10 ||
4114 adev->asic_type == CHIP_POLARIS12) {
4099 tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D); 4115 tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
4100 tmp &= ~0x3; 4116 tmp &= ~0x3;
4101 WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp); 4117 WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
@@ -4283,6 +4299,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
4283 amdgpu_ring_write(ring, 0x0000002A); 4299 amdgpu_ring_write(ring, 0x0000002A);
4284 break; 4300 break;
4285 case CHIP_POLARIS11: 4301 case CHIP_POLARIS11:
4302 case CHIP_POLARIS12:
4286 amdgpu_ring_write(ring, 0x16000012); 4303 amdgpu_ring_write(ring, 0x16000012);
4287 amdgpu_ring_write(ring, 0x00000000); 4304 amdgpu_ring_write(ring, 0x00000000);
4288 break; 4305 break;
@@ -4664,7 +4681,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
4664 (adev->asic_type == CHIP_FIJI) || 4681 (adev->asic_type == CHIP_FIJI) ||
4665 (adev->asic_type == CHIP_STONEY) || 4682 (adev->asic_type == CHIP_STONEY) ||
4666 (adev->asic_type == CHIP_POLARIS11) || 4683 (adev->asic_type == CHIP_POLARIS11) ||
4667 (adev->asic_type == CHIP_POLARIS10)) { 4684 (adev->asic_type == CHIP_POLARIS10) ||
4685 (adev->asic_type == CHIP_POLARIS12)) {
4668 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, 4686 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
4669 AMDGPU_DOORBELL_KIQ << 2); 4687 AMDGPU_DOORBELL_KIQ << 2);
4670 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, 4688 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -4700,7 +4718,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
4700 mqd->cp_hqd_persistent_state = tmp; 4718 mqd->cp_hqd_persistent_state = tmp;
4701 if (adev->asic_type == CHIP_STONEY || 4719 if (adev->asic_type == CHIP_STONEY ||
4702 adev->asic_type == CHIP_POLARIS11 || 4720 adev->asic_type == CHIP_POLARIS11 ||
4703 adev->asic_type == CHIP_POLARIS10) { 4721 adev->asic_type == CHIP_POLARIS10 ||
4722 adev->asic_type == CHIP_POLARIS12) {
4704 tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL); 4723 tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL);
4705 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1); 4724 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1);
4706 WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp); 4725 WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp);
@@ -5279,7 +5298,8 @@ static int gfx_v8_0_late_init(void *handle)
5279static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, 5298static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
5280 bool enable) 5299 bool enable)
5281{ 5300{
5282 if (adev->asic_type == CHIP_POLARIS11) 5301 if ((adev->asic_type == CHIP_POLARIS11) ||
5302 (adev->asic_type == CHIP_POLARIS12))
5283 /* Send msg to SMU via Powerplay */ 5303 /* Send msg to SMU via Powerplay */
5284 amdgpu_set_powergating_state(adev, 5304 amdgpu_set_powergating_state(adev,
5285 AMD_IP_BLOCK_TYPE_SMC, 5305 AMD_IP_BLOCK_TYPE_SMC,
@@ -5353,6 +5373,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
5353 gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false); 5373 gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
5354 break; 5374 break;
5355 case CHIP_POLARIS11: 5375 case CHIP_POLARIS11:
5376 case CHIP_POLARIS12:
5356 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) 5377 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5357 gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); 5378 gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5358 else 5379 else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 45a573e63d4a..0635829b18cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin");
44MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); 44MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
45MODULE_FIRMWARE("radeon/verde_mc.bin"); 45MODULE_FIRMWARE("radeon/verde_mc.bin");
46MODULE_FIRMWARE("radeon/oland_mc.bin"); 46MODULE_FIRMWARE("radeon/oland_mc.bin");
47MODULE_FIRMWARE("radeon/si58_mc.bin");
47 48
48#define MC_SEQ_MISC0__MT__MASK 0xf0000000 49#define MC_SEQ_MISC0__MT__MASK 0xf0000000
49#define MC_SEQ_MISC0__MT__GDDR1 0x10000000 50#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
113 const char *chip_name; 114 const char *chip_name;
114 char fw_name[30]; 115 char fw_name[30];
115 int err; 116 int err;
117 bool is_58_fw = false;
116 118
117 DRM_DEBUG("\n"); 119 DRM_DEBUG("\n");
118 120
@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
135 default: BUG(); 137 default: BUG();
136 } 138 }
137 139
138 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 140 /* this memory configuration requires special firmware */
141 if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
142 is_58_fw = true;
143
144 if (is_58_fw)
145 snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
146 else
147 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
139 err = request_firmware(&adev->mc.fw, fw_name, adev->dev); 148 err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
140 if (err) 149 if (err)
141 goto out; 150 goto out;
@@ -245,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
245 } 254 }
246 WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); 255 WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
247 256
257 if (adev->mode_info.num_crtc)
258 amdgpu_display_set_vga_render_state(adev, false);
259
248 gmc_v6_0_mc_stop(adev, &save); 260 gmc_v6_0_mc_stop(adev, &save);
249 261
250 if (gmc_v6_0_wait_for_idle((void *)adev)) { 262 if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -274,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
274 dev_warn(adev->dev, "Wait for MC idle timedout !\n"); 286 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
275 } 287 }
276 gmc_v6_0_mc_resume(adev, &save); 288 gmc_v6_0_mc_resume(adev, &save);
277 amdgpu_display_set_vga_render_state(adev, false);
278} 289}
279 290
280static int gmc_v6_0_mc_init(struct amdgpu_device *adev) 291static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
@@ -463,19 +474,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
463 WREG32(mmVM_CONTEXT1_CNTL, 474 WREG32(mmVM_CONTEXT1_CNTL,
464 VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK | 475 VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
465 (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) | 476 (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
466 ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) | 477 ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
467 VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 478 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
468 VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 479 gmc_v6_0_set_fault_enable_default(adev, false);
469 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 480 else
470 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK | 481 gmc_v6_0_set_fault_enable_default(adev, true);
471 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
472 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
473 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
474 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
475 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
476 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
477 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
478 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
479 482
480 gmc_v6_0_gart_flush_gpu_tlb(adev, 0); 483 gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
481 dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", 484 dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -754,7 +757,10 @@ static int gmc_v6_0_late_init(void *handle)
754{ 757{
755 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 758 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
756 759
757 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 760 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
761 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
762 else
763 return 0;
758} 764}
759 765
760static int gmc_v6_0_sw_init(void *handle) 766static int gmc_v6_0_sw_init(void *handle)
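Three things change in gmc_v6_0: the MC microcode is now chosen by probing the memory configuration at runtime, with 0x58 in the top byte of MC_SEQ_MISC0 selecting the shared si58_mc.bin over the per-chip blob; VGA render state is disabled earlier, before the MC is stopped; and the VM fault interrupt is only requested when faults are not configured to halt unconditionally. A sketch of the firmware-selection probe, with register and mask exactly as in the hunk:

        char fw_name[30];
        /* 0x58 in MC_SEQ_MISC0[31:24] marks the memory configuration
         * that requires the special firmware */
        bool is_58_fw = ((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58;

        if (is_58_fw)
                snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
        else
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin",
                         chip_name);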
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 0daac3a5be79..476bc9f1954b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -46,6 +46,7 @@ static int gmc_v8_0_wait_for_idle(void *handle);
46MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); 46MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
47MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); 47MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
48MODULE_FIRMWARE("amdgpu/polaris10_mc.bin"); 48MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
49MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
49 50
50static const u32 golden_settings_tonga_a11[] = 51static const u32 golden_settings_tonga_a11[] =
51{ 52{
@@ -130,6 +131,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
130 (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); 131 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
131 break; 132 break;
132 case CHIP_POLARIS11: 133 case CHIP_POLARIS11:
134 case CHIP_POLARIS12:
133 amdgpu_program_register_sequence(adev, 135 amdgpu_program_register_sequence(adev,
134 golden_settings_polaris11_a11, 136 golden_settings_polaris11_a11,
135 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11)); 137 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -225,6 +227,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
225 case CHIP_POLARIS10: 227 case CHIP_POLARIS10:
226 chip_name = "polaris10"; 228 chip_name = "polaris10";
227 break; 229 break;
230 case CHIP_POLARIS12:
231 chip_name = "polaris12";
232 break;
228 case CHIP_FIJI: 233 case CHIP_FIJI:
229 case CHIP_CARRIZO: 234 case CHIP_CARRIZO:
230 case CHIP_STONEY: 235 case CHIP_STONEY:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 1170a64a3184..034ace79ed49 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -60,6 +60,8 @@ MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
60MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin"); 60MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
61MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin"); 61MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
62MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin"); 62MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
63MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
64MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
63 65
64 66
65static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = 67static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@@ -206,6 +208,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
206 (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); 208 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
207 break; 209 break;
208 case CHIP_POLARIS11: 210 case CHIP_POLARIS11:
211 case CHIP_POLARIS12:
209 amdgpu_program_register_sequence(adev, 212 amdgpu_program_register_sequence(adev,
210 golden_settings_polaris11_a11, 213 golden_settings_polaris11_a11,
211 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11)); 214 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -278,6 +281,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
278 case CHIP_POLARIS10: 281 case CHIP_POLARIS10:
279 chip_name = "polaris10"; 282 chip_name = "polaris10";
280 break; 283 break;
284 case CHIP_POLARIS12:
285 chip_name = "polaris12";
286 break;
281 case CHIP_CARRIZO: 287 case CHIP_CARRIZO:
282 chip_name = "carrizo"; 288 chip_name = "carrizo";
283 break; 289 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 6c65a1a2de79..6e150db8f380 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -56,7 +56,6 @@
56#define BIOS_SCRATCH_4 0x5cd 56#define BIOS_SCRATCH_4 0x5cd
57 57
58MODULE_FIRMWARE("radeon/tahiti_smc.bin"); 58MODULE_FIRMWARE("radeon/tahiti_smc.bin");
59MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
60MODULE_FIRMWARE("radeon/pitcairn_smc.bin"); 59MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
61MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin"); 60MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
62MODULE_FIRMWARE("radeon/verde_smc.bin"); 61MODULE_FIRMWARE("radeon/verde_smc.bin");
@@ -65,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
65MODULE_FIRMWARE("radeon/oland_k_smc.bin"); 64MODULE_FIRMWARE("radeon/oland_k_smc.bin");
66MODULE_FIRMWARE("radeon/hainan_smc.bin"); 65MODULE_FIRMWARE("radeon/hainan_smc.bin");
67MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 66MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
67MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
68 68
69union power_info { 69union power_info {
70 struct _ATOM_POWERPLAY_INFO info; 70 struct _ATOM_POWERPLAY_INFO info;
@@ -3488,30 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
3488 (adev->pdev->device == 0x6817) || 3488 (adev->pdev->device == 0x6817) ||
3489 (adev->pdev->device == 0x6806)) 3489 (adev->pdev->device == 0x6806))
3490 max_mclk = 120000; 3490 max_mclk = 120000;
3491 } else if (adev->asic_type == CHIP_VERDE) {
3492 if ((adev->pdev->revision == 0x81) ||
3493 (adev->pdev->revision == 0x83) ||
3494 (adev->pdev->revision == 0x87) ||
3495 (adev->pdev->device == 0x6820) ||
3496 (adev->pdev->device == 0x6821) ||
3497 (adev->pdev->device == 0x6822) ||
3498 (adev->pdev->device == 0x6823) ||
3499 (adev->pdev->device == 0x682A) ||
3500 (adev->pdev->device == 0x682B)) {
3501 max_sclk = 75000;
3502 max_mclk = 80000;
3503 }
3504 } else if (adev->asic_type == CHIP_OLAND) {
3505 if ((adev->pdev->revision == 0xC7) ||
3506 (adev->pdev->revision == 0x80) ||
3507 (adev->pdev->revision == 0x81) ||
3508 (adev->pdev->revision == 0x83) ||
3509 (adev->pdev->revision == 0x87) ||
3510 (adev->pdev->device == 0x6604) ||
3511 (adev->pdev->device == 0x6605)) {
3512 max_sclk = 75000;
3513 max_mclk = 80000;
3514 }
3515 } else if (adev->asic_type == CHIP_HAINAN) { 3491 } else if (adev->asic_type == CHIP_HAINAN) {
3516 if ((adev->pdev->revision == 0x81) || 3492 if ((adev->pdev->revision == 0x81) ||
3517 (adev->pdev->revision == 0x83) || 3493 (adev->pdev->revision == 0x83) ||
@@ -3520,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
3520 (adev->pdev->device == 0x6665) || 3496 (adev->pdev->device == 0x6665) ||
3521 (adev->pdev->device == 0x6667)) { 3497 (adev->pdev->device == 0x6667)) {
3522 max_sclk = 75000; 3498 max_sclk = 75000;
3523 max_mclk = 80000;
3524 } 3499 }
3525 } 3500 }
3526 /* Apply dpm quirks */ 3501 /* Apply dpm quirks */
@@ -7687,50 +7662,51 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
7687 chip_name = "tahiti"; 7662 chip_name = "tahiti";
7688 break; 7663 break;
7689 case CHIP_PITCAIRN: 7664 case CHIP_PITCAIRN:
7690 if ((adev->pdev->revision == 0x81) || 7665 if ((adev->pdev->revision == 0x81) &&
7691 (adev->pdev->device == 0x6810) || 7666 ((adev->pdev->device == 0x6810) ||
7692 (adev->pdev->device == 0x6811) || 7667 (adev->pdev->device == 0x6811)))
7693 (adev->pdev->device == 0x6816) ||
7694 (adev->pdev->device == 0x6817) ||
7695 (adev->pdev->device == 0x6806))
7696 chip_name = "pitcairn_k"; 7668 chip_name = "pitcairn_k";
7697 else 7669 else
7698 chip_name = "pitcairn"; 7670 chip_name = "pitcairn";
7699 break; 7671 break;
7700 case CHIP_VERDE: 7672 case CHIP_VERDE:
7701 if ((adev->pdev->revision == 0x81) || 7673 if (((adev->pdev->device == 0x6820) &&
7702 (adev->pdev->revision == 0x83) || 7674 ((adev->pdev->revision == 0x81) ||
7703 (adev->pdev->revision == 0x87) || 7675 (adev->pdev->revision == 0x83))) ||
7704 (adev->pdev->device == 0x6820) || 7676 ((adev->pdev->device == 0x6821) &&
7705 (adev->pdev->device == 0x6821) || 7677 ((adev->pdev->revision == 0x83) ||
7706 (adev->pdev->device == 0x6822) || 7678 (adev->pdev->revision == 0x87))) ||
7707 (adev->pdev->device == 0x6823) || 7679 ((adev->pdev->revision == 0x87) &&
7708 (adev->pdev->device == 0x682A) || 7680 ((adev->pdev->device == 0x6823) ||
7709 (adev->pdev->device == 0x682B)) 7681 (adev->pdev->device == 0x682b))))
7710 chip_name = "verde_k"; 7682 chip_name = "verde_k";
7711 else 7683 else
7712 chip_name = "verde"; 7684 chip_name = "verde";
7713 break; 7685 break;
7714 case CHIP_OLAND: 7686 case CHIP_OLAND:
7715 if ((adev->pdev->revision == 0xC7) || 7687 if (((adev->pdev->revision == 0x81) &&
7716 (adev->pdev->revision == 0x80) || 7688 ((adev->pdev->device == 0x6600) ||
7717 (adev->pdev->revision == 0x81) || 7689 (adev->pdev->device == 0x6604) ||
7718 (adev->pdev->revision == 0x83) || 7690 (adev->pdev->device == 0x6605) ||
7719 (adev->pdev->revision == 0x87) || 7691 (adev->pdev->device == 0x6610))) ||
7720 (adev->pdev->device == 0x6604) || 7692 ((adev->pdev->revision == 0x83) &&
7721 (adev->pdev->device == 0x6605)) 7693 (adev->pdev->device == 0x6610)))
7722 chip_name = "oland_k"; 7694 chip_name = "oland_k";
7723 else 7695 else
7724 chip_name = "oland"; 7696 chip_name = "oland";
7725 break; 7697 break;
7726 case CHIP_HAINAN: 7698 case CHIP_HAINAN:
7727 if ((adev->pdev->revision == 0x81) || 7699 if (((adev->pdev->revision == 0x81) &&
7728 (adev->pdev->revision == 0x83) || 7700 (adev->pdev->device == 0x6660)) ||
7729 (adev->pdev->revision == 0xC3) || 7701 ((adev->pdev->revision == 0x83) &&
7730 (adev->pdev->device == 0x6664) || 7702 ((adev->pdev->device == 0x6660) ||
7731 (adev->pdev->device == 0x6665) || 7703 (adev->pdev->device == 0x6663) ||
7732 (adev->pdev->device == 0x6667)) 7704 (adev->pdev->device == 0x6665) ||
7705 (adev->pdev->device == 0x6667))))
7733 chip_name = "hainan_k"; 7706 chip_name = "hainan_k";
7707 else if ((adev->pdev->revision == 0xc3) &&
7708 (adev->pdev->device == 0x6665))
7709 chip_name = "banks_k_2";
7734 else 7710 else
7735 chip_name = "hainan"; 7711 chip_name = "hainan";
7736 break; 7712 break;
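The si_dpm hunk above tightens the "_k" SMC firmware selection: instead of matching any device id or any revision from one flat OR list, each "_k" variant is now keyed to explicit (revision, device id) pairs, and a separate banks_k_2 firmware is split out for the revision-0xc3, device-0x6665 Hainan part. A minimal C sketch of the resulting match, using the Oland pairs from the hunk (the helper name is hypothetical, not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>

	/* Hypothetical helper mirroring the CHIP_OLAND case above: the
	 * "_k" firmware is used only for these (revision, device) pairs. */
	static bool oland_needs_k_smc(uint8_t revision, uint16_t device)
	{
		if (revision == 0x81)
			return device == 0x6600 || device == 0x6604 ||
			       device == 0x6605 || device == 0x6610;
		if (revision == 0x83)
			return device == 0x6610;
		return false;
	}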
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 96444e4d862a..7fb9137dd89b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -40,13 +40,14 @@
40#include "smu/smu_7_0_1_sh_mask.h" 40#include "smu/smu_7_0_1_sh_mask.h"
41 41
42static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); 42static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
43static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
44static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); 43static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
45static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev); 44static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
46static int uvd_v4_2_start(struct amdgpu_device *adev); 45static int uvd_v4_2_start(struct amdgpu_device *adev);
47static void uvd_v4_2_stop(struct amdgpu_device *adev); 46static void uvd_v4_2_stop(struct amdgpu_device *adev);
48static int uvd_v4_2_set_clockgating_state(void *handle, 47static int uvd_v4_2_set_clockgating_state(void *handle,
49 enum amd_clockgating_state state); 48 enum amd_clockgating_state state);
49static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
50 bool sw_mode);
50/** 51/**
51 * uvd_v4_2_ring_get_rptr - get read pointer 52 * uvd_v4_2_ring_get_rptr - get read pointer
52 * 53 *
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle)
140 141
141 return r; 142 return r;
142} 143}
143 144static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
145 bool enable);
144/** 146/**
145 * uvd_v4_2_hw_init - start and test UVD block 147 * uvd_v4_2_hw_init - start and test UVD block
146 * 148 *
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle)
155 uint32_t tmp; 157 uint32_t tmp;
156 int r; 158 int r;
157 159
158 uvd_v4_2_init_cg(adev); 160 uvd_v4_2_enable_mgcg(adev, true);
159 uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
160 amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); 161 amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
161 r = uvd_v4_2_start(adev); 162 r = uvd_v4_2_start(adev);
162 if (r) 163 if (r)
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
266 struct amdgpu_ring *ring = &adev->uvd.ring; 267 struct amdgpu_ring *ring = &adev->uvd.ring;
267 uint32_t rb_bufsz; 268 uint32_t rb_bufsz;
268 int i, j, r; 269 int i, j, r;
269
270 /* disable byte swapping */ 270 /* disable byte swapping */
271 u32 lmi_swap_cntl = 0; 271 u32 lmi_swap_cntl = 0;
272 u32 mp_swap_cntl = 0; 272 u32 mp_swap_cntl = 0;
273 273
274 WREG32(mmUVD_CGC_GATE, 0);
275 uvd_v4_2_set_dcm(adev, true);
276
274 uvd_v4_2_mc_resume(adev); 277 uvd_v4_2_mc_resume(adev);
275 278
 276 /* disable interrupt */ 279 /* disable interrupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)
406 409
407 /* Unstall UMC and register bus */ 410 /* Unstall UMC and register bus */
408 WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); 411 WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
412
413 uvd_v4_2_set_dcm(adev, false);
409} 414}
410 415
411/** 416/**
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
619 WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2); 624 WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
620} 625}
621 626
622static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
623{
624 bool hw_mode = true;
625
626 if (hw_mode) {
627 uvd_v4_2_set_dcm(adev, false);
628 } else {
629 u32 tmp = RREG32(mmUVD_CGC_CTRL);
630 tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
631 WREG32(mmUVD_CGC_CTRL, tmp);
632 }
633}
634
635static bool uvd_v4_2_is_idle(void *handle) 627static bool uvd_v4_2_is_idle(void *handle)
636{ 628{
637 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 629 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
685static int uvd_v4_2_set_clockgating_state(void *handle, 677static int uvd_v4_2_set_clockgating_state(void *handle,
686 enum amd_clockgating_state state) 678 enum amd_clockgating_state state)
687{ 679{
688 bool gate = false;
689 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
690
691 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
692 return 0;
693
694 if (state == AMD_CG_STATE_GATE)
695 gate = true;
696
697 uvd_v4_2_enable_mgcg(adev, gate);
698
699 return 0; 680 return 0;
700} 681}
701 682
@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
711 */ 692 */
712 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 693 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
713 694
714 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
715 return 0;
716
717 if (state == AMD_PG_STATE_GATE) { 695 if (state == AMD_PG_STATE_GATE) {
718 uvd_v4_2_stop(adev); 696 uvd_v4_2_stop(adev);
719 return 0; 697 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index a79e283590fb..6de6becce745 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -791,15 +791,10 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
791{ 791{
792 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 792 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
793 bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 793 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
794 static int curstate = -1;
795 794
796 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) 795 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
797 return 0; 796 return 0;
798 797
799 if (curstate == state)
800 return 0;
801
802 curstate = state;
803 if (enable) { 798 if (enable) {
804 /* wait for STATUS to clear */ 799 /* wait for STATUS to clear */
805 if (uvd_v5_0_wait_for_idle(handle)) 800 if (uvd_v5_0_wait_for_idle(handle))
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6b3293a1c7b8..37ca685e5a9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -43,9 +43,13 @@
43 43
44#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 44#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
45#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 45#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
46#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07
47
46#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 48#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
47#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 49#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
48#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 50#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
51#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
52
49#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 53#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
50 54
51#define VCE_V3_0_FW_SIZE (384 * 1024) 55#define VCE_V3_0_FW_SIZE (384 * 1024)
@@ -54,6 +58,9 @@
54 58
55#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) 59#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
56 60
61#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
62 | GRBM_GFX_INDEX__VCE_ALL_PIPE)
63
57static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); 64static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
58static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); 65static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
59static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); 66static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
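Given the two defines above (instance shift 0x04, VCE_ALL_PIPE 0x07), GET_VCE_INSTANCE() builds the GRBM_GFX_INDEX value that selects a single VCE instance across all pipes. A quick expansion for reference:

	/* GET_VCE_INSTANCE(0) == (0 << 0x04) | 0x07 == 0x07
	 * GET_VCE_INSTANCE(1) == (1 << 0x04) | 0x07 == 0x17
	 * The hunks below write these values to mmGRBM_GFX_INDEX to steer
	 * register access to one instance, then write
	 * mmGRBM_GFX_INDEX_DEFAULT (0xE0000000) to restore the default
	 * selection when done. */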
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
175 WREG32(mmVCE_UENC_CLOCK_GATING_2, data); 182 WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
176 183
177 data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); 184 data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
178 data &= ~0xffc00000; 185 data &= ~0x3ff;
179 WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); 186 WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
180 187
181 data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); 188 data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
249 if (adev->vce.harvest_config & (1 << idx)) 256 if (adev->vce.harvest_config & (1 << idx))
250 continue; 257 continue;
251 258
252 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); 259 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
253 vce_v3_0_mc_resume(adev, idx); 260 vce_v3_0_mc_resume(adev, idx);
254 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); 261 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
255 262
@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
273 } 280 }
274 } 281 }
275 282
276 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 283 WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
277 mutex_unlock(&adev->grbm_idx_mutex); 284 mutex_unlock(&adev->grbm_idx_mutex);
278 285
279 return 0; 286 return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
288 if (adev->vce.harvest_config & (1 << idx)) 295 if (adev->vce.harvest_config & (1 << idx))
289 continue; 296 continue;
290 297
291 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); 298 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
292 299
293 if (adev->asic_type >= CHIP_STONEY) 300 if (adev->asic_type >= CHIP_STONEY)
294 WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); 301 WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
306 vce_v3_0_set_vce_sw_clock_gating(adev, false); 313 vce_v3_0_set_vce_sw_clock_gating(adev, false);
307 } 314 }
308 315
309 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 316 WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
310 mutex_unlock(&adev->grbm_idx_mutex); 317 mutex_unlock(&adev->grbm_idx_mutex);
311 318
312 return 0; 319 return 0;
@@ -320,11 +327,12 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
320{ 327{
321 u32 tmp; 328 u32 tmp;
322 329
323 /* Fiji, Stoney, Polaris10, Polaris11 are single pipe */ 330 /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
324 if ((adev->asic_type == CHIP_FIJI) || 331 if ((adev->asic_type == CHIP_FIJI) ||
325 (adev->asic_type == CHIP_STONEY) || 332 (adev->asic_type == CHIP_STONEY) ||
326 (adev->asic_type == CHIP_POLARIS10) || 333 (adev->asic_type == CHIP_POLARIS10) ||
327 (adev->asic_type == CHIP_POLARIS11)) 334 (adev->asic_type == CHIP_POLARIS11) ||
335 (adev->asic_type == CHIP_POLARIS12))
328 return AMDGPU_VCE_HARVEST_VCE1; 336 return AMDGPU_VCE_HARVEST_VCE1;
329 337
330 /* Tonga and CZ are dual or single pipe */ 338 /* Tonga and CZ are dual or single pipe */
@@ -585,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
 585 * VCE team suggests using bits 3 to 6 for the busy status check 593 * VCE team suggests using bits 3 to 6 for the busy status check
586 */ 594 */
587 mutex_lock(&adev->grbm_idx_mutex); 595 mutex_lock(&adev->grbm_idx_mutex);
588 WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); 596 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
589 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { 597 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
590 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); 598 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
591 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); 599 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
592 } 600 }
593 WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); 601 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
594 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { 602 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
595 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); 603 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
596 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); 604 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
597 } 605 }
598 WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); 606 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
599 mutex_unlock(&adev->grbm_idx_mutex); 607 mutex_unlock(&adev->grbm_idx_mutex);
600 608
601 if (srbm_soft_reset) { 609 if (srbm_soft_reset) {
@@ -733,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
733 if (adev->vce.harvest_config & (1 << i)) 741 if (adev->vce.harvest_config & (1 << i))
734 continue; 742 continue;
735 743
736 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); 744 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
737 745
738 if (enable) { 746 if (enable) {
739 /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ 747 /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -752,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
752 vce_v3_0_set_vce_sw_clock_gating(adev, enable); 760 vce_v3_0_set_vce_sw_clock_gating(adev, enable);
753 } 761 }
754 762
755 WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); 763 WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
756 mutex_unlock(&adev->grbm_idx_mutex); 764 mutex_unlock(&adev->grbm_idx_mutex);
757 765
758 return 0; 766 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index bf088d6d9bf1..c2ac54f11341 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -88,6 +88,7 @@ MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
88MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); 88MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
89MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); 89MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
90MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); 90MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
91MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
91 92
92/* 93/*
93 * Indirect registers accessor 94 * Indirect registers accessor
@@ -312,6 +313,7 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
312 break; 313 break;
313 case CHIP_POLARIS11: 314 case CHIP_POLARIS11:
314 case CHIP_POLARIS10: 315 case CHIP_POLARIS10:
316 case CHIP_POLARIS12:
315 default: 317 default:
316 break; 318 break;
317 } 319 }
@@ -671,6 +673,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
671 case CHIP_TONGA: 673 case CHIP_TONGA:
672 case CHIP_POLARIS11: 674 case CHIP_POLARIS11:
673 case CHIP_POLARIS10: 675 case CHIP_POLARIS10:
676 case CHIP_POLARIS12:
674 case CHIP_CARRIZO: 677 case CHIP_CARRIZO:
675 case CHIP_STONEY: 678 case CHIP_STONEY:
676 asic_register_table = cz_allowed_read_registers; 679 asic_register_table = cz_allowed_read_registers;
@@ -994,6 +997,11 @@ static int vi_common_early_init(void *handle)
994 adev->pg_flags = 0; 997 adev->pg_flags = 0;
995 adev->external_rev_id = adev->rev_id + 0x50; 998 adev->external_rev_id = adev->rev_id + 0x50;
996 break; 999 break;
1000 case CHIP_POLARIS12:
1001 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
1002 adev->pg_flags = 0;
1003 adev->external_rev_id = adev->rev_id + 0x64;
1004 break;
997 case CHIP_CARRIZO: 1005 case CHIP_CARRIZO:
998 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | 1006 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
999 AMD_CG_SUPPORT_GFX_MGCG | 1007 AMD_CG_SUPPORT_GFX_MGCG |
@@ -1346,6 +1354,7 @@ static int vi_common_set_clockgating_state(void *handle,
1346 case CHIP_TONGA: 1354 case CHIP_TONGA:
1347 case CHIP_POLARIS10: 1355 case CHIP_POLARIS10:
1348 case CHIP_POLARIS11: 1356 case CHIP_POLARIS11:
1357 case CHIP_POLARIS12:
1349 vi_common_set_clockgating_state_by_smu(adev, state); 1358 vi_common_set_clockgating_state_by_smu(adev, state);
1350 default: 1359 default:
1351 break; 1360 break;
@@ -1429,6 +1438,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1429 break; 1438 break;
1430 case CHIP_POLARIS11: 1439 case CHIP_POLARIS11:
1431 case CHIP_POLARIS10: 1440 case CHIP_POLARIS10:
1441 case CHIP_POLARIS12:
1432 amdgpu_ip_block_add(adev, &vi_common_ip_block); 1442 amdgpu_ip_block_add(adev, &vi_common_ip_block);
1433 amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block); 1443 amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
1434 amdgpu_ip_block_add(adev, &tonga_ih_ip_block); 1444 amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index c02469ada9f1..85f358764bbc 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -23,7 +23,7 @@
23#ifndef __AMD_SHARED_H__ 23#ifndef __AMD_SHARED_H__
24#define __AMD_SHARED_H__ 24#define __AMD_SHARED_H__
25 25
26#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 26#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */
27 27
28/* 28/*
29 * Supported ASIC types 29 * Supported ASIC types
@@ -46,6 +46,7 @@ enum amd_asic_type {
46 CHIP_STONEY, 46 CHIP_STONEY,
47 CHIP_POLARIS10, 47 CHIP_POLARIS10,
48 CHIP_POLARIS11, 48 CHIP_POLARIS11,
49 CHIP_POLARIS12,
49 CHIP_LAST, 50 CHIP_LAST,
50}; 51};
51 52
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index b0c63c5f54c9..6bb79c94cb9f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
200 cgs_set_clockgating_state( 200 cgs_set_clockgating_state(
201 hwmgr->device, 201 hwmgr->device,
202 AMD_IP_BLOCK_TYPE_VCE, 202 AMD_IP_BLOCK_TYPE_VCE,
203 AMD_CG_STATE_UNGATE); 203 AMD_CG_STATE_GATE);
204 cgs_set_powergating_state( 204 cgs_set_powergating_state(
205 hwmgr->device, 205 hwmgr->device,
206 AMD_IP_BLOCK_TYPE_VCE, 206 AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
218 cgs_set_clockgating_state( 218 cgs_set_clockgating_state(
219 hwmgr->device, 219 hwmgr->device,
220 AMD_IP_BLOCK_TYPE_VCE, 220 AMD_IP_BLOCK_TYPE_VCE,
221 AMD_PG_STATE_GATE); 221 AMD_PG_STATE_UNGATE);
222 cz_dpm_update_vce_dpm(hwmgr); 222 cz_dpm_update_vce_dpm(hwmgr);
223 cz_enable_disable_vce_dpm(hwmgr, true); 223 cz_enable_disable_vce_dpm(hwmgr, true);
224 return 0; 224 return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 4b14f259a147..0fb4e8c8f5e1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1402 cz_hwmgr->vce_dpm.hard_min_clk, 1402 cz_hwmgr->vce_dpm.hard_min_clk,
1403 PPSMC_MSG_SetEclkHardMin)); 1403 PPSMC_MSG_SetEclkHardMin));
1404 } else { 1404 } else {
 1405 /*EPR# 419220 -HW limitation to to */ 1405 /* Program HardMin based on the vce_arbiter.ecclk */
1406 cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; 1406 if (hwmgr->vce_arbiter.ecclk == 0) {
1407 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 1407 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1408 PPSMC_MSG_SetEclkHardMin, 1408 PPSMC_MSG_SetEclkHardMin, 0);
1409 cz_get_eclk_level(hwmgr, 1409 /* disable ECLK DPM 0. Otherwise VCE could hang if
1410 cz_hwmgr->vce_dpm.hard_min_clk, 1410 * switching SCLK from DPM 0 to 6/7 */
1411 PPSMC_MSG_SetEclkHardMin)); 1411 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1412 1412 PPSMC_MSG_SetEclkSoftMin, 1);
1413 } else {
1414 cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
1415 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1416 PPSMC_MSG_SetEclkHardMin,
1417 cz_get_eclk_level(hwmgr,
1418 cz_hwmgr->vce_dpm.hard_min_clk,
1419 PPSMC_MSG_SetEclkHardMin));
1420 }
1413 } 1421 }
1414 return 0; 1422 return 0;
1415} 1423}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index dc6700aee18f..b03606405a53 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -95,6 +95,7 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
95 break; 95 break;
96 case CHIP_POLARIS11: 96 case CHIP_POLARIS11:
97 case CHIP_POLARIS10: 97 case CHIP_POLARIS10:
98 case CHIP_POLARIS12:
98 polaris_set_asic_special_caps(hwmgr); 99 polaris_set_asic_special_caps(hwmgr);
99 hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK); 100 hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
100 break; 101 break;
@@ -745,7 +746,7 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
745 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 746 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
746 PHM_PlatformCaps_TablelessHardwareInterface); 747 PHM_PlatformCaps_TablelessHardwareInterface);
747 748
748 if (hwmgr->chip_id == CHIP_POLARIS11) 749 if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12))
749 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 750 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
750 PHM_PlatformCaps_SPLLShutdownSupport); 751 PHM_PlatformCaps_SPLLShutdownSupport);
751 return 0; 752 return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 26477f0f09dc..6cd1287a7a8f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -521,7 +521,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
521 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); 521 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
522 result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10); 522 result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
523 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); 523 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
524 } else if (hwmgr->chip_id == CHIP_POLARIS11) { 524 } else if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12)) {
525 result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); 525 result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
526 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); 526 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
527 result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); 527 result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index e5812aa456f3..6e618aa20719 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -65,6 +65,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
65 break; 65 break;
66 case CHIP_POLARIS11: 66 case CHIP_POLARIS11:
67 case CHIP_POLARIS10: 67 case CHIP_POLARIS10:
68 case CHIP_POLARIS12:
68 polaris10_smum_init(smumgr); 69 polaris10_smum_init(smumgr);
69 break; 70 break;
70 default: 71 default:
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 908011d2c8f5..7abda94fc2cf 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -113,6 +113,7 @@ struct ast_private {
113 struct ttm_bo_kmap_obj cache_kmap; 113 struct ttm_bo_kmap_obj cache_kmap;
114 int next_cursor; 114 int next_cursor;
115 bool support_wide_screen; 115 bool support_wide_screen;
116 bool DisableP2A;
116 117
117 enum ast_tx_chip tx_chip_type; 118 enum ast_tx_chip tx_chip_type;
118 u8 dp501_maxclk; 119 u8 dp501_maxclk;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f75c6421db62..533e762d036d 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
124 } else 124 } else
125 *need_post = false; 125 *need_post = false;
126 126
127 /* Check P2A Access */
128 ast->DisableP2A = true;
129 data = ast_read32(ast, 0xf004);
130 if (data != 0xFFFFFFFF)
131 ast->DisableP2A = false;
132
127 /* Check if we support wide screen */ 133 /* Check if we support wide screen */
128 switch (ast->chip) { 134 switch (ast->chip) {
129 case AST1180: 135 case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
140 ast->support_wide_screen = true; 146 ast->support_wide_screen = true;
141 else { 147 else {
142 ast->support_wide_screen = false; 148 ast->support_wide_screen = false;
143 /* Read SCU7c (silicon revision register) */ 149 if (ast->DisableP2A == false) {
144 ast_write32(ast, 0xf004, 0x1e6e0000); 150 /* Read SCU7c (silicon revision register) */
145 ast_write32(ast, 0xf000, 0x1); 151 ast_write32(ast, 0xf004, 0x1e6e0000);
146 data = ast_read32(ast, 0x1207c); 152 ast_write32(ast, 0xf000, 0x1);
147 data &= 0x300; 153 data = ast_read32(ast, 0x1207c);
148 if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ 154 data &= 0x300;
149 ast->support_wide_screen = true; 155 if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
150 if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ 156 ast->support_wide_screen = true;
151 ast->support_wide_screen = true; 157 if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
158 ast->support_wide_screen = true;
159 }
152 } 160 }
153 break; 161 break;
154 } 162 }
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
216 uint32_t data, data2; 224 uint32_t data, data2;
217 uint32_t denum, num, div, ref_pll; 225 uint32_t denum, num, div, ref_pll;
218 226
219 ast_write32(ast, 0xf004, 0x1e6e0000); 227 if (ast->DisableP2A)
220 ast_write32(ast, 0xf000, 0x1); 228 {
221
222
223 ast_write32(ast, 0x10000, 0xfc600309);
224
225 do {
226 if (pci_channel_offline(dev->pdev))
227 return -EIO;
228 } while (ast_read32(ast, 0x10000) != 0x01);
229 data = ast_read32(ast, 0x10004);
230
231 if (data & 0x40)
232 ast->dram_bus_width = 16; 229 ast->dram_bus_width = 16;
230 ast->dram_type = AST_DRAM_1Gx16;
231 ast->mclk = 396;
232 }
233 else 233 else
234 ast->dram_bus_width = 32; 234 {
235 ast_write32(ast, 0xf004, 0x1e6e0000);
236 ast_write32(ast, 0xf000, 0x1);
237 data = ast_read32(ast, 0x10004);
238
239 if (data & 0x40)
240 ast->dram_bus_width = 16;
241 else
242 ast->dram_bus_width = 32;
243
244 if (ast->chip == AST2300 || ast->chip == AST2400) {
245 switch (data & 0x03) {
246 case 0:
247 ast->dram_type = AST_DRAM_512Mx16;
248 break;
249 default:
250 case 1:
251 ast->dram_type = AST_DRAM_1Gx16;
252 break;
253 case 2:
254 ast->dram_type = AST_DRAM_2Gx16;
255 break;
256 case 3:
257 ast->dram_type = AST_DRAM_4Gx16;
258 break;
259 }
260 } else {
261 switch (data & 0x0c) {
262 case 0:
263 case 4:
264 ast->dram_type = AST_DRAM_512Mx16;
265 break;
266 case 8:
267 if (data & 0x40)
268 ast->dram_type = AST_DRAM_1Gx16;
269 else
270 ast->dram_type = AST_DRAM_512Mx32;
271 break;
272 case 0xc:
273 ast->dram_type = AST_DRAM_1Gx32;
274 break;
275 }
276 }
235 277
236 if (ast->chip == AST2300 || ast->chip == AST2400) { 278 data = ast_read32(ast, 0x10120);
237 switch (data & 0x03) { 279 data2 = ast_read32(ast, 0x10170);
238 case 0: 280 if (data2 & 0x2000)
239 ast->dram_type = AST_DRAM_512Mx16; 281 ref_pll = 14318;
240 break; 282 else
241 default: 283 ref_pll = 12000;
242 case 1: 284
243 ast->dram_type = AST_DRAM_1Gx16; 285 denum = data & 0x1f;
244 break; 286 num = (data & 0x3fe0) >> 5;
245 case 2: 287 data = (data & 0xc000) >> 14;
246 ast->dram_type = AST_DRAM_2Gx16; 288 switch (data) {
247 break;
248 case 3: 289 case 3:
249 ast->dram_type = AST_DRAM_4Gx16; 290 div = 0x4;
250 break;
251 }
252 } else {
253 switch (data & 0x0c) {
254 case 0:
255 case 4:
256 ast->dram_type = AST_DRAM_512Mx16;
257 break; 291 break;
258 case 8: 292 case 2:
259 if (data & 0x40) 293 case 1:
260 ast->dram_type = AST_DRAM_1Gx16; 294 div = 0x2;
261 else
262 ast->dram_type = AST_DRAM_512Mx32;
263 break; 295 break;
264 case 0xc: 296 default:
265 ast->dram_type = AST_DRAM_1Gx32; 297 div = 0x1;
266 break; 298 break;
267 } 299 }
300 ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
268 } 301 }
269
270 data = ast_read32(ast, 0x10120);
271 data2 = ast_read32(ast, 0x10170);
272 if (data2 & 0x2000)
273 ref_pll = 14318;
274 else
275 ref_pll = 12000;
276
277 denum = data & 0x1f;
278 num = (data & 0x3fe0) >> 5;
279 data = (data & 0xc000) >> 14;
280 switch (data) {
281 case 3:
282 div = 0x4;
283 break;
284 case 2:
285 case 1:
286 div = 0x2;
287 break;
288 default:
289 div = 0x1;
290 break;
291 }
292 ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
293 return 0; 302 return 0;
294} 303}
295 304
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 810c51d92b99..5331ee1df086 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
379 ast_open_key(ast); 379 ast_open_key(ast);
380 ast_set_def_ext_reg(dev); 380 ast_set_def_ext_reg(dev);
381 381
382 if (ast->chip == AST2300 || ast->chip == AST2400) 382 if (ast->DisableP2A == false)
383 ast_init_dram_2300(dev); 383 {
384 else 384 if (ast->chip == AST2300 || ast->chip == AST2400)
385 ast_init_dram_reg(dev); 385 ast_init_dram_2300(dev);
386 else
387 ast_init_dram_reg(dev);
386 388
387 ast_init_3rdtx(dev); 389 ast_init_3rdtx(dev);
390 }
391 else
392 {
393 if (ast->tx_chip_type != AST_TX_NONE)
394 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
395 }
388} 396}
389 397
390/* AST 2300 DRAM settings */ 398/* AST 2300 DRAM settings */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index eb9bf8786c24..18eefdcbf1ba 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
1382 1382
1383 pm_runtime_enable(dev); 1383 pm_runtime_enable(dev);
1384 1384
1385 pm_runtime_get_sync(dev);
1385 phy_power_on(dp->phy); 1386 phy_power_on(dp->phy);
1386 1387
1387 analogix_dp_init_dp(dp); 1388 analogix_dp_init_dp(dp);
@@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
1414 goto err_disable_pm_runtime; 1415 goto err_disable_pm_runtime;
1415 } 1416 }
1416 1417
1418 phy_power_off(dp->phy);
1419 pm_runtime_put(dev);
1420
1417 return 0; 1421 return 0;
1418 1422
1419err_disable_pm_runtime: 1423err_disable_pm_runtime:
1424
1425 phy_power_off(dp->phy);
1426 pm_runtime_put(dev);
1420 pm_runtime_disable(dev); 1427 pm_runtime_disable(dev);
1421 1428
1422 return ret; 1429 return ret;
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index 04b3c161dfae..7f4cc6e172ab 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU
 7 This is a KMS driver for the emulated cirrus device in qemu. 7 This is a KMS driver for the emulated cirrus device in qemu.
8 It is *NOT* intended for real cirrus devices. This requires 8 It is *NOT* intended for real cirrus devices. This requires
9 the modesetting userspace X.org driver. 9 the modesetting userspace X.org driver.
10
 11 Cirrus is obsolete, the hardware was designed in the '90s
 12 and can't keep up with today's needs. More background:
13 https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
14
15 Better alternatives are:
16 - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
17 - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
 18 - virtio (DRM_VIRTIO_GPU, qemu -vga virtio)
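For a guest kernel that follows this advice, the relevant options might look like the fragment below (a hedged .config sketch, not part of the patch):

	# CONFIG_DRM_CIRRUS_QEMU is not set
	CONFIG_DRM_BOCHS=m
	CONFIG_DRM_QXL=m
	CONFIG_DRM_VIRTIO_GPU=m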
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 60697482b94c..fdfb1ec17e66 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
291EXPORT_SYMBOL(drm_atomic_get_crtc_state); 291EXPORT_SYMBOL(drm_atomic_get_crtc_state);
292 292
293static void set_out_fence_for_crtc(struct drm_atomic_state *state, 293static void set_out_fence_for_crtc(struct drm_atomic_state *state,
294 struct drm_crtc *crtc, s64 __user *fence_ptr) 294 struct drm_crtc *crtc, s32 __user *fence_ptr)
295{ 295{
296 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; 296 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
297} 297}
298 298
299static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, 299static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
300 struct drm_crtc *crtc) 300 struct drm_crtc *crtc)
301{ 301{
302 s64 __user *fence_ptr; 302 s32 __user *fence_ptr;
303 303
304 fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; 304 fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
305 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; 305 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
512 state->color_mgmt_changed |= replaced; 512 state->color_mgmt_changed |= replaced;
513 return ret; 513 return ret;
514 } else if (property == config->prop_out_fence_ptr) { 514 } else if (property == config->prop_out_fence_ptr) {
515 s64 __user *fence_ptr = u64_to_user_ptr(val); 515 s32 __user *fence_ptr = u64_to_user_ptr(val);
516 516
517 if (!fence_ptr) 517 if (!fence_ptr)
518 return 0; 518 return 0;
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
1915 */ 1915 */
1916 1916
1917struct drm_out_fence_state { 1917struct drm_out_fence_state {
1918 s64 __user *out_fence_ptr; 1918 s32 __user *out_fence_ptr;
1919 struct sync_file *sync_file; 1919 struct sync_file *sync_file;
1920 int fd; 1920 int fd;
1921}; 1921};
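The s64 to s32 change matters on the userspace side too: the kernel returns a sync_file file descriptor (a plain int) through OUT_FENCE_PTR, so the property value must point at 32-bit storage. A hedged libdrm-style sketch of the calling side (the property id lookup is assumed to happen elsewhere):

	#include <stdint.h>
	#include <xf86drmMode.h>

	/* The kernel fills *out_fence_fd with the sync_file fd once the
	 * atomic commit is queued; -1 marks "not written yet". */
	static void request_out_fence(drmModeAtomicReqPtr req, uint32_t crtc_id,
				      uint32_t out_fence_ptr_prop,
				      int32_t *out_fence_fd)
	{
		*out_fence_fd = -1;
		drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
					 (uint64_t)(uintptr_t)out_fence_fd);
	}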
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
1952 return 0; 1952 return 0;
1953 1953
1954 for_each_crtc_in_state(state, crtc, crtc_state, i) { 1954 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1955 u64 __user *fence_ptr; 1955 s32 __user *fence_ptr;
1956 1956
1957 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); 1957 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
1958 1958
@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
2032 } 2032 }
2033 2033
2034 for_each_crtc_in_state(state, crtc, crtc_state, i) { 2034 for_each_crtc_in_state(state, crtc, crtc_state, i) {
2035 struct drm_pending_vblank_event *event = crtc_state->event;
2035 /* 2036 /*
2036 * TEST_ONLY and PAGE_FLIP_EVENT are mutually 2037 * Free the allocated event. drm_atomic_helper_setup_commit
2037 * exclusive, if they weren't, this code should be 2038 * can allocate an event too, so only free it if it's ours
2038 * called on success for TEST_ONLY too. 2039 * to prevent a double free in drm_atomic_state_clear.
2039 */ 2040 */
2040 if (crtc_state->event) 2041 if (event && (event->base.fence || event->base.file_priv)) {
2041 drm_event_cancel_free(dev, &crtc_state->event->base); 2042 drm_event_cancel_free(dev, &event->base);
2043 crtc_state->event = NULL;
2044 }
2042 } 2045 }
2043 2046
2044 if (!fence_state) 2047 if (!fence_state)
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 583f47f27b36..4594477dee00 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1259,8 +1259,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1259 1259
1260 if (!nonblock) { 1260 if (!nonblock) {
1261 ret = drm_atomic_helper_wait_for_fences(dev, state, true); 1261 ret = drm_atomic_helper_wait_for_fences(dev, state, true);
1262 if (ret) 1262 if (ret) {
1263 drm_atomic_helper_cleanup_planes(dev, state);
1263 return ret; 1264 return ret;
1265 }
1264 } 1266 }
1265 1267
1266 /* 1268 /*
@@ -1664,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
1664 1666
1665 funcs = plane->helper_private; 1667 funcs = plane->helper_private;
1666 1668
1667 if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
1668 continue;
1669
1670 if (funcs->prepare_fb) { 1669 if (funcs->prepare_fb) {
1671 ret = funcs->prepare_fb(plane, plane_state); 1670 ret = funcs->prepare_fb(plane, plane_state);
1672 if (ret) 1671 if (ret)
@@ -1683,9 +1682,6 @@ fail:
1683 if (j >= i) 1682 if (j >= i)
1684 continue; 1683 continue;
1685 1684
1686 if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
1687 continue;
1688
1689 funcs = plane->helper_private; 1685 funcs = plane->helper_private;
1690 1686
1691 if (funcs->cleanup_fb) 1687 if (funcs->cleanup_fb)
@@ -1952,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
1952 for_each_plane_in_state(old_state, plane, plane_state, i) { 1948 for_each_plane_in_state(old_state, plane, plane_state, i) {
1953 const struct drm_plane_helper_funcs *funcs; 1949 const struct drm_plane_helper_funcs *funcs;
1954 1950
1955 if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
1956 continue;
1957
1958 funcs = plane->helper_private; 1951 funcs = plane->helper_private;
1959 1952
1960 if (funcs->cleanup_fb) 1953 if (funcs->cleanup_fb)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 5a4526289392..7a7019ac9388 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
225 225
226 INIT_LIST_HEAD(&connector->probed_modes); 226 INIT_LIST_HEAD(&connector->probed_modes);
227 INIT_LIST_HEAD(&connector->modes); 227 INIT_LIST_HEAD(&connector->modes);
228 mutex_init(&connector->mutex);
228 connector->edid_blob_ptr = NULL; 229 connector->edid_blob_ptr = NULL;
229 connector->status = connector_status_unknown; 230 connector->status = connector_status_unknown;
230 231
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
359 connector->funcs->atomic_destroy_state(connector, 360 connector->funcs->atomic_destroy_state(connector,
360 connector->state); 361 connector->state);
361 362
363 mutex_destroy(&connector->mutex);
364
362 memset(connector, 0, sizeof(*connector)); 365 memset(connector, 0, sizeof(*connector));
363} 366}
364EXPORT_SYMBOL(drm_connector_cleanup); 367EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
374 */ 377 */
375int drm_connector_register(struct drm_connector *connector) 378int drm_connector_register(struct drm_connector *connector)
376{ 379{
377 int ret; 380 int ret = 0;
378 381
379 if (connector->registered) 382 if (!connector->dev->registered)
380 return 0; 383 return 0;
381 384
385 mutex_lock(&connector->mutex);
386 if (connector->registered)
387 goto unlock;
388
382 ret = drm_sysfs_connector_add(connector); 389 ret = drm_sysfs_connector_add(connector);
383 if (ret) 390 if (ret)
384 return ret; 391 goto unlock;
385 392
386 ret = drm_debugfs_connector_add(connector); 393 ret = drm_debugfs_connector_add(connector);
387 if (ret) { 394 if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
397 drm_mode_object_register(connector->dev, &connector->base); 404 drm_mode_object_register(connector->dev, &connector->base);
398 405
399 connector->registered = true; 406 connector->registered = true;
400 return 0; 407 goto unlock;
401 408
402err_debugfs: 409err_debugfs:
403 drm_debugfs_connector_remove(connector); 410 drm_debugfs_connector_remove(connector);
404err_sysfs: 411err_sysfs:
405 drm_sysfs_connector_remove(connector); 412 drm_sysfs_connector_remove(connector);
413unlock:
414 mutex_unlock(&connector->mutex);
406 return ret; 415 return ret;
407} 416}
408EXPORT_SYMBOL(drm_connector_register); 417EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
415 */ 424 */
416void drm_connector_unregister(struct drm_connector *connector) 425void drm_connector_unregister(struct drm_connector *connector)
417{ 426{
418 if (!connector->registered) 427 mutex_lock(&connector->mutex);
428 if (!connector->registered) {
429 mutex_unlock(&connector->mutex);
419 return; 430 return;
431 }
420 432
421 if (connector->funcs->early_unregister) 433 if (connector->funcs->early_unregister)
422 connector->funcs->early_unregister(connector); 434 connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
425 drm_debugfs_connector_remove(connector); 437 drm_debugfs_connector_remove(connector);
426 438
427 connector->registered = false; 439 connector->registered = false;
440 mutex_unlock(&connector->mutex);
428} 441}
429EXPORT_SYMBOL(drm_connector_unregister); 442EXPORT_SYMBOL(drm_connector_unregister);
430 443
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index aa644487749c..f59771da52ee 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1817,7 +1817,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1817 mgr->payloads[i].vcpi = req_payload.vcpi; 1817 mgr->payloads[i].vcpi = req_payload.vcpi;
1818 } else if (mgr->payloads[i].num_slots) { 1818 } else if (mgr->payloads[i].num_slots) {
1819 mgr->payloads[i].num_slots = 0; 1819 mgr->payloads[i].num_slots = 0;
1820 drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]); 1820 drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
1821 req_payload.payload_state = mgr->payloads[i].payload_state; 1821 req_payload.payload_state = mgr->payloads[i].payload_state;
1822 mgr->payloads[i].start_slot = 0; 1822 mgr->payloads[i].start_slot = 0;
1823 } 1823 }
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a525751b4559..6594b4088f11 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
745 if (ret) 745 if (ret)
746 goto err_minors; 746 goto err_minors;
747 747
748 dev->registered = true;
749
748 if (dev->driver->load) { 750 if (dev->driver->load) {
749 ret = dev->driver->load(dev, flags); 751 ret = dev->driver->load(dev, flags);
750 if (ret) 752 if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
785 787
786 drm_lastclose(dev); 788 drm_lastclose(dev);
787 789
790 dev->registered = false;
791
788 if (drm_core_check_feature(dev, DRIVER_MODESET)) 792 if (drm_core_check_feature(dev, DRIVER_MODESET))
789 drm_modeset_unregister_all(dev); 793 drm_modeset_unregister_all(dev);
790 794
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index ac6a35212501..e6b19bc9021a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
1460 return NULL; 1460 return NULL;
1461 1461
1462 mode->type |= DRM_MODE_TYPE_USERDEF; 1462 mode->type |= DRM_MODE_TYPE_USERDEF;
 1463 /* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */
1464 if (cmd->xres == 1366 && mode->hdisplay == 1368) {
1465 mode->hdisplay = 1366;
1466 mode->hsync_start--;
1467 mode->hsync_end--;
1468 drm_mode_set_name(mode);
1469 }
1463 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 1470 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
1464 return mode; 1471 return mode;
1465} 1472}
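For context on the fixup above: CVT/GTF compute horizontal timings on an 8-pixel character cell, so a requested width of 1366 ends up as 1368. Worked through for the common panel mode:

	requested 1366x768 -> timing calculation yields hdisplay = 1368
	fixup: hdisplay = 1366, hsync_start -= 1, hsync_end -= 1
	drm_mode_set_name() then regenerates the "1366x768" mode name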
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index ac953f037be7..cf8f0128c161 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
143 } 143 }
144 144
145 if (dev->mode_config.delayed_event) { 145 if (dev->mode_config.delayed_event) {
146 /*
147 * FIXME:
148 *
 149 * Use a short (1s) delay to handle the initial delayed event.
 150 * This delay should not be needed, but Optimus/nouveau fails
 151 * in a mysterious way if the delayed event is handled as soon
 152 * as possible, the way it is done in
 153 * drm_helper_probe_single_connector_modes() when polling was
 154 * enabled before.
155 */
146 poll = true; 156 poll = true;
147 delay = 0; 157 delay = HZ;
148 } 158 }
149 159
150 if (poll) 160 if (poll)
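The delay change is the functional part of this hunk: with delay = HZ, the initial delayed event is processed after a one-second grace period instead of immediately. The scheduling call itself (just past the end of the hunk) is unchanged; roughly:

	/* HZ jiffies == 1 second, so the delayed event is handled after
	 * a 1s grace period rather than right away. */
	if (poll)
		schedule_delayed_work(&dev->mode_config.output_poll_work, delay);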
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 169ac96e8f08..fe0e85b41310 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
116 struct list_head list; 116 struct list_head list;
117 bool found; 117 bool found;
118 118
119 /*
120 * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
121 * drm_mm into giving out a low IOVA after address space
122 * rollover. This needs a proper fix.
123 */
119 ret = drm_mm_insert_node_in_range(&mmu->mm, node, 124 ret = drm_mm_insert_node_in_range(&mmu->mm, node,
120 size, 0, mmu->last_iova, ~0UL, 125 size, 0, mmu->last_iova, ~0UL,
121 DRM_MM_SEARCH_DEFAULT); 126 mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
122 127
123 if (ret != -ENOSPC) 128 if (ret != -ENOSPC)
124 break; 129 break;
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 6ca1f3117fe8..75eeb831ed6a 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -46,7 +46,8 @@ enum decon_flag_bits {
46 BIT_CLKS_ENABLED, 46 BIT_CLKS_ENABLED,
47 BIT_IRQS_ENABLED, 47 BIT_IRQS_ENABLED,
48 BIT_WIN_UPDATED, 48 BIT_WIN_UPDATED,
49 BIT_SUSPENDED 49 BIT_SUSPENDED,
50 BIT_REQUEST_UPDATE
50}; 51};
51 52
52struct decon_context { 53struct decon_context {
@@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
141 m->crtc_vsync_end = m->crtc_vsync_start + 1; 142 m->crtc_vsync_end = m->crtc_vsync_start + 1;
142 } 143 }
143 144
144 decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0);
145
146 /* enable clock gate */
147 val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
148 writel(val, ctx->addr + DECON_CMU);
149
150 if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) 145 if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
151 decon_setup_trigger(ctx); 146 decon_setup_trigger(ctx);
152 147
@@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
315 310
316 /* window enable */ 311 /* window enable */
317 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); 312 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
313 set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
318} 314}
319 315
320static void decon_disable_plane(struct exynos_drm_crtc *crtc, 316static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
327 return; 323 return;
328 324
329 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); 325 decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
326 set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
330} 327}
331 328
332static void decon_atomic_flush(struct exynos_drm_crtc *crtc) 329static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
340 for (i = ctx->first_win; i < WINDOWS_NR; i++) 337 for (i = ctx->first_win; i < WINDOWS_NR; i++)
341 decon_shadow_protect_win(ctx, i, false); 338 decon_shadow_protect_win(ctx, i, false);
342 339
343 /* standalone update */ 340 if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
344 decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); 341 decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
345 342
346 if (ctx->out_type & IFTYPE_I80) 343 if (ctx->out_type & IFTYPE_I80)
347 set_bit(BIT_WIN_UPDATED, &ctx->flags); 344 set_bit(BIT_WIN_UPDATED, &ctx->flags);
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 0d41ebc4aea6..f7bce8603958 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -37,13 +37,6 @@
37#include "i915_drv.h" 37#include "i915_drv.h"
38#include "gvt.h" 38#include "gvt.h"
39 39
40#define MB_TO_BYTES(mb) ((mb) << 20ULL)
41#define BYTES_TO_MB(b) ((b) >> 20ULL)
42
43#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
44#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
45#define HOST_FENCE 4
46
47static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) 40static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
48{ 41{
49 struct intel_gvt *gvt = vgpu->gvt; 42 struct intel_gvt *gvt = vgpu->gvt;
@@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
165 POSTING_READ(fence_reg_lo); 158 POSTING_READ(fence_reg_lo);
166} 159}
167 160
161static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
162{
163 int i;
164
165 for (i = 0; i < vgpu_fence_sz(vgpu); i++)
166 intel_vgpu_write_fence(vgpu, i, 0);
167}
168
168static void free_vgpu_fence(struct intel_vgpu *vgpu) 169static void free_vgpu_fence(struct intel_vgpu *vgpu)
169{ 170{
170 struct intel_gvt *gvt = vgpu->gvt; 171 struct intel_gvt *gvt = vgpu->gvt;
@@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
178 intel_runtime_pm_get(dev_priv); 179 intel_runtime_pm_get(dev_priv);
179 180
180 mutex_lock(&dev_priv->drm.struct_mutex); 181 mutex_lock(&dev_priv->drm.struct_mutex);
182 _clear_vgpu_fence(vgpu);
181 for (i = 0; i < vgpu_fence_sz(vgpu); i++) { 183 for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
182 reg = vgpu->fence.regs[i]; 184 reg = vgpu->fence.regs[i];
183 intel_vgpu_write_fence(vgpu, i, 0);
184 list_add_tail(&reg->link, 185 list_add_tail(&reg->link,
185 &dev_priv->mm.fence_list); 186 &dev_priv->mm.fence_list);
186 } 187 }
@@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
208 continue; 209 continue;
209 list_del(pos); 210 list_del(pos);
210 vgpu->fence.regs[i] = reg; 211 vgpu->fence.regs[i] = reg;
211 intel_vgpu_write_fence(vgpu, i, 0);
212 if (++i == vgpu_fence_sz(vgpu)) 212 if (++i == vgpu_fence_sz(vgpu))
213 break; 213 break;
214 } 214 }
215 if (i != vgpu_fence_sz(vgpu)) 215 if (i != vgpu_fence_sz(vgpu))
216 goto out_free_fence; 216 goto out_free_fence;
217 217
218 _clear_vgpu_fence(vgpu);
219
218 mutex_unlock(&dev_priv->drm.struct_mutex); 220 mutex_unlock(&dev_priv->drm.struct_mutex);
219 intel_runtime_pm_put(dev_priv); 221 intel_runtime_pm_put(dev_priv);
220 return 0; 222 return 0;
@@ -314,6 +316,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
314} 316}
315 317
316/** 318/**
319 * intel_vgpu_reset_resource - reset resource state owned by a vGPU
320 * @vgpu: a vGPU
321 *
322 * This function is used to reset resource state owned by a vGPU.
323 *
324 */
325void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
326{
327 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
328
329 intel_runtime_pm_get(dev_priv);
330 _clear_vgpu_fence(vgpu);
331 intel_runtime_pm_put(dev_priv);
332}
333
334/**
317 * intel_alloc_vgpu_resource - allocate HW resource for a vGPU 335 * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
318 * @vgpu: vGPU 336 * @vgpu: vGPU
319 * @param: vGPU creation params 337 * @param: vGPU creation params
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index db516382a4d4..4a6a2ed65732 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
123 u8 changed = old ^ new; 123 u8 changed = old ^ new;
124 int ret; 124 int ret;
125 125
126 memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
126 if (!(changed & PCI_COMMAND_MEMORY)) 127 if (!(changed & PCI_COMMAND_MEMORY))
127 return 0; 128 return 0;
128 129
@@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
142 return ret; 143 return ret;
143 } 144 }
144 145
145 memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
146 return 0; 146 return 0;
147} 147}
148 148
@@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
240 if (WARN_ON(bytes > 4)) 240 if (WARN_ON(bytes > 4))
241 return -EINVAL; 241 return -EINVAL;
242 242
243 if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ)) 243 if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
244 return -EINVAL; 244 return -EINVAL;
245 245
246 /* First check if it's PCI_COMMAND */ 246 /* First check if it's PCI_COMMAND */
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
282 } 282 }
283 return 0; 283 return 0;
284} 284}
285
286/**
287 * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU
288 *
289 * @vgpu: a vGPU
290 * @primary: is the vGPU presented as primary
291 *
292 */
293void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
294 bool primary)
295{
296 struct intel_gvt *gvt = vgpu->gvt;
297 const struct intel_gvt_device_info *info = &gvt->device_info;
298 u16 *gmch_ctl;
299 int i;
300
301 memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
302 info->cfg_space_size);
303
304 if (!primary) {
305 vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
306 INTEL_GVT_PCI_CLASS_VGA_OTHER;
307 vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
308 INTEL_GVT_PCI_CLASS_VGA_OTHER;
309 }
310
311	/* Show the guest that there isn't any stolen memory. */
312 gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
313 *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
314
315 intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
316 gvt_aperture_pa_base(gvt), true);
317
318 vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
319 | PCI_COMMAND_MEMORY
320 | PCI_COMMAND_MASTER);
321 /*
322	 * Clear the upper 32 bits of the BARs and let the guest assign new values
323 */
324 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
325 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
326 memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
327
328 for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
329 vgpu->cfg_space.bar[i].size = pci_resource_len(
330 gvt->dev_priv->drm.pdev, i * 2);
331 vgpu->cfg_space.bar[i].tracked = false;
332 }
333}
334
335/**
336 * intel_vgpu_reset_cfg_space - reset vGPU configuration space
337 *
338 * @vgpu: a vGPU
339 *
340 */
341void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
342{
343 u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
344 bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
345 INTEL_GVT_PCI_CLASS_VGA_OTHER;
346
347 if (cmd & PCI_COMMAND_MEMORY) {
348 trap_gttmmio(vgpu, false);
349 map_aperture(vgpu, false);
350 }
351
352	/*
353	 * Currently we only do such a reset when the vGPU is not
354	 * owned by any VM, so we simply restore the entire cfg
355	 * space to its default values.
356 */
357 intel_vgpu_init_cfg_space(vgpu, primary);
358}
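
intel_vgpu_reset_cfg_space() above is a reset-by-reinit pattern: undo any live side effects implied by PCI_COMMAND (GTT MMIO trapping, aperture mapping), recover the primary/non-primary classification from the current class byte, then replay the init path against the golden firmware copy. A hedged userspace sketch of the same shape, with invented stand-ins for the GVT helpers and illustrative offsets and values:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define PCI_COMMAND        0x04
#define PCI_CLASS_DEVICE   0x0b /* byte offset, illustrative */
#define PCI_COMMAND_MEMORY 0x02
#define CLASS_VGA_OTHER    0x80 /* stand-in for INTEL_GVT_PCI_CLASS_VGA_OTHER */

struct vcfg {
    uint8_t space[256];
    uint8_t golden[256]; /* firmware-provided defaults */
};

static void untrap_mmio(struct vcfg *c) { (void)c; /* would tear down traps */ }

static void init_cfg_space(struct vcfg *c, bool primary)
{
    memcpy(c->space, c->golden, sizeof(c->space));
    if (!primary)
        c->space[PCI_CLASS_DEVICE] = CLASS_VGA_OTHER;
    c->space[PCI_COMMAND] &= (uint8_t)~PCI_COMMAND_MEMORY;
}

static void reset_cfg_space(struct vcfg *c)
{
    /* Recover 'primary' from live state before wiping it. */
    bool primary = c->space[PCI_CLASS_DEVICE] != CLASS_VGA_OTHER;

    /* Undo side effects that memory decoding implied. */
    if (c->space[PCI_COMMAND] & PCI_COMMAND_MEMORY)
        untrap_mmio(c);

    init_cfg_space(c, primary);
}

int main(void)
{
    struct vcfg c = { .golden = { [PCI_CLASS_DEVICE] = 0x03 } };

    init_cfg_space(&c, false);
    reset_cfg_space(&c); /* non-primary classification survives the reset */
    return c.space[PCI_CLASS_DEVICE] == CLASS_VGA_OTHER ? 0 : 1;
}
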
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d26a092c70e8..e4563984cb1e 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -481,7 +481,6 @@ struct parser_exec_state {
481 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2) 481 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
482 482
483static unsigned long bypass_scan_mask = 0; 483static unsigned long bypass_scan_mask = 0;
484static bool bypass_batch_buffer_scan = true;
485 484
486/* ring ALL, type = 0 */ 485/* ring ALL, type = 0 */
487static struct sub_op_bits sub_op_mi[] = { 486static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
1525{ 1524{
1526 struct intel_gvt *gvt = s->vgpu->gvt; 1525 struct intel_gvt *gvt = s->vgpu->gvt;
1527 1526
1528 if (bypass_batch_buffer_scan)
1529 return 0;
1530
1531 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { 1527 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
1532 /* BDW decides privilege based on address space */ 1528 /* BDW decides privilege based on address space */
1533 if (cmd_val(s, 0) & (1 << 8)) 1529 if (cmd_val(s, 0) & (1 << 8))
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f32bb6f6495c..34083731669d 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
364#define get_desc_from_elsp_dwords(ed, i) \ 364#define get_desc_from_elsp_dwords(ed, i) \
365 ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) 365 ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
366 366
367
368#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
369#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
370static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
371 unsigned long add, int gmadr_bytes)
372{
373 if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
374 return -1;
375
376 *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
377 BATCH_BUFFER_ADDR_MASK;
378 if (gmadr_bytes == 8) {
379 *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
380 add & BATCH_BUFFER_ADDR_HIGH_MASK;
381 }
382
383 return 0;
384}
385
386static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) 367static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
387{ 368{
388 int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; 369 const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
370 struct intel_shadow_bb_entry *entry_obj;
389 371
390 /* pin the gem object to ggtt */ 372 /* pin the gem object to ggtt */
391 if (!list_empty(&workload->shadow_bb)) { 373 list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
392 struct intel_shadow_bb_entry *entry_obj = 374 struct i915_vma *vma;
393 list_first_entry(&workload->shadow_bb,
394 struct intel_shadow_bb_entry,
395 list);
396 struct intel_shadow_bb_entry *temp;
397 375
398 list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb, 376 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
399 list) { 377 if (IS_ERR(vma)) {
400 struct i915_vma *vma; 378 gvt_err("Cannot pin\n");
401 379 return;
402 vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
403 4, 0);
404 if (IS_ERR(vma)) {
405 gvt_err("Cannot pin\n");
406 return;
407 }
408
409 /* FIXME: we are not tracking our pinned VMA leaving it
410 * up to the core to fix up the stray pin_count upon
411 * free.
412 */
413
414 /* update the relocate gma with shadow batch buffer*/
415 set_gma_to_bb_cmd(entry_obj,
416 i915_ggtt_offset(vma),
417 gmadr_bytes);
418 } 380 }
381
382		/* FIXME: we are not tracking our pinned VMA, leaving it
383 * up to the core to fix up the stray pin_count upon
384 * free.
385 */
386
387		/* update the relocated gma with the shadow batch buffer */
388 entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
389 if (gmadr_bytes == 8)
390 entry_obj->bb_start_cmd_va[2] = 0;
419 } 391 }
420} 392}
421 393
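
The rewrite of prepare_shadow_batch_buffer() above drops the mask-based helper in favor of direct dword indexing on a u32 *bb_start_cmd_va: dword 1 of the batch-buffer-start command receives the low 32 bits of the shadow buffer's GGTT offset, and with 8-byte addressing dword 2 receives the high bits (zeroed here, since the pinned offset fits in 32 bits). A standalone sketch of that patching step; the command layout and opcode value are assumed for illustration:

#include <assert.h>
#include <stdint.h>

/* Patch the address payload of a batch-buffer-start command in place.
 * cmd[0] is the opcode dword; cmd[1] (and cmd[2] when the command carries
 * 8 address bytes) hold the graphics memory address. */
static void set_bb_start_address(uint32_t *cmd, uint64_t gma, int gmadr_bytes)
{
    cmd[1] = (uint32_t)gma;              /* low 32 bits */
    if (gmadr_bytes == 8)
        cmd[2] = (uint32_t)(gma >> 32);  /* high bits, 0 for a sub-4G offset */
}

int main(void)
{
    uint32_t cmd[3] = { 0x18800101, 0, 0 }; /* opcode dword is a placeholder */

    set_bb_start_address(cmd, 0x12345000u, 8);
    assert(cmd[1] == 0x12345000u && cmd[2] == 0);
    return 0;
}
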
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
826 INIT_LIST_HEAD(&vgpu->workload_q_head[i]); 798 INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
827 } 799 }
828 800
829 vgpu->workloads = kmem_cache_create("gvt-g vgpu workload", 801 vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
830 sizeof(struct intel_vgpu_workload), 0, 802 sizeof(struct intel_vgpu_workload), 0,
831 SLAB_HWCACHE_ALIGN, 803 SLAB_HWCACHE_ALIGN,
832 NULL); 804 NULL);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 7eaaf1c9ed2b..47dec4acf7ff 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
240static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) 240static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
241{ 241{
242 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 242 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
243 u64 pte;
244 243
245#ifdef readq 244 return readq(addr);
246 pte = readq(addr);
247#else
248 pte = ioread32(addr);
249 pte |= (u64)ioread32(addr + 4) << 32;
250#endif
251 return pte;
252} 245}
253 246
254static void write_pte64(struct drm_i915_private *dev_priv, 247static void write_pte64(struct drm_i915_private *dev_priv,
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
256{ 249{
257 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; 250 void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
258 251
259#ifdef writeq
260 writeq(pte, addr); 252 writeq(pte, addr);
261#else 253
262 iowrite32((u32)pte, addr);
263 iowrite32(pte >> 32, addr + 4);
264#endif
265 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 254 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
266 POSTING_READ(GFX_FLSH_CNTL_GEN6); 255 POSTING_READ(GFX_FLSH_CNTL_GEN6);
267} 256}
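
Dropping the #ifdef readq fallback is safe because GVT-g only builds on 64-bit kernels, where readq()/writeq() always exist. For reference, the removed fallback stitched the 64-bit PTE together from two 32-bit reads, low half first, roughly like this userspace model (plain memory rather than ioremapped MMIO):

#include <assert.h>
#include <stdint.h>

/* Compose a 64-bit value from two 32-bit halves, low half first — the
 * shape of the removed ioread32-based path. Note it is not atomic: a
 * concurrent writer could be observed half-updated, one more reason the
 * single readq() is preferable when available. */
static uint64_t read64_split(const volatile uint32_t *p)
{
    uint64_t v = p[0];

    v |= (uint64_t)p[1] << 32;
    return v;
}

int main(void)
{
    union { uint64_t q; uint32_t d[2]; } pte = { .q = 0x0123456789abcdefULL };

    assert(read64_split(pte.d) == pte.q); /* little-endian layout assumed */
    return 0;
}
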
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
1380 info->gtt_entry_size; 1369 info->gtt_entry_size;
1381 mem = kzalloc(mm->has_shadow_page_table ? 1370 mem = kzalloc(mm->has_shadow_page_table ?
1382 mm->page_table_entry_size * 2 1371 mm->page_table_entry_size * 2
1383 : mm->page_table_entry_size, 1372 : mm->page_table_entry_size, GFP_KERNEL);
1384 GFP_ATOMIC);
1385 if (!mem) 1373 if (!mem)
1386 return -ENOMEM; 1374 return -ENOMEM;
1387 mm->virtual_page_table = mem; 1375 mm->virtual_page_table = mem;
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
1532 struct intel_vgpu_mm *mm; 1520 struct intel_vgpu_mm *mm;
1533 int ret; 1521 int ret;
1534 1522
1535 mm = kzalloc(sizeof(*mm), GFP_ATOMIC); 1523 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
1536 if (!mm) { 1524 if (!mm) {
1537 ret = -ENOMEM; 1525 ret = -ENOMEM;
1538 goto fail; 1526 goto fail;
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1886 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1874 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1887 int page_entry_num = GTT_PAGE_SIZE >> 1875 int page_entry_num = GTT_PAGE_SIZE >>
1888 vgpu->gvt->device_info.gtt_entry_size_shift; 1876 vgpu->gvt->device_info.gtt_entry_size_shift;
1889 struct page *scratch_pt; 1877 void *scratch_pt;
1890 unsigned long mfn; 1878 unsigned long mfn;
1891 int i; 1879 int i;
1892 void *p;
1893 1880
1894 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) 1881 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
1895 return -EINVAL; 1882 return -EINVAL;
1896 1883
1897 scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); 1884 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
1898 if (!scratch_pt) { 1885 if (!scratch_pt) {
1899 gvt_err("fail to allocate scratch page\n"); 1886 gvt_err("fail to allocate scratch page\n");
1900 return -ENOMEM; 1887 return -ENOMEM;
1901 } 1888 }
1902 1889
1903 p = kmap_atomic(scratch_pt); 1890 mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
1904 mfn = intel_gvt_hypervisor_virt_to_mfn(p);
1905 if (mfn == INTEL_GVT_INVALID_ADDR) { 1891 if (mfn == INTEL_GVT_INVALID_ADDR) {
1906 gvt_err("fail to translate vaddr:0x%llx\n", (u64)p); 1892 gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
1907 kunmap_atomic(p); 1893 free_page((unsigned long)scratch_pt);
1908 __free_page(scratch_pt);
1909 return -EFAULT; 1894 return -EFAULT;
1910 } 1895 }
1911 gtt->scratch_pt[type].page_mfn = mfn; 1896 gtt->scratch_pt[type].page_mfn = mfn;
1912 gtt->scratch_pt[type].page = scratch_pt; 1897 gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
1913 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", 1898 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
1914 vgpu->id, type, mfn); 1899 vgpu->id, type, mfn);
1915 1900
@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1918 * scratch_pt[type] indicate the scratch pt/scratch page used by the 1903 * scratch_pt[type] indicate the scratch pt/scratch page used by the
1919 * 'type' pt. 1904 * 'type' pt.
1920 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by 1905 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
1921 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self 1906 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self
1922 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. 1907 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
1923 */ 1908 */
1924 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { 1909 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1936 se.val64 |= PPAT_CACHED_INDEX; 1921 se.val64 |= PPAT_CACHED_INDEX;
1937 1922
1938 for (i = 0; i < page_entry_num; i++) 1923 for (i = 0; i < page_entry_num; i++)
1939 ops->set_entry(p, &se, i, false, 0, vgpu); 1924 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
1940 } 1925 }
1941 1926
1942 kunmap_atomic(p);
1943
1944 return 0; 1927 return 0;
1945} 1928}
1946 1929
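
Switching the scratch table from alloc_page() plus kmap_atomic() to get_zeroed_page(GFP_KERNEL) leaves the code holding one ordinary kernel virtual address for the whole function, converting to a struct page * only at the end via virt_to_page(); the map/unmap pair and the atomic section it imposed disappear. A userspace model of the resulting flow, sized as a 4 KiB table of 8-byte entries:

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096
#define PTE_SIZE  8
#define ENTRIES   (PAGE_SIZE / PTE_SIZE)

int main(void)
{
    /* calloc stands in for get_zeroed_page(GFP_KERNEL). */
    uint64_t *scratch_pt = calloc(1, PAGE_SIZE);
    uint64_t scratch_entry = 0xabcd000ULL | 1; /* pfn + present bit, illustrative */

    if (!scratch_pt)
        return 1;

    /* One virtual address serves both translation and the fill loop. */
    for (int i = 0; i < ENTRIES; i++)
        scratch_pt[i] = scratch_entry;

    free(scratch_pt);
    return 0;
}
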
@@ -1998,6 +1981,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
1998 INIT_LIST_HEAD(&gtt->oos_page_list_head); 1981 INIT_LIST_HEAD(&gtt->oos_page_list_head);
1999 INIT_LIST_HEAD(&gtt->post_shadow_list_head); 1982 INIT_LIST_HEAD(&gtt->post_shadow_list_head);
2000 1983
1984 intel_vgpu_reset_ggtt(vgpu);
1985
2001 ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, 1986 ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
2002 NULL, 1, 0); 1987 NULL, 1, 0);
2003 if (IS_ERR(ggtt_mm)) { 1988 if (IS_ERR(ggtt_mm)) {
@@ -2206,6 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
2206int intel_gvt_init_gtt(struct intel_gvt *gvt) 2191int intel_gvt_init_gtt(struct intel_gvt *gvt)
2207{ 2192{
2208 int ret; 2193 int ret;
2194 void *page;
2209 2195
2210 gvt_dbg_core("init gtt\n"); 2196 gvt_dbg_core("init gtt\n");
2211 2197
@@ -2218,6 +2204,20 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
2218 return -ENODEV; 2204 return -ENODEV;
2219 } 2205 }
2220 2206
2207 page = (void *)get_zeroed_page(GFP_KERNEL);
2208 if (!page) {
2209 gvt_err("fail to allocate scratch ggtt page\n");
2210 return -ENOMEM;
2211 }
2212 gvt->gtt.scratch_ggtt_page = virt_to_page(page);
2213
2214 gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
2215 if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
2216 gvt_err("fail to translate scratch ggtt page\n");
2217 __free_page(gvt->gtt.scratch_ggtt_page);
2218 return -EFAULT;
2219 }
2220
2221 if (enable_out_of_sync) { 2221 if (enable_out_of_sync) {
2222 ret = setup_spt_oos(gvt); 2222 ret = setup_spt_oos(gvt);
2223 if (ret) { 2223 if (ret) {
@@ -2239,6 +2239,68 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
2239 */ 2239 */
2240void intel_gvt_clean_gtt(struct intel_gvt *gvt) 2240void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2241{ 2241{
2242 __free_page(gvt->gtt.scratch_ggtt_page);
2243
2242 if (enable_out_of_sync) 2244 if (enable_out_of_sync)
2243 clean_spt_oos(gvt); 2245 clean_spt_oos(gvt);
2244} 2246}
2247
2248/**
2249 * intel_vgpu_reset_ggtt - reset the GGTT entry
2250 * @vgpu: a vGPU
2251 *
2252 * This function is called at the vGPU create stage
2253 * to reset all the GGTT entries.
2254 *
2255 */
2256void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
2257{
2258 struct intel_gvt *gvt = vgpu->gvt;
2259 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2260 u32 index;
2261 u32 offset;
2262 u32 num_entries;
2263 struct intel_gvt_gtt_entry e;
2264
2265 memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
2266 e.type = GTT_TYPE_GGTT_PTE;
2267 ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
2268 e.val64 |= _PAGE_PRESENT;
2269
2270 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2271 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2272 for (offset = 0; offset < num_entries; offset++)
2273 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
2274
2275 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2276 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2277 for (offset = 0; offset < num_entries; offset++)
2278 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
2279}
2280
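
intel_vgpu_reset_ggtt() above points every GGTT entry in both of the vGPU's graphics memory ranges at the single scratch page, so a freshly created or just-reset guest can never reach stale host memory through a leftover translation. A standalone model of turning a byte range into an index range and filling it; the 4 KiB page size and 64-bit entry encoding are the usual assumptions, and the addresses are made up:

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

static void fill_range(uint64_t *ggtt, uint64_t base, uint64_t size,
                       uint64_t scratch_pte)
{
    uint64_t index = base >> PAGE_SHIFT;       /* first entry covering 'base' */
    uint64_t num_entries = size >> PAGE_SHIFT; /* one entry per page */

    for (uint64_t off = 0; off < num_entries; off++)
        ggtt[index + off] = scratch_pte;
}

int main(void)
{
    uint64_t *ggtt = calloc(1u << 20, sizeof(*ggtt)); /* toy 1M-entry table */
    uint64_t scratch_pte = 0xdead000ULL | 1;          /* mfn + _PAGE_PRESENT, illustrative */

    if (!ggtt)
        return 1;
    /* Aperture range first, then the hidden (high) range, as in the patch. */
    fill_range(ggtt, 0x10000000, 0x08000000, scratch_pte);
    fill_range(ggtt, 0x80000000, 0x10000000, scratch_pte);
    free(ggtt);
    return 0;
}
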
2281/**
2282 * intel_vgpu_reset_gtt - reset all GTT-related state
2283 * @vgpu: a vGPU
2284 * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
2285 *
2286 * This function is called from the vfio core to reset all
2287 * GTT-related state, including GGTT, PPGTT and scratch pages.
2288 *
2289 */
2290void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
2291{
2292 int i;
2293
2294 ppgtt_free_all_shadow_page(vgpu);
2295 if (!dmlr)
2296 return;
2297
2298 intel_vgpu_reset_ggtt(vgpu);
2299
2300 /* clear scratch page for security */
2301 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2302 if (vgpu->gtt.scratch_pt[i].page != NULL)
2303 memset(page_address(vgpu->gtt.scratch_pt[i].page),
2304 0, PAGE_SIZE);
2305 }
2306}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index d250013bc37b..f88eb5e89bea 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -81,6 +81,9 @@ struct intel_gvt_gtt {
81 struct list_head oos_page_use_list_head; 81 struct list_head oos_page_use_list_head;
82 struct list_head oos_page_free_list_head; 82 struct list_head oos_page_free_list_head;
83 struct list_head mm_lru_list_head; 83 struct list_head mm_lru_list_head;
84
85 struct page *scratch_ggtt_page;
86 unsigned long scratch_ggtt_mfn;
84}; 87};
85 88
86enum { 89enum {
@@ -202,8 +205,10 @@ struct intel_vgpu_gtt {
202 205
203extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); 206extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
204extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); 207extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
208void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
205 209
206extern int intel_gvt_init_gtt(struct intel_gvt *gvt); 210extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
211extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
207extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); 212extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
208 213
209extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, 214extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 398877c3d2fd..e6bf5c533fbe 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
201 intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); 201 intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
202 intel_gvt_clean_vgpu_types(gvt); 202 intel_gvt_clean_vgpu_types(gvt);
203 203
204 idr_destroy(&gvt->vgpu_idr);
205
204 kfree(dev_priv->gvt); 206 kfree(dev_priv->gvt);
205 dev_priv->gvt = NULL; 207 dev_priv->gvt = NULL;
206} 208}
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
237 239
238 gvt_dbg_core("init gvt device\n"); 240 gvt_dbg_core("init gvt device\n");
239 241
242 idr_init(&gvt->vgpu_idr);
243
240 mutex_init(&gvt->lock); 244 mutex_init(&gvt->lock);
241 gvt->dev_priv = dev_priv; 245 gvt->dev_priv = dev_priv;
242 246
@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
244 248
245 ret = intel_gvt_setup_mmio_info(gvt); 249 ret = intel_gvt_setup_mmio_info(gvt);
246 if (ret) 250 if (ret)
247 return ret; 251 goto out_clean_idr;
248 252
249 ret = intel_gvt_load_firmware(gvt); 253 ret = intel_gvt_load_firmware(gvt);
250 if (ret) 254 if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
313 intel_gvt_free_firmware(gvt); 317 intel_gvt_free_firmware(gvt);
314out_clean_mmio_info: 318out_clean_mmio_info:
315 intel_gvt_clean_mmio_info(gvt); 319 intel_gvt_clean_mmio_info(gvt);
320out_clean_idr:
321 idr_destroy(&gvt->vgpu_idr);
316 kfree(gvt); 322 kfree(gvt);
317 return ret; 323 return ret;
318} 324}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index ad0e9364ee70..e227caf5859e 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -175,6 +175,7 @@ struct intel_vgpu {
175 struct notifier_block group_notifier; 175 struct notifier_block group_notifier;
176 struct kvm *kvm; 176 struct kvm *kvm;
177 struct work_struct release_work; 177 struct work_struct release_work;
178 atomic_t released;
178 } vdev; 179 } vdev;
179#endif 180#endif
180}; 181};
@@ -322,6 +323,7 @@ struct intel_vgpu_creation_params {
322 323
323int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, 324int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
324 struct intel_vgpu_creation_params *param); 325 struct intel_vgpu_creation_params *param);
326void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
325void intel_vgpu_free_resource(struct intel_vgpu *vgpu); 327void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
326void intel_vgpu_write_fence(struct intel_vgpu *vgpu, 328void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
327 u32 fence, u64 value); 329 u32 fence, u64 value);
@@ -374,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
374struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, 376struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
375 struct intel_vgpu_type *type); 377 struct intel_vgpu_type *type);
376void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); 378void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
379void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
380 unsigned int engine_mask);
377void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); 381void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
378 382
379 383
@@ -410,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
410int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, 414int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
411 unsigned long *g_index); 415 unsigned long *g_index);
412 416
417void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
418 bool primary);
419void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
420
413int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, 421int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
414 void *p_data, unsigned int bytes); 422 void *p_data, unsigned int bytes);
415 423
@@ -423,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
423int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); 431int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
424 432
425int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); 433int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
426int setup_vgpu_mmio(struct intel_vgpu *vgpu);
427void populate_pvinfo_page(struct intel_vgpu *vgpu); 434void populate_pvinfo_page(struct intel_vgpu *vgpu);
428 435
429struct intel_gvt_ops { 436struct intel_gvt_ops {
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 522809710312..ab2ea157da4c 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
93static int new_mmio_info(struct intel_gvt *gvt, 93static int new_mmio_info(struct intel_gvt *gvt,
94 u32 offset, u32 flags, u32 size, 94 u32 offset, u32 flags, u32 size,
95 u32 addr_mask, u32 ro_mask, u32 device, 95 u32 addr_mask, u32 ro_mask, u32 device,
96 void *read, void *write) 96 int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
97 int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
97{ 98{
98 struct intel_gvt_mmio_info *info, *p; 99 struct intel_gvt_mmio_info *info, *p;
99 u32 start, end, i; 100 u32 start, end, i;
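
Tightening new_mmio_info() from void *read, void *write to fully typed function pointers turns a handler-signature mismatch (such as the bool-returning sbi_ctl_mmio_write fixed below) from silent undefined behaviour into a compile error. The same idea, sketched with a typedef so the prototype stays readable; the names here are illustrative, not GVT's:

#include <stdio.h>

struct vgpu; /* opaque device handle */

/* One signature for every MMIO handler; the compiler now checks it. */
typedef int (*mmio_handler_t)(struct vgpu *vgpu, unsigned int offset,
                              void *data, unsigned int bytes);

struct mmio_info {
    unsigned int offset;
    mmio_handler_t read;
    mmio_handler_t write;
};

static int default_read(struct vgpu *vgpu, unsigned int offset,
                        void *data, unsigned int bytes)
{
    (void)vgpu; (void)data; (void)bytes;
    printf("read at %#x\n", offset);
    return 0;
}

int main(void)
{
    struct mmio_info info = { .offset = 0x2030, .read = default_read };
    /* info.read = fn_with_wrong_signature; would now fail to compile */
    return info.read(NULL, info.offset, NULL, 4);
}
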
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
219 default: 220 default:
220 /*should not hit here*/ 221 /*should not hit here*/
221 gvt_err("invalid forcewake offset 0x%x\n", offset); 222 gvt_err("invalid forcewake offset 0x%x\n", offset);
222 return 1; 223 return -EINVAL;
223 } 224 }
224 } else { 225 } else {
225 ack_reg_offset = FORCEWAKE_ACK_HSW_REG; 226 ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
230 return 0; 231 return 0;
231} 232}
232 233
233static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
234 void *p_data, unsigned int bytes, unsigned long bitmap)
235{
236 struct intel_gvt_workload_scheduler *scheduler =
237 &vgpu->gvt->scheduler;
238
239 vgpu->resetting = true;
240
241 intel_vgpu_stop_schedule(vgpu);
242 /*
243 * The current_vgpu will set to NULL after stopping the
244 * scheduler when the reset is triggered by current vgpu.
245 */
246 if (scheduler->current_vgpu == NULL) {
247 mutex_unlock(&vgpu->gvt->lock);
248 intel_gvt_wait_vgpu_idle(vgpu);
249 mutex_lock(&vgpu->gvt->lock);
250 }
251
252 intel_vgpu_reset_execlist(vgpu, bitmap);
253
254 /* full GPU reset */
255 if (bitmap == 0xff) {
256 mutex_unlock(&vgpu->gvt->lock);
257 intel_vgpu_clean_gtt(vgpu);
258 mutex_lock(&vgpu->gvt->lock);
259 setup_vgpu_mmio(vgpu);
260 populate_pvinfo_page(vgpu);
261 intel_vgpu_init_gtt(vgpu);
262 }
263
264 vgpu->resetting = false;
265
266 return 0;
267}
268
269static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 234static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
270 void *p_data, unsigned int bytes) 235 void *p_data, unsigned int bytes)
271{ 236{
237 unsigned int engine_mask = 0;
272 u32 data; 238 u32 data;
273 u64 bitmap = 0;
274 239
275 write_vreg(vgpu, offset, p_data, bytes); 240 write_vreg(vgpu, offset, p_data, bytes);
276 data = vgpu_vreg(vgpu, offset); 241 data = vgpu_vreg(vgpu, offset);
277 242
278 if (data & GEN6_GRDOM_FULL) { 243 if (data & GEN6_GRDOM_FULL) {
279 gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); 244 gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
280 bitmap = 0xff; 245 engine_mask = ALL_ENGINES;
281 } 246 } else {
282 if (data & GEN6_GRDOM_RENDER) { 247 if (data & GEN6_GRDOM_RENDER) {
283 gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); 248 gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
284 bitmap |= (1 << RCS); 249 engine_mask |= (1 << RCS);
285 } 250 }
286 if (data & GEN6_GRDOM_MEDIA) { 251 if (data & GEN6_GRDOM_MEDIA) {
287 gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); 252 gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
288 bitmap |= (1 << VCS); 253 engine_mask |= (1 << VCS);
289 } 254 }
290 if (data & GEN6_GRDOM_BLT) { 255 if (data & GEN6_GRDOM_BLT) {
291 gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); 256 gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
292 bitmap |= (1 << BCS); 257 engine_mask |= (1 << BCS);
293 } 258 }
294 if (data & GEN6_GRDOM_VECS) { 259 if (data & GEN6_GRDOM_VECS) {
295 gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); 260 gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
296 bitmap |= (1 << VECS); 261 engine_mask |= (1 << VECS);
297 } 262 }
298 if (data & GEN8_GRDOM_MEDIA2) { 263 if (data & GEN8_GRDOM_MEDIA2) {
299 gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); 264 gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
300 if (HAS_BSD2(vgpu->gvt->dev_priv)) 265 if (HAS_BSD2(vgpu->gvt->dev_priv))
301 bitmap |= (1 << VCS2); 266 engine_mask |= (1 << VCS2);
267 }
302 } 268 }
303 return handle_device_reset(vgpu, offset, p_data, bytes, bitmap); 269
270 intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
271
272 return 0;
304} 273}
305 274
306static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, 275static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
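
The rewritten gdrst_mmio_write() above folds the GDRST domain bits into one engine mask for the common locked reset path: a full-reset request short-circuits to ALL_ENGINES, otherwise each domain bit maps to an engine bit. A self-contained model of the decode; the register bit positions and engine indices are illustrative stand-ins for the i915 definitions:

#include <assert.h>

/* Illustrative GDRST domain bits (stand-ins for GEN6_GRDOM_*). */
#define GRDOM_FULL   (1u << 0)
#define GRDOM_RENDER (1u << 1)
#define GRDOM_MEDIA  (1u << 2)
#define GRDOM_BLT    (1u << 3)

enum { RCS, VCS, BCS };      /* engine indices, illustrative */
#define ALL_ENGINES 0xffffffffu

static unsigned int gdrst_to_engine_mask(unsigned int data)
{
    unsigned int mask = 0;

    if (data & GRDOM_FULL)
        return ALL_ENGINES;  /* full GPU reset trumps the per-domain bits */
    if (data & GRDOM_RENDER)
        mask |= 1u << RCS;
    if (data & GRDOM_MEDIA)
        mask |= 1u << VCS;
    if (data & GRDOM_BLT)
        mask |= 1u << BCS;
    return mask;
}

int main(void)
{
    assert(gdrst_to_engine_mask(GRDOM_FULL | GRDOM_BLT) == ALL_ENGINES);
    assert(gdrst_to_engine_mask(GRDOM_RENDER | GRDOM_MEDIA) ==
           ((1u << RCS) | (1u << VCS)));
    return 0;
}
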
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
974 return 0; 943 return 0;
975} 944}
976 945
977static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 946static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
978 void *p_data, unsigned int bytes) 947 void *p_data, unsigned int bytes)
979{ 948{
980 u32 data; 949 u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1366static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, 1335static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
1367 unsigned int offset, void *p_data, unsigned int bytes) 1336 unsigned int offset, void *p_data, unsigned int bytes)
1368{ 1337{
1369 int rc = 0;
1370 unsigned int id = 0; 1338 unsigned int id = 0;
1371 1339
1372 write_vreg(vgpu, offset, p_data, bytes); 1340 write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
1389 id = VECS; 1357 id = VECS;
1390 break; 1358 break;
1391 default: 1359 default:
1392 rc = -EINVAL; 1360 return -EINVAL;
1393 break;
1394 } 1361 }
1395 set_bit(id, (void *)vgpu->tlb_handle_pending); 1362 set_bit(id, (void *)vgpu->tlb_handle_pending);
1396 1363
1397 return rc; 1364 return 0;
1398} 1365}
1399 1366
1400static int ring_reset_ctl_write(struct intel_vgpu *vgpu, 1367static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 4dd6722a7339..3f656e3a6e5a 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -114,12 +114,15 @@ out:
114static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) 114static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
115{ 115{
116 struct gvt_dma *entry; 116 struct gvt_dma *entry;
117 kvm_pfn_t pfn;
117 118
118 mutex_lock(&vgpu->vdev.cache_lock); 119 mutex_lock(&vgpu->vdev.cache_lock);
120
119 entry = __gvt_cache_find(vgpu, gfn); 121 entry = __gvt_cache_find(vgpu, gfn);
120 mutex_unlock(&vgpu->vdev.cache_lock); 122 pfn = (entry == NULL) ? 0 : entry->pfn;
121 123
122 return entry == NULL ? 0 : entry->pfn; 124 mutex_unlock(&vgpu->vdev.cache_lock);
125 return pfn;
123} 126}
124 127
125static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn) 128static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
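
The gvt_cache_find() change above is a copy-out-under-lock fix: the entry returned by __gvt_cache_find() is only guaranteed to stay alive while cache_lock is held, so its pfn must be snapshotted into a local before the unlock rather than dereferenced after it. The same shape in portable C with pthreads and hypothetical cache types:

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

struct entry { uint64_t pfn; };

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stub lookup; the real code walks an rb-tree under the lock. */
static struct entry *cache_lookup(uint64_t gfn) { (void)gfn; return NULL; }

static uint64_t cache_find(uint64_t gfn)
{
    struct entry *e;
    uint64_t pfn;

    pthread_mutex_lock(&cache_lock);
    e = cache_lookup(gfn);
    pfn = e ? e->pfn : 0; /* snapshot while the entry is still protected */
    pthread_mutex_unlock(&cache_lock);

    return pfn;           /* never touch 'e' past the unlock */
}

int main(void)
{
    return (int)cache_find(42);
}
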
@@ -166,7 +169,7 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
166 169
167static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn) 170static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
168{ 171{
169 struct device *dev = &vgpu->vdev.mdev->dev; 172 struct device *dev = mdev_dev(vgpu->vdev.mdev);
170 struct gvt_dma *this; 173 struct gvt_dma *this;
171 unsigned long g1; 174 unsigned long g1;
172 int rc; 175 int rc;
@@ -195,7 +198,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
195{ 198{
196 struct gvt_dma *dma; 199 struct gvt_dma *dma;
197 struct rb_node *node = NULL; 200 struct rb_node *node = NULL;
198 struct device *dev = &vgpu->vdev.mdev->dev; 201 struct device *dev = mdev_dev(vgpu->vdev.mdev);
199 unsigned long gfn; 202 unsigned long gfn;
200 203
201 mutex_lock(&vgpu->vdev.cache_lock); 204 mutex_lock(&vgpu->vdev.cache_lock);
@@ -227,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
227 return NULL; 230 return NULL;
228} 231}
229 232
230static ssize_t available_instance_show(struct kobject *kobj, struct device *dev, 233static ssize_t available_instances_show(struct kobject *kobj,
231 char *buf) 234 struct device *dev, char *buf)
232{ 235{
233 struct intel_vgpu_type *type; 236 struct intel_vgpu_type *type;
234 unsigned int num = 0; 237 unsigned int num = 0;
@@ -266,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
266 type->fence); 269 type->fence);
267} 270}
268 271
269static MDEV_TYPE_ATTR_RO(available_instance); 272static MDEV_TYPE_ATTR_RO(available_instances);
270static MDEV_TYPE_ATTR_RO(device_api); 273static MDEV_TYPE_ATTR_RO(device_api);
271static MDEV_TYPE_ATTR_RO(description); 274static MDEV_TYPE_ATTR_RO(description);
272 275
273static struct attribute *type_attrs[] = { 276static struct attribute *type_attrs[] = {
274 &mdev_type_attr_available_instance.attr, 277 &mdev_type_attr_available_instances.attr,
275 &mdev_type_attr_device_api.attr, 278 &mdev_type_attr_device_api.attr,
276 &mdev_type_attr_description.attr, 279 &mdev_type_attr_description.attr,
277 NULL, 280 NULL,
@@ -395,21 +398,24 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
395 struct intel_vgpu_type *type; 398 struct intel_vgpu_type *type;
396 struct device *pdev; 399 struct device *pdev;
397 void *gvt; 400 void *gvt;
401 int ret;
398 402
399 pdev = mdev->parent->dev; 403 pdev = mdev_parent_dev(mdev);
400 gvt = kdev_to_i915(pdev)->gvt; 404 gvt = kdev_to_i915(pdev)->gvt;
401 405
402 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); 406 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
403 if (!type) { 407 if (!type) {
404 gvt_err("failed to find type %s to create\n", 408 gvt_err("failed to find type %s to create\n",
405 kobject_name(kobj)); 409 kobject_name(kobj));
406 return -EINVAL; 410 ret = -EINVAL;
411 goto out;
407 } 412 }
408 413
409 vgpu = intel_gvt_ops->vgpu_create(gvt, type); 414 vgpu = intel_gvt_ops->vgpu_create(gvt, type);
410 if (IS_ERR_OR_NULL(vgpu)) { 415 if (IS_ERR_OR_NULL(vgpu)) {
411 gvt_err("create intel vgpu failed\n"); 416 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
412 return -EINVAL; 417 gvt_err("failed to create intel vgpu: %d\n", ret);
418 goto out;
413 } 419 }
414 420
415 INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); 421 INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -418,8 +424,11 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
418 mdev_set_drvdata(mdev, vgpu); 424 mdev_set_drvdata(mdev, vgpu);
419 425
420 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", 426 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
421 dev_name(&mdev->dev)); 427 dev_name(mdev_dev(mdev)));
422 return 0; 428 ret = 0;
429
430out:
431 return ret;
423} 432}
424 433
425static int intel_vgpu_remove(struct mdev_device *mdev) 434static int intel_vgpu_remove(struct mdev_device *mdev)
@@ -482,7 +491,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
482 vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier; 491 vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
483 492
484 events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; 493 events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
485 ret = vfio_register_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY, &events, 494 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
486 &vgpu->vdev.iommu_notifier); 495 &vgpu->vdev.iommu_notifier);
487 if (ret != 0) { 496 if (ret != 0) {
488 gvt_err("vfio_register_notifier for iommu failed: %d\n", ret); 497 gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
@@ -490,17 +499,26 @@ static int intel_vgpu_open(struct mdev_device *mdev)
490 } 499 }
491 500
492 events = VFIO_GROUP_NOTIFY_SET_KVM; 501 events = VFIO_GROUP_NOTIFY_SET_KVM;
493 ret = vfio_register_notifier(&mdev->dev, VFIO_GROUP_NOTIFY, &events, 502 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
494 &vgpu->vdev.group_notifier); 503 &vgpu->vdev.group_notifier);
495 if (ret != 0) { 504 if (ret != 0) {
496 gvt_err("vfio_register_notifier for group failed: %d\n", ret); 505 gvt_err("vfio_register_notifier for group failed: %d\n", ret);
497 goto undo_iommu; 506 goto undo_iommu;
498 } 507 }
499 508
500 return kvmgt_guest_init(mdev); 509 ret = kvmgt_guest_init(mdev);
510 if (ret)
511 goto undo_group;
512
513 atomic_set(&vgpu->vdev.released, 0);
514 return ret;
515
516undo_group:
517 vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
518 &vgpu->vdev.group_notifier);
501 519
502undo_iommu: 520undo_iommu:
503 vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY, 521 vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
504 &vgpu->vdev.iommu_notifier); 522 &vgpu->vdev.iommu_notifier);
505out: 523out:
506 return ret; 524 return ret;
@@ -509,17 +527,26 @@ out:
509static void __intel_vgpu_release(struct intel_vgpu *vgpu) 527static void __intel_vgpu_release(struct intel_vgpu *vgpu)
510{ 528{
511 struct kvmgt_guest_info *info; 529 struct kvmgt_guest_info *info;
530 int ret;
512 531
513 if (!handle_valid(vgpu->handle)) 532 if (!handle_valid(vgpu->handle))
514 return; 533 return;
515 534
516 vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY, 535 if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
536 return;
537
538 ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
517 &vgpu->vdev.iommu_notifier); 539 &vgpu->vdev.iommu_notifier);
518 vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY, 540 WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
541
542 ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
519 &vgpu->vdev.group_notifier); 543 &vgpu->vdev.group_notifier);
544 WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
520 545
521 info = (struct kvmgt_guest_info *)vgpu->handle; 546 info = (struct kvmgt_guest_info *)vgpu->handle;
522 kvmgt_guest_exit(info); 547 kvmgt_guest_exit(info);
548
549 vgpu->vdev.kvm = NULL;
523 vgpu->handle = 0; 550 vgpu->handle = 0;
524} 551}
525 552
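
The new vdev.released flag (declared in gvt.h above) makes release idempotent: teardown can be reached from both the release work item and other paths, and atomic_cmpxchg(&released, 0, 1) lets exactly one caller proceed to the notifier unregistration. A minimal C11 model of that claim-once gate:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int released;

static void release_once(void)
{
    int expected = 0;

    /* Only the first caller flips 0 -> 1 and proceeds. */
    if (!atomic_compare_exchange_strong(&released, &expected, 1))
        return;

    puts("unregister notifiers, drop handle"); /* runs exactly once */
}

int main(void)
{
    release_once();
    release_once(); /* a second (racing) call is a harmless no-op */
    return 0;
}
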
@@ -534,6 +561,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
534{ 561{
535 struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu, 562 struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
536 vdev.release_work); 563 vdev.release_work);
564
537 __intel_vgpu_release(vgpu); 565 __intel_vgpu_release(vgpu);
538} 566}
539 567
@@ -1089,7 +1117,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
1089 return 0; 1117 return 0;
1090} 1118}
1091 1119
1092static const struct parent_ops intel_vgpu_ops = { 1120static const struct mdev_parent_ops intel_vgpu_ops = {
1093 .supported_type_groups = intel_vgpu_type_groups, 1121 .supported_type_groups = intel_vgpu_type_groups,
1094 .create = intel_vgpu_create, 1122 .create = intel_vgpu_create,
1095 .remove = intel_vgpu_remove, 1123 .remove = intel_vgpu_remove,
@@ -1134,6 +1162,10 @@ static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
1134 1162
1135 idx = srcu_read_lock(&kvm->srcu); 1163 idx = srcu_read_lock(&kvm->srcu);
1136 slot = gfn_to_memslot(kvm, gfn); 1164 slot = gfn_to_memslot(kvm, gfn);
1165 if (!slot) {
1166 srcu_read_unlock(&kvm->srcu, idx);
1167 return -EINVAL;
1168 }
1137 1169
1138 spin_lock(&kvm->mmu_lock); 1170 spin_lock(&kvm->mmu_lock);
1139 1171
@@ -1164,6 +1196,10 @@ static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
1164 1196
1165 idx = srcu_read_lock(&kvm->srcu); 1197 idx = srcu_read_lock(&kvm->srcu);
1166 slot = gfn_to_memslot(kvm, gfn); 1198 slot = gfn_to_memslot(kvm, gfn);
1199 if (!slot) {
1200 srcu_read_unlock(&kvm->srcu, idx);
1201 return -EINVAL;
1202 }
1167 1203
1168 spin_lock(&kvm->mmu_lock); 1204 spin_lock(&kvm->mmu_lock);
1169 1205
@@ -1311,18 +1347,14 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
1311 1347
1312static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) 1348static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1313{ 1349{
1314 struct intel_vgpu *vgpu;
1315
1316 if (!info) { 1350 if (!info) {
1317 gvt_err("kvmgt_guest_info invalid\n"); 1351 gvt_err("kvmgt_guest_info invalid\n");
1318 return false; 1352 return false;
1319 } 1353 }
1320 1354
1321 vgpu = info->vgpu;
1322
1323 kvm_page_track_unregister_notifier(info->kvm, &info->track_node); 1355 kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1324 kvmgt_protect_table_destroy(info); 1356 kvmgt_protect_table_destroy(info);
1325 gvt_cache_destroy(vgpu); 1357 gvt_cache_destroy(info->vgpu);
1326 vfree(info); 1358 vfree(info);
1327 1359
1328 return true; 1360 return true;
@@ -1372,7 +1404,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1372 return pfn; 1404 return pfn;
1373 1405
1374 pfn = INTEL_GVT_INVALID_ADDR; 1406 pfn = INTEL_GVT_INVALID_ADDR;
1375 dev = &info->vgpu->vdev.mdev->dev; 1407 dev = mdev_dev(info->vgpu->vdev.mdev);
1376 rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); 1408 rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
1377 if (rc != 1) { 1409 if (rc != 1) {
1378 gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); 1410 gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 09c9450a1946..4df078bc5d04 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
125 if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) 125 if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
126 goto err; 126 goto err;
127 127
128 mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
129 if (!mmio && !vgpu->mmio.disable_warn_untrack) {
130 gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
131 vgpu->id, offset, bytes, *(u32 *)p_data);
132
133 if (offset == 0x206c) {
134 gvt_err("------------------------------------------\n");
135 gvt_err("vgpu%d: likely triggers a gfx reset\n",
136 vgpu->id);
137 gvt_err("------------------------------------------\n");
138 vgpu->mmio.disable_warn_untrack = true;
139 }
140 }
141
142 if (!intel_gvt_mmio_is_unalign(gvt, offset)) { 128 if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
143 if (WARN_ON(!IS_ALIGNED(offset, bytes))) 129 if (WARN_ON(!IS_ALIGNED(offset, bytes)))
144 goto err; 130 goto err;
145 } 131 }
146 132
133 mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
147 if (mmio) { 134 if (mmio) {
148 if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { 135 if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
149 if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) 136 if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
152 goto err; 139 goto err;
153 } 140 }
154 ret = mmio->read(vgpu, offset, p_data, bytes); 141 ret = mmio->read(vgpu, offset, p_data, bytes);
155 } else 142 } else {
156 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); 143 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
157 144
145 if (!vgpu->mmio.disable_warn_untrack) {
146 gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
147 vgpu->id, offset, bytes, *(u32 *)p_data);
148
149 if (offset == 0x206c) {
150 gvt_err("------------------------------------------\n");
151 gvt_err("vgpu%d: likely triggers a gfx reset\n",
152 vgpu->id);
153 gvt_err("------------------------------------------\n");
154 vgpu->mmio.disable_warn_untrack = true;
155 }
156 }
157 }
158
158 if (ret) 159 if (ret)
159 goto err; 160 goto err;
160 161
@@ -302,3 +303,56 @@ err:
302 mutex_unlock(&gvt->lock); 303 mutex_unlock(&gvt->lock);
303 return ret; 304 return ret;
304} 305}
306
307
308/**
309 * intel_vgpu_reset_mmio - reset virtual MMIO space
310 * @vgpu: a vGPU
311 *
312 */
313void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
314{
315 struct intel_gvt *gvt = vgpu->gvt;
316 const struct intel_gvt_device_info *info = &gvt->device_info;
317
318 memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
319 memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
320
321 vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
322
323	/* set bits 0:2 (Core C-State) to C0 */
324 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
325}
326
327/**
328 * intel_vgpu_init_mmio - init MMIO space
329 * @vgpu: a vGPU
330 *
331 * Returns:
332 * Zero on success, negative error code if failed
333 */
334int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
335{
336 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
337
338 vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
339 if (!vgpu->mmio.vreg)
340 return -ENOMEM;
341
342 vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
343
344 intel_vgpu_reset_mmio(vgpu);
345
346 return 0;
347}
348
349/**
350 * intel_vgpu_clean_mmio - clean MMIO space
351 * @vgpu: a vGPU
352 *
353 */
354void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
355{
356 vfree(vgpu->mmio.vreg);
357 vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
358}
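
intel_vgpu_init_mmio() keeps the old trick of one vzalloc(mmio_size * 2) carved into two equal halves, vreg for the guest-visible registers and sreg for the shadow copy, so a single allocation and a single vfree() in intel_vgpu_clean_mmio() cover both. A sketch of that layout:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct vgpu_mmio {
    uint8_t *vreg; /* guest-visible register file */
    uint8_t *sreg; /* shadow copy, second half of the same block */
};

static int init_mmio(struct vgpu_mmio *m, size_t mmio_size,
                     const uint8_t *firmware)
{
    m->vreg = calloc(2, mmio_size); /* models vzalloc(mmio_size * 2) */
    if (!m->vreg)
        return -1;
    m->sreg = m->vreg + mmio_size;

    /* Reset path: both halves start from the firmware snapshot. */
    memcpy(m->vreg, firmware, mmio_size);
    memcpy(m->sreg, firmware, mmio_size);
    return 0;
}

static void clean_mmio(struct vgpu_mmio *m)
{
    free(m->vreg); /* frees vreg and sreg together */
    m->vreg = m->sreg = NULL;
}

int main(void)
{
    static const uint8_t fw[64];
    struct vgpu_mmio m;

    if (init_mmio(&m, sizeof(fw), fw))
        return 1;
    clean_mmio(&m);
    return 0;
}
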
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 87d5b5e366a3..3bc620f56f35 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
86 *offset; \ 86 *offset; \
87}) 87})
88 88
89int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
90void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
91void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
92
89int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); 93int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
90 94
91int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, 95int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index d2a0fbc896c3..d9fb41ab7119 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
36 vgpu->id)) 36 vgpu->id))
37 return -EINVAL; 37 return -EINVAL;
38 38
39 vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC | 39 vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
40 GFP_DMA32 | __GFP_ZERO, 40 __GFP_ZERO,
41 INTEL_GVT_OPREGION_PORDER); 41 get_order(INTEL_GVT_OPREGION_SIZE));
42 42
43 if (!vgpu_opregion(vgpu)->va) 43 if (!vgpu_opregion(vgpu)->va)
44 return -ENOMEM; 44 return -ENOMEM;
@@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
65 int i, ret; 65 int i, ret;
66 66
67 for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) { 67 for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
68 mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu) 68 mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
69 + i * PAGE_SIZE); 69 + i * PAGE_SIZE);
70 if (mfn == INTEL_GVT_INVALID_ADDR) { 70 if (mfn == INTEL_GVT_INVALID_ADDR) {
71 gvt_err("fail to get MFN from VA\n"); 71 gvt_err("fail to get MFN from VA\n");
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
97 if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { 97 if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
98 map_vgpu_opregion(vgpu, false); 98 map_vgpu_opregion(vgpu, false);
99 free_pages((unsigned long)vgpu_opregion(vgpu)->va, 99 free_pages((unsigned long)vgpu_opregion(vgpu)->va,
100 INTEL_GVT_OPREGION_PORDER); 100 get_order(INTEL_GVT_OPREGION_SIZE));
101 101
102 vgpu_opregion(vgpu)->va = NULL; 102 vgpu_opregion(vgpu)->va = NULL;
103 } 103 }
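
Replacing the hardcoded INTEL_GVT_OPREGION_PORDER with get_order(INTEL_GVT_OPREGION_SIZE) ties the allocation order to the size definition, so the two cannot drift apart; get_order() is simply the ceiling log2 of the size in pages. A standalone equivalent:

#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest 'order' such that (PAGE_SIZE << order) >= size. */
static int order_of(size_t size)
{
    int order = 0;

    while ((PAGE_SIZE << order) < size)
        order++;
    return order;
}

int main(void)
{
    assert(order_of(2 * PAGE_SIZE) == 1); /* two opregion pages -> order 1 */
    assert(order_of(PAGE_SIZE + 1) == 1); /* rounds up, like get_order() */
    assert(order_of(PAGE_SIZE) == 0);
    return 0;
}
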
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 0dfe789d8f02..fbd023a16f18 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -50,8 +50,7 @@
50#define INTEL_GVT_OPREGION_PARM 0x204 50#define INTEL_GVT_OPREGION_PARM 0x204
51 51
52#define INTEL_GVT_OPREGION_PAGES 2 52#define INTEL_GVT_OPREGION_PAGES 2
53#define INTEL_GVT_OPREGION_PORDER 1 53#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
54#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
55 54
56#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) 55#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
57 56
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4db242250235..e91885dffeff 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
350{ 350{
351 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 351 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
352 struct intel_vgpu_workload *workload; 352 struct intel_vgpu_workload *workload;
353 struct intel_vgpu *vgpu;
353 int event; 354 int event;
354 355
355 mutex_lock(&gvt->lock); 356 mutex_lock(&gvt->lock);
356 357
357 workload = scheduler->current_workload[ring_id]; 358 workload = scheduler->current_workload[ring_id];
359 vgpu = workload->vgpu;
358 360
359 if (!workload->status && !workload->vgpu->resetting) { 361 if (!workload->status && !vgpu->resetting) {
360 wait_event(workload->shadow_ctx_status_wq, 362 wait_event(workload->shadow_ctx_status_wq,
361 !atomic_read(&workload->shadow_ctx_active)); 363 !atomic_read(&workload->shadow_ctx_active));
362 364
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
364 366
365 for_each_set_bit(event, workload->pending_events, 367 for_each_set_bit(event, workload->pending_events,
366 INTEL_GVT_EVENT_MAX) 368 INTEL_GVT_EVENT_MAX)
367 intel_vgpu_trigger_virtual_event(workload->vgpu, 369 intel_vgpu_trigger_virtual_event(vgpu, event);
368 event);
369 } 370 }
370 371
371 gvt_dbg_sched("ring id %d complete workload %p status %d\n", 372 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
373 374
374 scheduler->current_workload[ring_id] = NULL; 375 scheduler->current_workload[ring_id] = NULL;
375 376
376 atomic_dec(&workload->vgpu->running_workload_num);
377
378 list_del_init(&workload->list); 377 list_del_init(&workload->list);
379 workload->complete(workload); 378 workload->complete(workload);
380 379
380 atomic_dec(&vgpu->running_workload_num);
381 wake_up(&scheduler->workload_complete_wq); 381 wake_up(&scheduler->workload_complete_wq);
382 mutex_unlock(&gvt->lock); 382 mutex_unlock(&gvt->lock);
383} 383}
@@ -459,11 +459,11 @@ complete:
459 gvt_dbg_sched("will complete workload %p\n, status: %d\n", 459 gvt_dbg_sched("will complete workload %p\n, status: %d\n",
460 workload, workload->status); 460 workload, workload->status);
461 461
462 complete_current_workload(gvt, ring_id);
463
464 if (workload->req) 462 if (workload->req)
465 i915_gem_request_put(fetch_and_zero(&workload->req)); 463 i915_gem_request_put(fetch_and_zero(&workload->req));
466 464
465 complete_current_workload(gvt, ring_id);
466
467 if (need_force_wake) 467 if (need_force_wake)
468 intel_uncore_forcewake_put(gvt->dev_priv, 468 intel_uncore_forcewake_put(gvt->dev_priv,
469 FORCEWAKE_ALL); 469 FORCEWAKE_ALL);
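
Both reorderings in scheduler.c share one lifetime rule: workload->complete(workload) may recycle or free the workload, so anything reached through the workload pointer (the cached vgpu back-pointer, the request reference) must be taken or dropped beforehand. A small model of dropping references before the freeing callback, with hypothetical types:

#include <stdlib.h>

struct request { int refcount; };

struct workload {
    struct request *req;
    void (*complete)(struct workload *w); /* may free 'w' */
};

static void request_put(struct request *r)
{
    if (--r->refcount == 0)
        free(r);
}

static void retire(struct workload *w)
{
    struct request *req = w->req; /* models fetch_and_zero(&workload->req) */

    w->req = NULL;
    if (req)
        request_put(req); /* drop our ref while 'w' is still ours */

    w->complete(w);       /* 'w' must not be touched after this line */
}

static void free_workload(struct workload *w) { free(w); }

int main(void)
{
    struct workload *w = malloc(sizeof(*w));
    struct request *r = malloc(sizeof(*r));

    if (!w || !r)
        return 1;
    r->refcount = 1;
    w->req = r;
    w->complete = free_workload;
    retire(w);
    return 0;
}
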
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 3b30c28bff51..2833dfa8c9ae 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
113 struct drm_i915_gem_object *obj; 113 struct drm_i915_gem_object *obj;
114 void *va; 114 void *va;
115 unsigned long len; 115 unsigned long len;
116 void *bb_start_cmd_va; 116 u32 *bb_start_cmd_va;
117}; 117};
118 118
119#define workload_q_head(vgpu, ring_id) \ 119#define workload_q_head(vgpu, ring_id) \
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 536d2b9d5777..7295bc8e12fb 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -35,79 +35,6 @@
35#include "gvt.h" 35#include "gvt.h"
36#include "i915_pvinfo.h" 36#include "i915_pvinfo.h"
37 37
38static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
39{
40 vfree(vgpu->mmio.vreg);
41 vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
42}
43
44int setup_vgpu_mmio(struct intel_vgpu *vgpu)
45{
46 struct intel_gvt *gvt = vgpu->gvt;
47 const struct intel_gvt_device_info *info = &gvt->device_info;
48
49 if (vgpu->mmio.vreg)
50 memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
51 else {
52 vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
53 if (!vgpu->mmio.vreg)
54 return -ENOMEM;
55 }
56
57 vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
58
59 memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
60 memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
61
62 vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
63
64 /* set the bit 0:2(Core C-State ) to C0 */
65 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
66 return 0;
67}
68
69static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
70 struct intel_vgpu_creation_params *param)
71{
72 struct intel_gvt *gvt = vgpu->gvt;
73 const struct intel_gvt_device_info *info = &gvt->device_info;
74 u16 *gmch_ctl;
75 int i;
76
77 memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
78 info->cfg_space_size);
79
80 if (!param->primary) {
81 vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
82 INTEL_GVT_PCI_CLASS_VGA_OTHER;
83 vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
84 INTEL_GVT_PCI_CLASS_VGA_OTHER;
85 }
86
87 /* Show guest that there isn't any stolen memory.*/
88 gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
89 *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
90
91 intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
92 gvt_aperture_pa_base(gvt), true);
93
94 vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
95 | PCI_COMMAND_MEMORY
96 | PCI_COMMAND_MASTER);
97 /*
98 * Clear the bar upper 32bit and let guest to assign the new value
99 */
100 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
101 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
102 memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
103
104 for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
105 vgpu->cfg_space.bar[i].size = pci_resource_len(
106 gvt->dev_priv->drm.pdev, i * 2);
107 vgpu->cfg_space.bar[i].tracked = false;
108 }
109}
110
111void populate_pvinfo_page(struct intel_vgpu *vgpu) 38void populate_pvinfo_page(struct intel_vgpu *vgpu)
112{ 39{
113 /* setup the ballooning information */ 40 /* setup the ballooning information */
@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
177 if (low_avail / min_low == 0) 104 if (low_avail / min_low == 0)
178 break; 105 break;
179 gvt->types[i].low_gm_size = min_low; 106 gvt->types[i].low_gm_size = min_low;
180 gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size; 107 gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
181 gvt->types[i].fence = 4; 108 gvt->types[i].fence = 4;
182 gvt->types[i].max_instance = low_avail / min_low; 109 gvt->types[i].max_instance = low_avail / min_low;
183 gvt->types[i].avail_instance = gvt->types[i].max_instance; 110 gvt->types[i].avail_instance = gvt->types[i].max_instance;
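
The new sizing gives each vGPU type eight times its low graphics memory, but never less than 384 MiB of high GM, matching the availability calculation's switch from 3x to 8x of the 256 MiB base below. A one-liner capturing the rule, with a worked case (min_low = 32 MiB yields max(256 MiB, 384 MiB) = 384 MiB):

#include <stdint.h>

#define MB_TO_BYTES(mb) ((mb) * 1024ULL * 1024ULL)

static uint64_t high_gm_size(uint64_t min_low)
{
    uint64_t v = min_low << 3; /* 8x the low GM size... */

    return v > MB_TO_BYTES(384) ? v : MB_TO_BYTES(384); /* ...floored at 384 MiB */
}

int main(void)
{
    return high_gm_size(MB_TO_BYTES(32)) == MB_TO_BYTES(384) ? 0 : 1;
}
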
@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
217 */ 144 */
218 low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - 145 low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
219 gvt->gm.vgpu_allocated_low_gm_size; 146 gvt->gm.vgpu_allocated_low_gm_size;
220 high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE - 147 high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
221 gvt->gm.vgpu_allocated_high_gm_size; 148 gvt->gm.vgpu_allocated_high_gm_size;
222 fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - 149 fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
223 gvt->fence.vgpu_allocated_fence_num; 150 gvt->fence.vgpu_allocated_fence_num;
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
268 intel_vgpu_clean_gtt(vgpu); 195 intel_vgpu_clean_gtt(vgpu);
269 intel_gvt_hypervisor_detach_vgpu(vgpu); 196 intel_gvt_hypervisor_detach_vgpu(vgpu);
270 intel_vgpu_free_resource(vgpu); 197 intel_vgpu_free_resource(vgpu);
271 clean_vgpu_mmio(vgpu); 198 intel_vgpu_clean_mmio(vgpu);
272 vfree(vgpu); 199 vfree(vgpu);
273 200
274 intel_gvt_update_vgpu_types(gvt); 201 intel_gvt_update_vgpu_types(gvt);
@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
300 vgpu->gvt = gvt; 227 vgpu->gvt = gvt;
301 bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); 228 bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
302 229
303 setup_vgpu_cfg_space(vgpu, param); 230 intel_vgpu_init_cfg_space(vgpu, param->primary);
304 231
305 ret = setup_vgpu_mmio(vgpu); 232 ret = intel_vgpu_init_mmio(vgpu);
306 if (ret) 233 if (ret)
307 goto out_free_vgpu; 234 goto out_clean_idr;
308 235
309 ret = intel_vgpu_alloc_resource(vgpu, param); 236 ret = intel_vgpu_alloc_resource(vgpu, param);
310 if (ret) 237 if (ret)
@@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu:
354out_clean_vgpu_resource: 281out_clean_vgpu_resource:
355 intel_vgpu_free_resource(vgpu); 282 intel_vgpu_free_resource(vgpu);
356out_clean_vgpu_mmio: 283out_clean_vgpu_mmio:
357 clean_vgpu_mmio(vgpu); 284 intel_vgpu_clean_mmio(vgpu);
285out_clean_idr:
286 idr_remove(&gvt->vgpu_idr, vgpu->id);
358out_free_vgpu: 287out_free_vgpu:
359 vfree(vgpu); 288 vfree(vgpu);
360 mutex_unlock(&gvt->lock); 289 mutex_unlock(&gvt->lock);
@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
398} 327}
399 328
400/** 329/**
401 * intel_gvt_reset_vgpu - reset a virtual GPU 330 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
331 * @vgpu: virtual GPU
332 * @dmlr: vGPU Device Model Level Reset or GT Reset
333 * @engine_mask: engines to reset for GT reset
334 *
335 * This function is called when the user wants to reset a virtual GPU through
336 * device model reset or GT reset. The caller should hold the gvt lock.
337 *
338 * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
339 * the whole vGPU to its default state, as when it was created. This function
340 * is required both for functionality and security concerns. The ultimate goal
341 * of vGPU FLR is to reuse a vGPU instance across virtual machines. When we
342 * assign a vGPU to a virtual machine we must issue such a reset first.
343 *
 344 * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines
 345 * (Render, Blitter, Video, Video Enhancement). They are defined by the GPU Spec.
 346 * Unlike the FLR, a GT reset only resets particular resources of a vGPU per
 347 * the reset request. The guest driver can issue a GT reset by programming the
348 * virtual GDRST register to reset specific virtual GPU engine or all
349 * engines.
350 *
 351 * The parameter dmlr identifies whether we will do a DMLR or a GT reset.
 352 * The parameter engine_mask specifies the engines that need to be
 353 * reset. If the value ALL_ENGINES is given for engine_mask, it means
 354 * the caller requests a full GT reset in which we will reset all virtual
355 * GPU engines. For FLR, engine_mask is ignored.
356 */
357void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
358 unsigned int engine_mask)
359{
360 struct intel_gvt *gvt = vgpu->gvt;
361 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
362
363 gvt_dbg_core("------------------------------------------\n");
364 gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
365 vgpu->id, dmlr, engine_mask);
366 vgpu->resetting = true;
367
368 intel_vgpu_stop_schedule(vgpu);
369 /*
 370	 * The current_vgpu will be set to NULL after stopping the
 371	 * scheduler when the reset is triggered by the current vgpu.
372 */
373 if (scheduler->current_vgpu == NULL) {
374 mutex_unlock(&gvt->lock);
375 intel_gvt_wait_vgpu_idle(vgpu);
376 mutex_lock(&gvt->lock);
377 }
378
379 intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
380
381 /* full GPU reset or device model level reset */
382 if (engine_mask == ALL_ENGINES || dmlr) {
383 intel_vgpu_reset_gtt(vgpu, dmlr);
384 intel_vgpu_reset_resource(vgpu);
385 intel_vgpu_reset_mmio(vgpu);
386 populate_pvinfo_page(vgpu);
387
388 if (dmlr)
389 intel_vgpu_reset_cfg_space(vgpu);
390 }
391
392 vgpu->resetting = false;
393 gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
394 gvt_dbg_core("------------------------------------------\n");
395}
396
397/**
398 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
402 * @vgpu: virtual GPU 399 * @vgpu: virtual GPU
403 * 400 *
404 * This function is called when user wants to reset a virtual GPU. 401 * This function is called when user wants to reset a virtual GPU.
@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
406 */ 403 */
407void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) 404void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
408{ 405{
406 mutex_lock(&vgpu->gvt->lock);
407 intel_gvt_reset_vgpu_locked(vgpu, true, 0);
408 mutex_unlock(&vgpu->gvt->lock);
409} 409}
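Taken together, the two entry points above imply a simple caller convention; the sketch below assumes only the functions shown in this hunk, and the example_* wrappers are hypothetical:

/* Assumes the gvt headers; example_* wrappers are hypothetical. */
void example_full_reset(struct intel_vgpu *vgpu)
{
	/* takes gvt->lock internally, then performs a DMLR */
	intel_gvt_reset_vgpu(vgpu);
}

void example_engine_reset(struct intel_vgpu *vgpu, unsigned int mask)
{
	/* the caller must hold gvt->lock for the _locked variant */
	mutex_lock(&vgpu->gvt->lock);
	intel_gvt_reset_vgpu_locked(vgpu, false, mask);
	mutex_unlock(&vgpu->gvt->lock);
}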
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 445fec9c2841..728ca3ea74d2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -213,7 +213,8 @@ static void intel_detect_pch(struct drm_device *dev)
213 } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { 213 } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
214 dev_priv->pch_type = PCH_KBP; 214 dev_priv->pch_type = PCH_KBP;
215 DRM_DEBUG_KMS("Found KabyPoint PCH\n"); 215 DRM_DEBUG_KMS("Found KabyPoint PCH\n");
216 WARN_ON(!IS_KABYLAKE(dev_priv)); 216 WARN_ON(!IS_SKYLAKE(dev_priv) &&
217 !IS_KABYLAKE(dev_priv));
217 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || 218 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
218 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || 219 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
219 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && 220 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -2378,7 +2379,7 @@ static int intel_runtime_suspend(struct device *kdev)
2378 2379
2379 assert_forcewakes_inactive(dev_priv); 2380 assert_forcewakes_inactive(dev_priv);
2380 2381
2381 if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) 2382 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
2382 intel_hpd_poll_init(dev_priv); 2383 intel_hpd_poll_init(dev_priv);
2383 2384
2384 DRM_DEBUG_KMS("Device suspended\n"); 2385 DRM_DEBUG_KMS("Device suspended\n");
@@ -2427,6 +2428,7 @@ static int intel_runtime_resume(struct device *kdev)
2427 * we can do is to hope that things will still work (and disable RPM). 2428 * we can do is to hope that things will still work (and disable RPM).
2428 */ 2429 */
2429 i915_gem_init_swizzling(dev_priv); 2430 i915_gem_init_swizzling(dev_priv);
2431 i915_gem_restore_fences(dev_priv);
2430 2432
2431 intel_runtime_pm_enable_interrupts(dev_priv); 2433 intel_runtime_pm_enable_interrupts(dev_priv);
2432 2434
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 243224aeabf8..8493e19b563a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1012,6 +1012,8 @@ struct intel_fbc {
1012 struct work_struct underrun_work; 1012 struct work_struct underrun_work;
1013 1013
1014 struct intel_fbc_state_cache { 1014 struct intel_fbc_state_cache {
1015 struct i915_vma *vma;
1016
1015 struct { 1017 struct {
1016 unsigned int mode_flags; 1018 unsigned int mode_flags;
1017 uint32_t hsw_bdw_pixel_rate; 1019 uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@ struct intel_fbc {
1025 } plane; 1027 } plane;
1026 1028
1027 struct { 1029 struct {
1028 u64 ilk_ggtt_offset;
1029 uint32_t pixel_format; 1030 uint32_t pixel_format;
1030 unsigned int stride; 1031 unsigned int stride;
1031 int fence_reg;
1032 unsigned int tiling_mode;
1033 } fb; 1032 } fb;
1034 } state_cache; 1033 } state_cache;
1035 1034
1036 struct intel_fbc_reg_params { 1035 struct intel_fbc_reg_params {
1036 struct i915_vma *vma;
1037
1037 struct { 1038 struct {
1038 enum pipe pipe; 1039 enum pipe pipe;
1039 enum plane plane; 1040 enum plane plane;
@@ -1041,10 +1042,8 @@ struct intel_fbc {
1041 } crtc; 1042 } crtc;
1042 1043
1043 struct { 1044 struct {
1044 u64 ggtt_offset;
1045 uint32_t pixel_format; 1045 uint32_t pixel_format;
1046 unsigned int stride; 1046 unsigned int stride;
1047 int fence_reg;
1048 } fb; 1047 } fb;
1049 1048
1050 int cfb_size; 1049 int cfb_size;
@@ -1977,6 +1976,11 @@ struct drm_i915_private {
1977 1976
1978 struct i915_frontbuffer_tracking fb_tracking; 1977 struct i915_frontbuffer_tracking fb_tracking;
1979 1978
1979 struct intel_atomic_helper {
1980 struct llist_head free_list;
1981 struct work_struct free_work;
1982 } atomic_helper;
1983
1980 u16 orig_clock; 1984 u16 orig_clock;
1981 1985
1982 bool mchbar_need_disable; 1986 bool mchbar_need_disable;
@@ -3163,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
3163 return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view); 3167 return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
3164} 3168}
3165 3169
3166static inline unsigned long
3167i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
3168 const struct i915_ggtt_view *view)
3169{
3170 return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
3171}
3172
3173/* i915_gem_fence_reg.c */ 3170/* i915_gem_fence_reg.c */
3174int __must_check i915_vma_get_fence(struct i915_vma *vma); 3171int __must_check i915_vma_get_fence(struct i915_vma *vma);
3175int __must_check i915_vma_put_fence(struct i915_vma *vma); 3172int __must_check i915_vma_put_fence(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4a31b7a891ec..24b5b046754b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -244,14 +244,16 @@ err_phys:
244 244
245static void 245static void
246__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, 246__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
247 struct sg_table *pages) 247 struct sg_table *pages,
248 bool needs_clflush)
248{ 249{
249 GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); 250 GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
250 251
251 if (obj->mm.madv == I915_MADV_DONTNEED) 252 if (obj->mm.madv == I915_MADV_DONTNEED)
252 obj->mm.dirty = false; 253 obj->mm.dirty = false;
253 254
254 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 && 255 if (needs_clflush &&
256 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
255 !cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) 257 !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
256 drm_clflush_sg(pages); 258 drm_clflush_sg(pages);
257 259
@@ -263,7 +265,7 @@ static void
263i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, 265i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
264 struct sg_table *pages) 266 struct sg_table *pages)
265{ 267{
266 __i915_gem_object_release_shmem(obj, pages); 268 __i915_gem_object_release_shmem(obj, pages, false);
267 269
268 if (obj->mm.dirty) { 270 if (obj->mm.dirty) {
269 struct address_space *mapping = obj->base.filp->f_mapping; 271 struct address_space *mapping = obj->base.filp->f_mapping;
@@ -593,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
593 struct drm_i915_gem_pwrite *args, 595 struct drm_i915_gem_pwrite *args,
594 struct drm_file *file) 596 struct drm_file *file)
595{ 597{
596 struct drm_device *dev = obj->base.dev;
597 void *vaddr = obj->phys_handle->vaddr + args->offset; 598 void *vaddr = obj->phys_handle->vaddr + args->offset;
598 char __user *user_data = u64_to_user_ptr(args->data_ptr); 599 char __user *user_data = u64_to_user_ptr(args->data_ptr);
599 int ret;
600 600
601 /* We manually control the domain here and pretend that it 601 /* We manually control the domain here and pretend that it
602 * remains coherent i.e. in the GTT domain, like shmem_pwrite. 602 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
603 */ 603 */
604 lockdep_assert_held(&obj->base.dev->struct_mutex);
605 ret = i915_gem_object_wait(obj,
606 I915_WAIT_INTERRUPTIBLE |
607 I915_WAIT_LOCKED |
608 I915_WAIT_ALL,
609 MAX_SCHEDULE_TIMEOUT,
610 to_rps_client(file));
611 if (ret)
612 return ret;
613
614 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 604 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
615 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { 605 if (copy_from_user(vaddr, user_data, args->size))
616 unsigned long unwritten; 606 return -EFAULT;
617
618 /* The physical object once assigned is fixed for the lifetime
619 * of the obj, so we can safely drop the lock and continue
620 * to access vaddr.
621 */
622 mutex_unlock(&dev->struct_mutex);
623 unwritten = copy_from_user(vaddr, user_data, args->size);
624 mutex_lock(&dev->struct_mutex);
625 if (unwritten) {
626 ret = -EFAULT;
627 goto out;
628 }
629 }
630 607
631 drm_clflush_virt_range(vaddr, args->size); 608 drm_clflush_virt_range(vaddr, args->size);
632 i915_gem_chipset_flush(to_i915(dev)); 609 i915_gem_chipset_flush(to_i915(obj->base.dev));
633 610
634out:
635 intel_fb_obj_flush(obj, false, ORIGIN_CPU); 611 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
636 return ret; 612 return 0;
637} 613}
638 614
639void *i915_gem_object_alloc(struct drm_device *dev) 615void *i915_gem_object_alloc(struct drm_device *dev)
@@ -2034,8 +2010,16 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
2034 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2010 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2035 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2011 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2036 2012
2037 if (WARN_ON(reg->pin_count)) 2013 /* Ideally we want to assert that the fence register is not
2038 continue; 2014 * live at this point (i.e. that no piece of code will be
2015 * trying to write through fence + GTT, as that both violates
2016 * our tracking of activity and associated locking/barriers,
2017 * but also is illegal given that the hw is powered down).
2018 *
2019 * Previously we used reg->pin_count as a "liveness" indicator.
2020 * That is not sufficient, and we need a more fine-grained
2021 * tool if we want to have a sanity check here.
2022 */
2039 2023
2040 if (!reg->vma) 2024 if (!reg->vma)
2041 continue; 2025 continue;
@@ -2231,7 +2215,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2231 struct sgt_iter sgt_iter; 2215 struct sgt_iter sgt_iter;
2232 struct page *page; 2216 struct page *page;
2233 2217
2234 __i915_gem_object_release_shmem(obj, pages); 2218 __i915_gem_object_release_shmem(obj, pages, true);
2235 2219
2236 i915_gem_gtt_finish_pages(obj, pages); 2220 i915_gem_gtt_finish_pages(obj, pages);
2237 2221
@@ -2304,15 +2288,6 @@ unlock:
2304 mutex_unlock(&obj->mm.lock); 2288 mutex_unlock(&obj->mm.lock);
2305} 2289}
2306 2290
2307static unsigned int swiotlb_max_size(void)
2308{
2309#if IS_ENABLED(CONFIG_SWIOTLB)
2310 return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
2311#else
2312 return 0;
2313#endif
2314}
2315
2316static void i915_sg_trim(struct sg_table *orig_st) 2291static void i915_sg_trim(struct sg_table *orig_st)
2317{ 2292{
2318 struct sg_table new_st; 2293 struct sg_table new_st;
@@ -2322,7 +2297,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
2322 if (orig_st->nents == orig_st->orig_nents) 2297 if (orig_st->nents == orig_st->orig_nents)
2323 return; 2298 return;
2324 2299
2325 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL)) 2300 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
2326 return; 2301 return;
2327 2302
2328 new_sg = new_st.sgl; 2303 new_sg = new_st.sgl;
@@ -2360,7 +2335,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2360 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2335 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2361 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2336 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2362 2337
2363 max_segment = swiotlb_max_size(); 2338 max_segment = swiotlb_max_segment();
2364 if (!max_segment) 2339 if (!max_segment)
2365 max_segment = rounddown(UINT_MAX, PAGE_SIZE); 2340 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
2366 2341
@@ -2728,6 +2703,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2728 struct drm_i915_gem_request *request; 2703 struct drm_i915_gem_request *request;
2729 struct i915_gem_context *incomplete_ctx; 2704 struct i915_gem_context *incomplete_ctx;
2730 struct intel_timeline *timeline; 2705 struct intel_timeline *timeline;
2706 unsigned long flags;
2731 bool ring_hung; 2707 bool ring_hung;
2732 2708
2733 if (engine->irq_seqno_barrier) 2709 if (engine->irq_seqno_barrier)
@@ -2763,13 +2739,20 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2763 if (i915_gem_context_is_default(incomplete_ctx)) 2739 if (i915_gem_context_is_default(incomplete_ctx))
2764 return; 2740 return;
2765 2741
2742 timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
2743
2744 spin_lock_irqsave(&engine->timeline->lock, flags);
2745 spin_lock(&timeline->lock);
2746
2766 list_for_each_entry_continue(request, &engine->timeline->requests, link) 2747 list_for_each_entry_continue(request, &engine->timeline->requests, link)
2767 if (request->ctx == incomplete_ctx) 2748 if (request->ctx == incomplete_ctx)
2768 reset_request(request); 2749 reset_request(request);
2769 2750
2770 timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
2771 list_for_each_entry(request, &timeline->requests, link) 2751 list_for_each_entry(request, &timeline->requests, link)
2772 reset_request(request); 2752 reset_request(request);
2753
2754 spin_unlock(&timeline->lock);
2755 spin_unlock_irqrestore(&engine->timeline->lock, flags);
2773} 2756}
2774 2757
2775void i915_gem_reset(struct drm_i915_private *dev_priv) 2758void i915_gem_reset(struct drm_i915_private *dev_priv)
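The reset path above takes the engine timeline lock irqsave as the outer lock and nests the context timeline lock inside it; a self-contained sketch of that shape (struct tl and walk_both() are illustrative, not driver types):

#include <linux/spinlock.h>

struct tl {
	spinlock_t lock;
	/* ... request list ... */
};

static void walk_both(struct tl *outer, struct tl *inner)
{
	unsigned long flags;

	/* the outer lock disables interrupts once for the whole walk */
	spin_lock_irqsave(&outer->lock, flags);
	/* the inner lock nests without touching the irq state again */
	spin_lock(&inner->lock);

	/* ... walk both request lists, reset_request() as needed ... */

	spin_unlock(&inner->lock);
	spin_unlock_irqrestore(&outer->lock, flags);
}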
@@ -3503,7 +3486,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3503 vma->display_alignment = max_t(u64, vma->display_alignment, alignment); 3486 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3504 3487
3505 /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ 3488 /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
3506 if (obj->cache_dirty) { 3489 if (obj->cache_dirty || obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
3507 i915_gem_clflush_object(obj, true); 3490 i915_gem_clflush_object(obj, true);
3508 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); 3491 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
3509 } 3492 }
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index bd08814b015c..d534a316a16e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -199,6 +199,7 @@ found:
199 } 199 }
200 200
201 /* Unbinding will emit any required flushes */ 201 /* Unbinding will emit any required flushes */
202 ret = 0;
202 while (!list_empty(&eviction_list)) { 203 while (!list_empty(&eviction_list)) {
203 vma = list_first_entry(&eviction_list, 204 vma = list_first_entry(&eviction_list,
204 struct i915_vma, 205 struct i915_vma,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 097d9d8c2315..b8b877c91b0a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1181,14 +1181,14 @@ validate_exec_list(struct drm_device *dev,
1181 if (exec[i].offset != 1181 if (exec[i].offset !=
1182 gen8_canonical_addr(exec[i].offset & PAGE_MASK)) 1182 gen8_canonical_addr(exec[i].offset & PAGE_MASK))
1183 return -EINVAL; 1183 return -EINVAL;
1184
1185 /* From drm_mm perspective address space is continuous,
1186 * so from this point we're always using non-canonical
1187 * form internally.
1188 */
1189 exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
1190 } 1184 }
1191 1185
1186 /* From drm_mm perspective address space is continuous,
1187 * so from this point we're always using non-canonical
1188 * form internally.
1189 */
1190 exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
1191
1192 if (exec[i].alignment && !is_power_of_2(exec[i].alignment)) 1192 if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
1193 return -EINVAL; 1193 return -EINVAL;
1194 1194
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 4b3ff3e5b911..d09c74973cb3 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -66,8 +66,16 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
66 66
67 max_order = MAX_ORDER; 67 max_order = MAX_ORDER;
68#ifdef CONFIG_SWIOTLB 68#ifdef CONFIG_SWIOTLB
69 if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */ 69 if (swiotlb_nr_tbl()) {
70 max_order = min(max_order, ilog2(IO_TLB_SEGPAGES)); 70 unsigned int max_segment;
71
72 max_segment = swiotlb_max_segment();
73 if (max_segment) {
74 max_segment = max_t(unsigned int, max_segment,
75 PAGE_SIZE) >> PAGE_SHIFT;
76 max_order = min(max_order, ilog2(max_segment));
77 }
78 }
71#endif 79#endif
72 80
73 gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; 81 gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
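The conversion above turns a byte-granular swiotlb segment limit into a page-allocation order bound; a sketch of the same arithmetic (order_for_segment() is a hypothetical helper mirroring the hunk, and the worked example assumes 4 KiB pages):

#include <linux/kernel.h>
#include <linux/log2.h>

static unsigned int order_for_segment(unsigned int max_segment_bytes,
				      unsigned int max_order)
{
	unsigned int pages;

	if (!max_segment_bytes)		/* no swiotlb bounce limit */
		return max_order;

	pages = max_t(unsigned int, max_segment_bytes, PAGE_SIZE)
			>> PAGE_SHIFT;
	return min(max_order, (unsigned int)ilog2(pages));
}

/* e.g. 256 KiB segments with 4 KiB pages: 64 pages, so order 6 */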
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index e2b077df2da0..d229f47d1028 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -413,6 +413,25 @@ i915_gem_active_set(struct i915_gem_active *active,
413 rcu_assign_pointer(active->request, request); 413 rcu_assign_pointer(active->request, request);
414} 414}
415 415
416/**
417 * i915_gem_active_set_retire_fn - updates the retirement callback
418 * @active - the active tracker
419 * @fn - the routine called when the request is retired
420 * @mutex - struct_mutex used to guard retirements
421 *
422 * i915_gem_active_set_retire_fn() updates the function pointer that
423 * is called when the final request associated with the @active tracker
424 * is retired.
425 */
426static inline void
427i915_gem_active_set_retire_fn(struct i915_gem_active *active,
428 i915_gem_retire_fn fn,
429 struct mutex *mutex)
430{
431 lockdep_assert_held(mutex);
432 active->retire = fn ?: i915_gem_retire_noop;
433}
434
416static inline struct drm_i915_gem_request * 435static inline struct drm_i915_gem_request *
417__i915_gem_active_peek(const struct i915_gem_active *active) 436__i915_gem_active_peek(const struct i915_gem_active *active)
418{ 437{
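A possible usage of the new helper, assuming the i915_gem_retire_fn signature declared in this header and a hypothetical my_retire() callback:

/* my_retire() is hypothetical; the signature follows i915_gem_retire_fn
 * as used by this header.
 */
static void my_retire(struct i915_gem_active *active,
		      struct drm_i915_gem_request *request)
{
	/* release whatever the tracker was guarding for this request */
}

static void install_retire(struct drm_i915_private *dev_priv,
			   struct i915_gem_active *active)
{
	/* struct_mutex guards retirements, per the kernel-doc above */
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_active_set_retire_fn(active, my_retire,
				      &dev_priv->drm.struct_mutex);
	mutex_unlock(&dev_priv->drm.struct_mutex);
}

/* passing NULL restores the default no-op retirement (fn ?: noop) */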
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a792dcb902b5..e924a9516079 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
185 return ret; 185 return ret;
186 } 186 }
187 187
188 trace_i915_vma_bind(vma, bind_flags);
188 ret = vma->vm->bind_vma(vma, cache_level, bind_flags); 189 ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
189 if (ret) 190 if (ret)
190 return ret; 191 return ret;
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index dbe9fb41ae53..8d3e515f27ba 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
85 85
86 __drm_atomic_helper_plane_duplicate_state(plane, state); 86 __drm_atomic_helper_plane_duplicate_state(plane, state);
87 87
88 intel_state->vma = NULL;
89
88 return state; 90 return state;
89} 91}
90 92
@@ -100,6 +102,24 @@ void
100intel_plane_destroy_state(struct drm_plane *plane, 102intel_plane_destroy_state(struct drm_plane *plane,
101 struct drm_plane_state *state) 103 struct drm_plane_state *state)
102{ 104{
105 struct i915_vma *vma;
106
107 vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
108
109 /*
110 * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
111 * We currently don't clear all planes during driver unload, so we have
112 * to be able to unpin vma here for now.
113 *
114 * Normally this can only happen during unload when kmscon is disabled
115 * and userspace doesn't attempt to set a framebuffer at all.
116 */
117 if (vma) {
118 mutex_lock(&plane->dev->struct_mutex);
119 intel_unpin_fb_vma(vma);
120 mutex_unlock(&plane->dev->struct_mutex);
121 }
122
103 drm_atomic_helper_plane_destroy_state(plane, state); 123 drm_atomic_helper_plane_destroy_state(plane, state);
104} 124}
105 125
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 86ecec5601d4..588470eb8d39 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
499 struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); 499 struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
500 struct edid *edid; 500 struct edid *edid;
501 struct i2c_adapter *i2c; 501 struct i2c_adapter *i2c;
502 bool ret = false;
502 503
503 BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); 504 BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
504 505
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
515 */ 516 */
516 if (!is_digital) { 517 if (!is_digital) {
517 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); 518 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
518 return true; 519 ret = true;
520 } else {
521 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
519 } 522 }
520
521 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
522 } else { 523 } else {
523 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); 524 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
524 } 525 }
525 526
526 kfree(edid); 527 kfree(edid);
527 528
528 return false; 529 return ret;
529} 530}
530 531
531static enum drm_connector_status 532static enum drm_connector_status
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6daad8613760..891c86aef99d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2235,24 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
2235 i915_vma_pin_fence(vma); 2235 i915_vma_pin_fence(vma);
2236 } 2236 }
2237 2237
2238 i915_vma_get(vma);
2238err: 2239err:
2239 intel_runtime_pm_put(dev_priv); 2240 intel_runtime_pm_put(dev_priv);
2240 return vma; 2241 return vma;
2241} 2242}
2242 2243
2243void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) 2244void intel_unpin_fb_vma(struct i915_vma *vma)
2244{ 2245{
2245 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2246 lockdep_assert_held(&vma->vm->dev->struct_mutex);
2246 struct i915_ggtt_view view;
2247 struct i915_vma *vma;
2248
2249 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2250 2247
2251 intel_fill_fb_ggtt_view(&view, fb, rotation); 2248 if (WARN_ON_ONCE(!vma))
2252 vma = i915_gem_object_to_ggtt(obj, &view); 2249 return;
2253 2250
2254 i915_vma_unpin_fence(vma); 2251 i915_vma_unpin_fence(vma);
2255 i915_gem_object_unpin_from_display_plane(vma); 2252 i915_gem_object_unpin_from_display_plane(vma);
2253 i915_vma_put(vma);
2256} 2254}
2257 2255
2258static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, 2256static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2585,8 +2583,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
2585 * We only keep the x/y offsets, so push all of the 2583 * We only keep the x/y offsets, so push all of the
2586 * gtt offset into the x/y offsets. 2584 * gtt offset into the x/y offsets.
2587 */ 2585 */
2588 _intel_adjust_tile_offset(&x, &y, tile_size, 2586 _intel_adjust_tile_offset(&x, &y,
2589 tile_width, tile_height, pitch_tiles, 2587 tile_width, tile_height,
2588 tile_size, pitch_tiles,
2590 gtt_offset_rotated * tile_size, 0); 2589 gtt_offset_rotated * tile_size, 0);
2591 2590
2592 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; 2591 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -2746,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2746 struct drm_device *dev = intel_crtc->base.dev; 2745 struct drm_device *dev = intel_crtc->base.dev;
2747 struct drm_i915_private *dev_priv = to_i915(dev); 2746 struct drm_i915_private *dev_priv = to_i915(dev);
2748 struct drm_crtc *c; 2747 struct drm_crtc *c;
2749 struct intel_crtc *i;
2750 struct drm_i915_gem_object *obj; 2748 struct drm_i915_gem_object *obj;
2751 struct drm_plane *primary = intel_crtc->base.primary; 2749 struct drm_plane *primary = intel_crtc->base.primary;
2752 struct drm_plane_state *plane_state = primary->state; 2750 struct drm_plane_state *plane_state = primary->state;
@@ -2771,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2771 * an fb with another CRTC instead 2769 * an fb with another CRTC instead
2772 */ 2770 */
2773 for_each_crtc(dev, c) { 2771 for_each_crtc(dev, c) {
2774 i = to_intel_crtc(c); 2772 struct intel_plane_state *state;
2775 2773
2776 if (c == &intel_crtc->base) 2774 if (c == &intel_crtc->base)
2777 continue; 2775 continue;
2778 2776
2779 if (!i->active) 2777 if (!to_intel_crtc(c)->active)
2780 continue; 2778 continue;
2781 2779
2782 fb = c->primary->fb; 2780 state = to_intel_plane_state(c->primary->state);
2783 if (!fb) 2781 if (!state->vma)
2784 continue; 2782 continue;
2785 2783
2786 obj = intel_fb_obj(fb); 2784 if (intel_plane_ggtt_offset(state) == plane_config->base) {
2787 if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) { 2785 fb = c->primary->fb;
2788 drm_framebuffer_reference(fb); 2786 drm_framebuffer_reference(fb);
2789 goto valid_fb; 2787 goto valid_fb;
2790 } 2788 }
@@ -2805,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2805 return; 2803 return;
2806 2804
2807valid_fb: 2805valid_fb:
2806 mutex_lock(&dev->struct_mutex);
2807 intel_state->vma =
2808 intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
2809 mutex_unlock(&dev->struct_mutex);
2810 if (IS_ERR(intel_state->vma)) {
2811 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
2812 intel_crtc->pipe, PTR_ERR(intel_state->vma));
2813
2814 intel_state->vma = NULL;
2815 drm_framebuffer_unreference(fb);
2816 return;
2817 }
2818
2808 plane_state->src_x = 0; 2819 plane_state->src_x = 0;
2809 plane_state->src_y = 0; 2820 plane_state->src_y = 0;
2810 plane_state->src_w = fb->width << 16; 2821 plane_state->src_w = fb->width << 16;
@@ -2967,6 +2978,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
2967 unsigned int rotation = plane_state->base.rotation; 2978 unsigned int rotation = plane_state->base.rotation;
2968 int ret; 2979 int ret;
2969 2980
2981 if (!plane_state->base.visible)
2982 return 0;
2983
2970 /* Rotate src coordinates to match rotated GTT view */ 2984 /* Rotate src coordinates to match rotated GTT view */
2971 if (drm_rotation_90_or_270(rotation)) 2985 if (drm_rotation_90_or_270(rotation))
2972 drm_rect_rotate(&plane_state->base.src, 2986 drm_rect_rotate(&plane_state->base.src,
@@ -3097,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
3097 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 3111 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
3098 if (INTEL_GEN(dev_priv) >= 4) { 3112 if (INTEL_GEN(dev_priv) >= 4) {
3099 I915_WRITE(DSPSURF(plane), 3113 I915_WRITE(DSPSURF(plane),
3100 intel_fb_gtt_offset(fb, rotation) + 3114 intel_plane_ggtt_offset(plane_state) +
3101 intel_crtc->dspaddr_offset); 3115 intel_crtc->dspaddr_offset);
3102 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 3116 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
3103 I915_WRITE(DSPLINOFF(plane), linear_offset); 3117 I915_WRITE(DSPLINOFF(plane), linear_offset);
3104 } else { 3118 } else {
3105 I915_WRITE(DSPADDR(plane), 3119 I915_WRITE(DSPADDR(plane),
3106 intel_fb_gtt_offset(fb, rotation) + 3120 intel_plane_ggtt_offset(plane_state) +
3107 intel_crtc->dspaddr_offset); 3121 intel_crtc->dspaddr_offset);
3108 } 3122 }
3109 POSTING_READ(reg); 3123 POSTING_READ(reg);
@@ -3200,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
3200 3214
3201 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 3215 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
3202 I915_WRITE(DSPSURF(plane), 3216 I915_WRITE(DSPSURF(plane),
3203 intel_fb_gtt_offset(fb, rotation) + 3217 intel_plane_ggtt_offset(plane_state) +
3204 intel_crtc->dspaddr_offset); 3218 intel_crtc->dspaddr_offset);
3205 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3219 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3206 I915_WRITE(DSPOFFSET(plane), (y << 16) | x); 3220 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3223,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
3223 } 3237 }
3224} 3238}
3225 3239
3226u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
3227 unsigned int rotation)
3228{
3229 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3230 struct i915_ggtt_view view;
3231 struct i915_vma *vma;
3232
3233 intel_fill_fb_ggtt_view(&view, fb, rotation);
3234
3235 vma = i915_gem_object_to_ggtt(obj, &view);
3236 if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
3237 view.type))
3238 return -1;
3239
3240 return i915_ggtt_offset(vma);
3241}
3242
3243static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 3240static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3244{ 3241{
3245 struct drm_device *dev = intel_crtc->base.dev; 3242 struct drm_device *dev = intel_crtc->base.dev;
@@ -3434,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
3434 } 3431 }
3435 3432
3436 I915_WRITE(PLANE_SURF(pipe, 0), 3433 I915_WRITE(PLANE_SURF(pipe, 0),
3437 intel_fb_gtt_offset(fb, rotation) + surf_addr); 3434 intel_plane_ggtt_offset(plane_state) + surf_addr);
3438 3435
3439 POSTING_READ(PLANE_SURF(pipe, 0)); 3436 POSTING_READ(PLANE_SURF(pipe, 0));
3440} 3437}
@@ -4265,10 +4262,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
4265 drm_crtc_vblank_put(&intel_crtc->base); 4262 drm_crtc_vblank_put(&intel_crtc->base);
4266 4263
4267 wake_up_all(&dev_priv->pending_flip_queue); 4264 wake_up_all(&dev_priv->pending_flip_queue);
4268 queue_work(dev_priv->wq, &work->unpin_work);
4269
4270 trace_i915_flip_complete(intel_crtc->plane, 4265 trace_i915_flip_complete(intel_crtc->plane,
4271 work->pending_flip_obj); 4266 work->pending_flip_obj);
4267
4268 queue_work(dev_priv->wq, &work->unpin_work);
4272} 4269}
4273 4270
4274static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 4271static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
@@ -6846,6 +6843,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6846 } 6843 }
6847 6844
6848 state = drm_atomic_state_alloc(crtc->dev); 6845 state = drm_atomic_state_alloc(crtc->dev);
6846 if (!state) {
6847 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
6848 crtc->base.id, crtc->name);
6849 return;
6850 }
6851
6849 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; 6852 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
6850 6853
6851 /* Everything's already locked, -EDEADLK can't happen. */ 6854 /* Everything's already locked, -EDEADLK can't happen. */
@@ -11243,6 +11246,7 @@ found:
11243 } 11246 }
11244 11247
11245 old->restore_state = restore_state; 11248 old->restore_state = restore_state;
11249 drm_atomic_state_put(state);
11246 11250
11247 /* let the connector get through one full cycle before testing */ 11251 /* let the connector get through one full cycle before testing */
11248 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 11252 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -11522,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
11522 flush_work(&work->mmio_work); 11526 flush_work(&work->mmio_work);
11523 11527
11524 mutex_lock(&dev->struct_mutex); 11528 mutex_lock(&dev->struct_mutex);
11525 intel_unpin_fb_obj(work->old_fb, primary->state->rotation); 11529 intel_unpin_fb_vma(work->old_vma);
11526 i915_gem_object_put(work->pending_flip_obj); 11530 i915_gem_object_put(work->pending_flip_obj);
11527 mutex_unlock(&dev->struct_mutex); 11531 mutex_unlock(&dev->struct_mutex);
11528 11532
@@ -12232,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
12232 goto cleanup_pending; 12236 goto cleanup_pending;
12233 } 12237 }
12234 12238
12235 work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation); 12239 work->old_vma = to_intel_plane_state(primary->state)->vma;
12236 work->gtt_offset += intel_crtc->dspaddr_offset; 12240 to_intel_plane_state(primary->state)->vma = vma;
12241
12242 work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
12237 work->rotation = crtc->primary->state->rotation; 12243 work->rotation = crtc->primary->state->rotation;
12238 12244
12239 /* 12245 /*
@@ -12287,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
12287cleanup_request: 12293cleanup_request:
12288 i915_add_request_no_flush(request); 12294 i915_add_request_no_flush(request);
12289cleanup_unpin: 12295cleanup_unpin:
12290 intel_unpin_fb_obj(fb, crtc->primary->state->rotation); 12296 to_intel_plane_state(primary->state)->vma = work->old_vma;
12297 intel_unpin_fb_vma(vma);
12291cleanup_pending: 12298cleanup_pending:
12292 atomic_dec(&intel_crtc->unpin_work_count); 12299 atomic_dec(&intel_crtc->unpin_work_count);
12293unlock: 12300unlock:
@@ -14512,8 +14519,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
14512 break; 14519 break;
14513 14520
14514 case FENCE_FREE: 14521 case FENCE_FREE:
14515 drm_atomic_state_put(&state->base); 14522 {
14516 break; 14523 struct intel_atomic_helper *helper =
14524 &to_i915(state->base.dev)->atomic_helper;
14525
14526 if (llist_add(&state->freed, &helper->free_list))
14527 schedule_work(&helper->free_work);
14528 break;
14529 }
14517 } 14530 }
14518 14531
14519 return NOTIFY_DONE; 14532 return NOTIFY_DONE;
@@ -14774,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
14774 DRM_DEBUG_KMS("failed to pin object\n"); 14787 DRM_DEBUG_KMS("failed to pin object\n");
14775 return PTR_ERR(vma); 14788 return PTR_ERR(vma);
14776 } 14789 }
14790
14791 to_intel_plane_state(new_state)->vma = vma;
14777 } 14792 }
14778 14793
14779 return 0; 14794 return 0;
@@ -14792,19 +14807,12 @@ void
14792intel_cleanup_plane_fb(struct drm_plane *plane, 14807intel_cleanup_plane_fb(struct drm_plane *plane,
14793 struct drm_plane_state *old_state) 14808 struct drm_plane_state *old_state)
14794{ 14809{
14795 struct drm_i915_private *dev_priv = to_i915(plane->dev); 14810 struct i915_vma *vma;
14796 struct intel_plane_state *old_intel_state;
14797 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
14798 struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
14799
14800 old_intel_state = to_intel_plane_state(old_state);
14801
14802 if (!obj && !old_obj)
14803 return;
14804 14811
14805 if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR || 14812 /* Should only be called after a successful intel_prepare_plane_fb()! */
14806 !INTEL_INFO(dev_priv)->cursor_needs_physical)) 14813 vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
14807 intel_unpin_fb_obj(old_state->fb, old_state->rotation); 14814 if (vma)
14815 intel_unpin_fb_vma(vma);
14808} 14816}
14809 14817
14810int 14818int
@@ -15146,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
15146 if (!obj) 15154 if (!obj)
15147 addr = 0; 15155 addr = 0;
15148 else if (!INTEL_INFO(dev_priv)->cursor_needs_physical) 15156 else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
15149 addr = i915_gem_object_ggtt_offset(obj, NULL); 15157 addr = intel_plane_ggtt_offset(state);
15150 else 15158 else
15151 addr = obj->phys_handle->busaddr; 15159 addr = obj->phys_handle->busaddr;
15152 15160
@@ -16392,6 +16400,18 @@ fail:
16392 drm_modeset_acquire_fini(&ctx); 16400 drm_modeset_acquire_fini(&ctx);
16393} 16401}
16394 16402
16403static void intel_atomic_helper_free_state(struct work_struct *work)
16404{
16405 struct drm_i915_private *dev_priv =
16406 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
16407 struct intel_atomic_state *state, *next;
16408 struct llist_node *freed;
16409
16410 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
16411 llist_for_each_entry_safe(state, next, freed, freed)
16412 drm_atomic_state_put(&state->base);
16413}
16414
16395int intel_modeset_init(struct drm_device *dev) 16415int intel_modeset_init(struct drm_device *dev)
16396{ 16416{
16397 struct drm_i915_private *dev_priv = to_i915(dev); 16417 struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16411,6 +16431,9 @@ int intel_modeset_init(struct drm_device *dev)
16411 16431
16412 dev->mode_config.funcs = &intel_mode_funcs; 16432 dev->mode_config.funcs = &intel_mode_funcs;
16413 16433
16434 INIT_WORK(&dev_priv->atomic_helper.free_work,
16435 intel_atomic_helper_free_state);
16436
16414 intel_init_quirks(dev); 16437 intel_init_quirks(dev);
16415 16438
16416 intel_init_pm(dev_priv); 16439 intel_init_pm(dev_priv);
@@ -16791,7 +16814,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
16791 16814
16792 for_each_intel_crtc(dev, crtc) { 16815 for_each_intel_crtc(dev, crtc) {
16793 struct intel_crtc_state *crtc_state = crtc->config; 16816 struct intel_crtc_state *crtc_state = crtc->config;
16794 int pixclk = 0;
16795 16817
16796 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base); 16818 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
16797 memset(crtc_state, 0, sizeof(*crtc_state)); 16819 memset(crtc_state, 0, sizeof(*crtc_state));
@@ -16803,23 +16825,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
16803 crtc->base.enabled = crtc_state->base.enable; 16825 crtc->base.enabled = crtc_state->base.enable;
16804 crtc->active = crtc_state->base.active; 16826 crtc->active = crtc_state->base.active;
16805 16827
16806 if (crtc_state->base.active) { 16828 if (crtc_state->base.active)
16807 dev_priv->active_crtcs |= 1 << crtc->pipe; 16829 dev_priv->active_crtcs |= 1 << crtc->pipe;
16808 16830
16809 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
16810 pixclk = ilk_pipe_pixel_rate(crtc_state);
16811 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16812 pixclk = crtc_state->base.adjusted_mode.crtc_clock;
16813 else
16814 WARN_ON(dev_priv->display.modeset_calc_cdclk);
16815
16816 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
16817 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
16818 pixclk = DIV_ROUND_UP(pixclk * 100, 95);
16819 }
16820
16821 dev_priv->min_pixclk[crtc->pipe] = pixclk;
16822
16823 readout_plane_state(crtc); 16831 readout_plane_state(crtc);
16824 16832
16825 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", 16833 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
@@ -16892,6 +16900,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
16892 } 16900 }
16893 16901
16894 for_each_intel_crtc(dev, crtc) { 16902 for_each_intel_crtc(dev, crtc) {
16903 int pixclk = 0;
16904
16895 crtc->base.hwmode = crtc->config->base.adjusted_mode; 16905 crtc->base.hwmode = crtc->config->base.adjusted_mode;
16896 16906
16897 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); 16907 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
@@ -16919,10 +16929,23 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
16919 */ 16929 */
16920 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; 16930 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
16921 16931
16932 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
16933 pixclk = ilk_pipe_pixel_rate(crtc->config);
16934 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16935 pixclk = crtc->config->base.adjusted_mode.crtc_clock;
16936 else
16937 WARN_ON(dev_priv->display.modeset_calc_cdclk);
16938
16939 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
16940 if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
16941 pixclk = DIV_ROUND_UP(pixclk * 100, 95);
16942
16922 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); 16943 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
16923 update_scanline_offset(crtc); 16944 update_scanline_offset(crtc);
16924 } 16945 }
16925 16946
16947 dev_priv->min_pixclk[crtc->pipe] = pixclk;
16948
16926 intel_pipe_config_sanity_check(dev_priv, crtc->config); 16949 intel_pipe_config_sanity_check(dev_priv, crtc->config);
16927 } 16950 }
16928} 16951}
@@ -17024,47 +17047,19 @@ void intel_display_resume(struct drm_device *dev)
17024 17047
17025 if (ret) 17048 if (ret)
17026 DRM_ERROR("Restoring old state failed with %i\n", ret); 17049 DRM_ERROR("Restoring old state failed with %i\n", ret);
17027 drm_atomic_state_put(state); 17050 if (state)
17051 drm_atomic_state_put(state);
17028} 17052}
17029 17053
17030void intel_modeset_gem_init(struct drm_device *dev) 17054void intel_modeset_gem_init(struct drm_device *dev)
17031{ 17055{
17032 struct drm_i915_private *dev_priv = to_i915(dev); 17056 struct drm_i915_private *dev_priv = to_i915(dev);
17033 struct drm_crtc *c;
17034 struct drm_i915_gem_object *obj;
17035 17057
17036 intel_init_gt_powersave(dev_priv); 17058 intel_init_gt_powersave(dev_priv);
17037 17059
17038 intel_modeset_init_hw(dev); 17060 intel_modeset_init_hw(dev);
17039 17061
17040 intel_setup_overlay(dev_priv); 17062 intel_setup_overlay(dev_priv);
17041
17042 /*
17043 * Make sure any fbs we allocated at startup are properly
17044 * pinned & fenced. When we do the allocation it's too early
17045 * for this.
17046 */
17047 for_each_crtc(dev, c) {
17048 struct i915_vma *vma;
17049
17050 obj = intel_fb_obj(c->primary->fb);
17051 if (obj == NULL)
17052 continue;
17053
17054 mutex_lock(&dev->struct_mutex);
17055 vma = intel_pin_and_fence_fb_obj(c->primary->fb,
17056 c->primary->state->rotation);
17057 mutex_unlock(&dev->struct_mutex);
17058 if (IS_ERR(vma)) {
17059 DRM_ERROR("failed to pin boot fb on pipe %d\n",
17060 to_intel_crtc(c)->pipe);
17061 drm_framebuffer_unreference(c->primary->fb);
17062 c->primary->fb = NULL;
17063 c->primary->crtc = c->primary->state->crtc = NULL;
17064 update_state_fb(c->primary);
17065 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
17066 }
17067 }
17068} 17063}
17069 17064
17070int intel_connector_register(struct drm_connector *connector) 17065int intel_connector_register(struct drm_connector *connector)
@@ -17094,6 +17089,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
17094{ 17089{
17095 struct drm_i915_private *dev_priv = to_i915(dev); 17090 struct drm_i915_private *dev_priv = to_i915(dev);
17096 17091
17092 flush_work(&dev_priv->atomic_helper.free_work);
17093 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
17094
17097 intel_disable_gt_powersave(dev_priv); 17095 intel_disable_gt_powersave(dev_priv);
17098 17096
17099 /* 17097 /*
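The free_list/free_work machinery added above is an instance of the lock-free deferred-free pattern; a generic, self-contained sketch under that assumption (all names illustrative):

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred {
	struct llist_node node;
};

static LLIST_HEAD(free_list);

static void free_worker(struct work_struct *work)
{
	struct deferred *d, *next;
	struct llist_node *freed = llist_del_all(&free_list);

	/* drain the whole batch outside the producer's context */
	llist_for_each_entry_safe(d, next, freed, node)
		kfree(d);
}
static DECLARE_WORK(free_work, free_worker);

static void defer_free(struct deferred *d)
{
	/* llist_add() returns true only when the list was empty, so
	 * the work is scheduled once per batch rather than once per
	 * item; the FENCE_FREE case above relies on the same idiom.
	 */
	if (llist_add(&d->node, &free_list))
		schedule_work(&free_work);
}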
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d9bc19be855e..0b8e8eb85c19 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -355,7 +355,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
355 struct intel_dp *intel_dp); 355 struct intel_dp *intel_dp);
356static void 356static void
357intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 357intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
358 struct intel_dp *intel_dp); 358 struct intel_dp *intel_dp,
359 bool force_disable_vdd);
359static void 360static void
360intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp); 361intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
361 362
@@ -516,7 +517,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
516 517
517 /* init power sequencer on this pipe and port */ 518 /* init power sequencer on this pipe and port */
518 intel_dp_init_panel_power_sequencer(dev, intel_dp); 519 intel_dp_init_panel_power_sequencer(dev, intel_dp);
519 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); 520 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
520 521
521 /* 522 /*
522 * Even vdd force doesn't work until we've made 523 * Even vdd force doesn't work until we've made
@@ -553,7 +554,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
553 * Only the HW needs to be reprogrammed, the SW state is fixed and 554 * Only the HW needs to be reprogrammed, the SW state is fixed and
554 * has been setup during connector init. 555 * has been setup during connector init.
555 */ 556 */
556 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); 557 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
557 558
558 return 0; 559 return 0;
559} 560}
@@ -636,7 +637,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
636 port_name(port), pipe_name(intel_dp->pps_pipe)); 637 port_name(port), pipe_name(intel_dp->pps_pipe));
637 638
638 intel_dp_init_panel_power_sequencer(dev, intel_dp); 639 intel_dp_init_panel_power_sequencer(dev, intel_dp);
639 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); 640 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
640} 641}
641 642
642void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) 643void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
@@ -2912,7 +2913,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2912 2913
2913 /* init power sequencer on this pipe and port */ 2914 /* init power sequencer on this pipe and port */
2914 intel_dp_init_panel_power_sequencer(dev, intel_dp); 2915 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2915 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); 2916 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
2916} 2917}
2917 2918
2918static void vlv_pre_enable_dp(struct intel_encoder *encoder, 2919static void vlv_pre_enable_dp(struct intel_encoder *encoder,
@@ -5055,7 +5056,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5055 5056
5056static void 5057static void
5057intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 5058intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5058 struct intel_dp *intel_dp) 5059 struct intel_dp *intel_dp,
5060 bool force_disable_vdd)
5059{ 5061{
5060 struct drm_i915_private *dev_priv = to_i915(dev); 5062 struct drm_i915_private *dev_priv = to_i915(dev);
5061 u32 pp_on, pp_off, pp_div, port_sel = 0; 5063 u32 pp_on, pp_off, pp_div, port_sel = 0;
@@ -5068,6 +5070,31 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5068 5070
5069 intel_pps_get_registers(dev_priv, intel_dp, &regs); 5071 intel_pps_get_registers(dev_priv, intel_dp, &regs);
5070 5072
5073 /*
5074 * On some VLV machines the BIOS can leave the VDD
 5075	 * enabled even on power sequencers which aren't
5076 * hooked up to any port. This would mess up the
5077 * power domain tracking the first time we pick
5078 * one of these power sequencers for use since
5079 * edp_panel_vdd_on() would notice that the VDD was
5080 * already on and therefore wouldn't grab the power
5081 * domain reference. Disable VDD first to avoid this.
5082 * This also avoids spuriously turning the VDD on as
 5083	 * soon as the new power sequencer gets initialized.
5084 */
5085 if (force_disable_vdd) {
5086 u32 pp = ironlake_get_pp_control(intel_dp);
5087
5088 WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
5089
5090 if (pp & EDP_FORCE_VDD)
5091 DRM_DEBUG_KMS("VDD already on, disabling first\n");
5092
5093 pp &= ~EDP_FORCE_VDD;
5094
5095 I915_WRITE(regs.pp_ctrl, pp);
5096 }
5097
5071 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 5098 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5072 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 5099 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
5073 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 5100 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
@@ -5122,7 +5149,7 @@ static void intel_dp_pps_init(struct drm_device *dev,
5122 vlv_initial_power_sequencer_setup(intel_dp); 5149 vlv_initial_power_sequencer_setup(intel_dp);
5123 } else { 5150 } else {
5124 intel_dp_init_panel_power_sequencer(dev, intel_dp); 5151 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5125 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); 5152 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
5126 } 5153 }
5127} 5154}
5128 5155
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 58a756f2f224..a2f0e070d38d 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1730,7 +1730,8 @@ bxt_get_dpll(struct intel_crtc *crtc,
1730 return NULL; 1730 return NULL;
1731 1731
1732 if ((encoder->type == INTEL_OUTPUT_DP || 1732 if ((encoder->type == INTEL_OUTPUT_DP ||
1733 encoder->type == INTEL_OUTPUT_EDP) && 1733 encoder->type == INTEL_OUTPUT_EDP ||
1734 encoder->type == INTEL_OUTPUT_DP_MST) &&
1734 !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state)) 1735 !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
1735 return NULL; 1736 return NULL;
1736 1737
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd132c216a67..03a2112004f9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -370,11 +370,14 @@ struct intel_atomic_state {
370 struct skl_wm_values wm_results; 370 struct skl_wm_values wm_results;
371 371
372 struct i915_sw_fence commit_ready; 372 struct i915_sw_fence commit_ready;
373
374 struct llist_node freed;
373}; 375};
374 376
375struct intel_plane_state { 377struct intel_plane_state {
376 struct drm_plane_state base; 378 struct drm_plane_state base;
377 struct drm_rect clip; 379 struct drm_rect clip;
380 struct i915_vma *vma;
378 381
379 struct { 382 struct {
380 u32 offset; 383 u32 offset;
@@ -1044,6 +1047,7 @@ struct intel_flip_work {
1044 struct work_struct mmio_work; 1047 struct work_struct mmio_work;
1045 1048
1046 struct drm_crtc *crtc; 1049 struct drm_crtc *crtc;
1050 struct i915_vma *old_vma;
1047 struct drm_framebuffer *old_fb; 1051 struct drm_framebuffer *old_fb;
1048 struct drm_i915_gem_object *pending_flip_obj; 1052 struct drm_i915_gem_object *pending_flip_obj;
1049 struct drm_pending_vblank_event *event; 1053 struct drm_pending_vblank_event *event;
@@ -1271,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
1271 struct drm_modeset_acquire_ctx *ctx); 1275 struct drm_modeset_acquire_ctx *ctx);
1272struct i915_vma * 1276struct i915_vma *
1273intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); 1277intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
1274void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); 1278void intel_unpin_fb_vma(struct i915_vma *vma);
1275struct drm_framebuffer * 1279struct drm_framebuffer *
1276__intel_framebuffer_create(struct drm_device *dev, 1280__intel_framebuffer_create(struct drm_device *dev,
1277 struct drm_mode_fb_cmd2 *mode_cmd, 1281 struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1360,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
1360int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); 1364int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
1361int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); 1365int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
1362 1366
1363u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation); 1367static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
1368{
1369 return i915_ggtt_offset(state->vma);
1370}
1364 1371
1365u32 skl_plane_ctl_format(uint32_t pixel_format); 1372u32 skl_plane_ctl_format(uint32_t pixel_format);
1366u32 skl_plane_ctl_tiling(uint64_t fb_modifier); 1373u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 62f215b12eb5..f3a1d6a5cabe 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
173 if (IS_I945GM(dev_priv)) 173 if (IS_I945GM(dev_priv))
174 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 174 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
175 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 175 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
176 fbc_ctl |= params->fb.fence_reg; 176 fbc_ctl |= params->vma->fence->id;
177 I915_WRITE(FBC_CONTROL, fbc_ctl); 177 I915_WRITE(FBC_CONTROL, fbc_ctl);
178} 178}
179 179
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
193 else 193 else
194 dpfc_ctl |= DPFC_CTL_LIMIT_1X; 194 dpfc_ctl |= DPFC_CTL_LIMIT_1X;
195 195
196 if (params->fb.fence_reg != I915_FENCE_REG_NONE) { 196 if (params->vma->fence) {
197 dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg; 197 dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
198 I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); 198 I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
199 } else { 199 } else {
200 I915_WRITE(DPFC_FENCE_YOFF, 0); 200 I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
251 break; 251 break;
252 } 252 }
253 253
254 if (params->fb.fence_reg != I915_FENCE_REG_NONE) { 254 if (params->vma->fence) {
255 dpfc_ctl |= DPFC_CTL_FENCE_EN; 255 dpfc_ctl |= DPFC_CTL_FENCE_EN;
256 if (IS_GEN5(dev_priv)) 256 if (IS_GEN5(dev_priv))
257 dpfc_ctl |= params->fb.fence_reg; 257 dpfc_ctl |= params->vma->fence->id;
258 if (IS_GEN6(dev_priv)) { 258 if (IS_GEN6(dev_priv)) {
259 I915_WRITE(SNB_DPFC_CTL_SA, 259 I915_WRITE(SNB_DPFC_CTL_SA,
260 SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); 260 SNB_CPU_FENCE_ENABLE |
261 params->vma->fence->id);
261 I915_WRITE(DPFC_CPU_FENCE_OFFSET, 262 I915_WRITE(DPFC_CPU_FENCE_OFFSET,
262 params->crtc.fence_y_offset); 263 params->crtc.fence_y_offset);
263 } 264 }
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
269 } 270 }
270 271
271 I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); 272 I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
272 I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID); 273 I915_WRITE(ILK_FBC_RT_BASE,
274 i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
273 /* enable it... */ 275 /* enable it... */
274 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 276 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
275 277
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
319 break; 321 break;
320 } 322 }
321 323
322 if (params->fb.fence_reg != I915_FENCE_REG_NONE) { 324 if (params->vma->fence) {
323 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; 325 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
324 I915_WRITE(SNB_DPFC_CTL_SA, 326 I915_WRITE(SNB_DPFC_CTL_SA,
325 SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); 327 SNB_CPU_FENCE_ENABLE |
328 params->vma->fence->id);
326 I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); 329 I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
327 } else { 330 } else {
 326 I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); 329 I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
727 return effective_w <= max_w && effective_h <= max_h; 730 return effective_w <= max_w && effective_h <= max_h;
728} 731}
729 732
730/* XXX replace me when we have VMA tracking for intel_plane_state */
731static int get_fence_id(struct drm_framebuffer *fb)
732{
733 struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
734
735 return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
736}
737
738static void intel_fbc_update_state_cache(struct intel_crtc *crtc, 733static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
739 struct intel_crtc_state *crtc_state, 734 struct intel_crtc_state *crtc_state,
740 struct intel_plane_state *plane_state) 735 struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
743 struct intel_fbc *fbc = &dev_priv->fbc; 738 struct intel_fbc *fbc = &dev_priv->fbc;
744 struct intel_fbc_state_cache *cache = &fbc->state_cache; 739 struct intel_fbc_state_cache *cache = &fbc->state_cache;
745 struct drm_framebuffer *fb = plane_state->base.fb; 740 struct drm_framebuffer *fb = plane_state->base.fb;
746 struct drm_i915_gem_object *obj; 741
742 cache->vma = NULL;
747 743
748 cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; 744 cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
749 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 745 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
758 if (!cache->plane.visible) 754 if (!cache->plane.visible)
759 return; 755 return;
760 756
761 obj = intel_fb_obj(fb);
762
763 /* FIXME: We lack the proper locking here, so only run this on the
764 * platforms that need. */
765 if (IS_GEN(dev_priv, 5, 6))
766 cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
767 cache->fb.pixel_format = fb->pixel_format; 757 cache->fb.pixel_format = fb->pixel_format;
768 cache->fb.stride = fb->pitches[0]; 758 cache->fb.stride = fb->pitches[0];
769 cache->fb.fence_reg = get_fence_id(fb); 759
770 cache->fb.tiling_mode = i915_gem_object_get_tiling(obj); 760 cache->vma = plane_state->vma;
771} 761}
772 762
773static bool intel_fbc_can_activate(struct intel_crtc *crtc) 763static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
784 return false; 774 return false;
785 } 775 }
786 776
787 if (!cache->plane.visible) { 777 if (!cache->vma) {
788 fbc->no_fbc_reason = "primary plane not visible"; 778 fbc->no_fbc_reason = "primary plane not visible";
789 return false; 779 return false;
790 } 780 }
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 807 * so have no fence associated with it) due to aperture constraints 797
808 * at the time of pinning. 798 * at the time of pinning.
809 */ 799 */
810 if (cache->fb.tiling_mode != I915_TILING_X || 800 if (!cache->vma->fence) {
811 cache->fb.fence_reg == I915_FENCE_REG_NONE) {
812 fbc->no_fbc_reason = "framebuffer not tiled or fenced"; 801 fbc->no_fbc_reason = "framebuffer not tiled or fenced";
813 return false; 802 return false;
814 } 803 }
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
888 * zero. */ 877 * zero. */
889 memset(params, 0, sizeof(*params)); 878 memset(params, 0, sizeof(*params));
890 879
880 params->vma = cache->vma;
881
891 params->crtc.pipe = crtc->pipe; 882 params->crtc.pipe = crtc->pipe;
892 params->crtc.plane = crtc->plane; 883 params->crtc.plane = crtc->plane;
893 params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); 884 params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
894 885
895 params->fb.pixel_format = cache->fb.pixel_format; 886 params->fb.pixel_format = cache->fb.pixel_format;
896 params->fb.stride = cache->fb.stride; 887 params->fb.stride = cache->fb.stride;
897 params->fb.fence_reg = cache->fb.fence_reg;
898 888
899 params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); 889 params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
900
901 params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
902} 890}
903 891
904static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, 892static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
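With the state cache and reg params now carrying the plane's VMA, fence presence becomes a pointer test rather than a compare against the I915_FENCE_REG_NONE sentinel. A minimal sketch of the pattern every *_fbc_activate() hunk above follows:

    /* old: sentinel register number cached from the fb */
    if (params->fb.fence_reg != I915_FENCE_REG_NONE)
            dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;

    /* new: the pinned VMA either carries a fence or it does not */
    if (params->vma->fence)
            dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;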
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index beb08982dc0b..f4a8c4fc57c4 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
284out_destroy_fbi: 284out_destroy_fbi:
285 drm_fb_helper_release_fbi(helper); 285 drm_fb_helper_release_fbi(helper);
286out_unpin: 286out_unpin:
287 intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); 287 intel_unpin_fb_vma(vma);
288out_unlock: 288out_unlock:
289 mutex_unlock(&dev->struct_mutex); 289 mutex_unlock(&dev->struct_mutex);
290 return ret; 290 return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
549 549
550 if (ifbdev->fb) { 550 if (ifbdev->fb) {
551 mutex_lock(&ifbdev->helper.dev->struct_mutex); 551 mutex_lock(&ifbdev->helper.dev->struct_mutex);
552 intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0); 552 intel_unpin_fb_vma(ifbdev->vma);
553 mutex_unlock(&ifbdev->helper.dev->struct_mutex); 553 mutex_unlock(&ifbdev->helper.dev->struct_mutex);
554 554
555 drm_framebuffer_remove(&ifbdev->fb->base); 555 drm_framebuffer_remove(&ifbdev->fb->base);
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
742{ 742{
743 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; 743 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
744 744
745 if (!ifbdev)
746 return;
747
745 ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev); 748 ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
746} 749}
747 750
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d4961fa20c73..beabc17e7c8a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
979 uint32_t *batch, 979 uint32_t *batch,
980 uint32_t index) 980 uint32_t index)
981{ 981{
982 struct drm_i915_private *dev_priv = engine->i915;
983 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); 982 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
984 983
985 /*
986 * WaDisableLSQCROPERFforOCL:kbl
987 * This WA is implemented in skl_init_clock_gating() but since
988 * this batch updates GEN8_L3SQCREG4 with default value we need to
989 * set this bit here to retain the WA during flush.
990 */
991 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
992 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
993
994 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | 984 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
995 MI_SRM_LRM_GLOBAL_GTT)); 985 MI_SRM_LRM_GLOBAL_GTT));
996 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); 986 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index fd0e4dac7cc1..e589e17876dc 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -216,7 +216,8 @@ static void intel_overlay_submit_request(struct intel_overlay *overlay,
216{ 216{
217 GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip, 217 GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
218 &overlay->i915->drm.struct_mutex)); 218 &overlay->i915->drm.struct_mutex));
219 overlay->last_flip.retire = retire; 219 i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
220 &overlay->i915->drm.struct_mutex);
220 i915_gem_active_set(&overlay->last_flip, req); 221 i915_gem_active_set(&overlay->last_flip, req);
221 i915_add_request(req); 222 i915_add_request(req);
222} 223}
@@ -839,8 +840,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
839 if (ret) 840 if (ret)
840 goto out_unpin; 841 goto out_unpin;
841 842
842 i915_gem_track_fb(overlay->vma->obj, new_bo, 843 i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
843 INTEL_FRONTBUFFER_OVERLAY(pipe)); 844 vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe));
844 845
845 overlay->old_vma = overlay->vma; 846 overlay->old_vma = overlay->vma;
846 overlay->vma = vma; 847 overlay->vma = vma;
@@ -1430,6 +1431,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
1430 overlay->contrast = 75; 1431 overlay->contrast = 75;
1431 overlay->saturation = 146; 1432 overlay->saturation = 146;
1432 1433
1434 init_request_active(&overlay->last_flip, NULL);
1435
1433 regs = intel_overlay_map_regs(overlay); 1436 regs = intel_overlay_map_regs(overlay);
1434 if (!regs) 1437 if (!regs)
1435 goto out_unpin_bo; 1438 goto out_unpin_bo;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index aeb637dc1fdf..91cb4c422ad5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
1095 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1095 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1096 HDC_FENCE_DEST_SLM_DISABLE); 1096 HDC_FENCE_DEST_SLM_DISABLE);
1097 1097
1098 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1099 * involving this register should also be added to WA batch as required.
1100 */
1101 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
1102 /* WaDisableLSQCROPERFforOCL:kbl */
1103 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1104 GEN8_LQSC_RO_PERF_DIS);
1105
1106 /* WaToEnableHwFixForPushConstHWBug:kbl */ 1098 /* WaToEnableHwFixForPushConstHWBug:kbl */
1107 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) 1099 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
1108 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, 1100 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8f131a08d440..242a73e66d82 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
273 273
274 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); 274 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
275 I915_WRITE(PLANE_SURF(pipe, plane), 275 I915_WRITE(PLANE_SURF(pipe, plane),
276 intel_fb_gtt_offset(fb, rotation) + surf_addr); 276 intel_plane_ggtt_offset(plane_state) + surf_addr);
277 POSTING_READ(PLANE_SURF(pipe, plane)); 277 POSTING_READ(PLANE_SURF(pipe, plane));
278} 278}
279 279
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
458 I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); 458 I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
459 I915_WRITE(SPCNTR(pipe, plane), sprctl); 459 I915_WRITE(SPCNTR(pipe, plane), sprctl);
460 I915_WRITE(SPSURF(pipe, plane), 460 I915_WRITE(SPSURF(pipe, plane),
461 intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); 461 intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
462 POSTING_READ(SPSURF(pipe, plane)); 462 POSTING_READ(SPSURF(pipe, plane));
463} 463}
464 464
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
594 I915_WRITE(SPRSCALE(pipe), sprscale); 594 I915_WRITE(SPRSCALE(pipe), sprscale);
595 I915_WRITE(SPRCTL(pipe), sprctl); 595 I915_WRITE(SPRCTL(pipe), sprctl);
596 I915_WRITE(SPRSURF(pipe), 596 I915_WRITE(SPRSURF(pipe),
597 intel_fb_gtt_offset(fb, rotation) + sprsurf_offset); 597 intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
598 POSTING_READ(SPRSURF(pipe)); 598 POSTING_READ(SPRSURF(pipe));
599} 599}
600 600
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
721 I915_WRITE(DVSSCALE(pipe), dvsscale); 721 I915_WRITE(DVSSCALE(pipe), dvsscale);
722 I915_WRITE(DVSCNTR(pipe), dvscntr); 722 I915_WRITE(DVSCNTR(pipe), dvscntr);
723 I915_WRITE(DVSSURF(pipe), 723 I915_WRITE(DVSSURF(pipe),
724 intel_fb_gtt_offset(fb, rotation) + dvssurf_offset); 724 intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
725 POSTING_READ(DVSSURF(pipe)); 725 POSTING_READ(DVSSURF(pipe));
726} 726}
727 727
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 4942ca090b46..7890e30eb584 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -51,6 +51,9 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
51 struct drm_crtc_state *crtc_state; 51 struct drm_crtc_state *crtc_state;
52 struct drm_rect clip = { 0, }; 52 struct drm_rect clip = { 0, };
53 53
54 if (!state->crtc)
55 return 0;
56
54 crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); 57 crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
55 if (IS_ERR(crtc_state)) 58 if (IS_ERR(crtc_state))
56 return PTR_ERR(crtc_state); 59 return PTR_ERR(crtc_state);
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index d836b2274531..f7c870172220 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -38,6 +38,11 @@
38 * - TV Panel encoding via ENCT 38 * - TV Panel encoding via ENCT
39 */ 39 */
40 40
41/* HHI Registers */
42#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
43#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
44#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
45
41struct meson_cvbs_enci_mode meson_cvbs_enci_pal = { 46struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
42 .mode_tag = MESON_VENC_MODE_CVBS_PAL, 47 .mode_tag = MESON_VENC_MODE_CVBS_PAL,
43 .hso_begin = 3, 48 .hso_begin = 3,
@@ -242,6 +247,20 @@ void meson_venc_disable_vsync(struct meson_drm *priv)
242 247
243void meson_venc_init(struct meson_drm *priv) 248void meson_venc_init(struct meson_drm *priv)
244{ 249{
250 /* Disable CVBS VDAC */
251 regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
252 regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
253
254 /* Power Down Dacs */
255 writel_relaxed(0xff, priv->io_base + _REG(VENC_VDAC_SETTING));
256
257 /* Disable HDMI PHY */
258 regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
259
260 /* Disable HDMI */
261 writel_bits_relaxed(0x3, 0,
262 priv->io_base + _REG(VPU_HDMI_SETTING));
263
245 /* Disable all encoders */ 264 /* Disable all encoders */
246 writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN)); 265 writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
247 writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN)); 266 writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
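The new HHI_* defines give the regmap byte offsets, while the comments quote the data sheet's word offsets; the two agree on the assumption that each HHI register is 4 bytes wide. A standalone check of that arithmetic:

    #include <assert.h>

    int main(void)
    {
            /* byte offset = data sheet word offset * 4 */
            assert(0xbd * 4 == 0x2F4);      /* HHI_VDAC_CNTL0 */
            assert(0xbe * 4 == 0x2F8);      /* HHI_VDAC_CNTL1 */
            assert(0xe8 * 4 == 0x3a0);      /* HHI_HDMI_PHY_CNTL0 */
            return 0;
    }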
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index c809c085fd78..a2bcc70a03ef 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -167,7 +167,7 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
167 167
168 /* Disable CVBS VDAC */ 168 /* Disable CVBS VDAC */
169 regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0); 169 regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
170 regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0); 170 regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
171} 171}
172 172
173static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder) 173static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a18126150e11..686a580c711a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -213,7 +213,14 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
213void adreno_flush(struct msm_gpu *gpu) 213void adreno_flush(struct msm_gpu *gpu)
214{ 214{
215 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 215 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
216 uint32_t wptr = get_wptr(gpu->rb); 216 uint32_t wptr;
217
218 /*
219 * Mask wptr value that we calculate to fit in the HW range. This is
220 * to account for the possibility that the last command fit exactly into
221 * the ringbuffer and rb->next hasn't wrapped to zero yet
222 */
223 wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
217 224
218 /* ensure writes to ringbuffer have hit system memory: */ 225 /* ensure writes to ringbuffer have hit system memory: */
219 mb(); 226 mb();
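The mask is exact because msm_ringbuffer_new(), changed later in this diff, now rejects ring sizes that are not powers of two: size/4 is then a power-of-two dword count and wptr & (dwords - 1) is a cheap modulo. A standalone worked example with hypothetical values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t size = 4096;           /* ring size in bytes, power of 2 */
            uint32_t dwords = size / 4;     /* 1024 dword slots */
            uint32_t wptr = 1024;           /* last command ended exactly at the end */

            /* 1024 & 1023 == 0: wptr wraps to slot 0 instead of
             * running past the HW range */
            assert((wptr & (dwords - 1)) == 0);
            return 0;
    }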
@@ -338,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
338{ 345{
339 struct adreno_platform_config *config = pdev->dev.platform_data; 346 struct adreno_platform_config *config = pdev->dev.platform_data;
340 struct msm_gpu *gpu = &adreno_gpu->base; 347 struct msm_gpu *gpu = &adreno_gpu->base;
341 struct msm_mmu *mmu;
342 int ret; 348 int ret;
343 349
344 adreno_gpu->funcs = funcs; 350 adreno_gpu->funcs = funcs;
@@ -378,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
378 return ret; 384 return ret;
379 } 385 }
380 386
381 mmu = gpu->aspace->mmu; 387 if (gpu->aspace && gpu->aspace->mmu) {
382 if (mmu) { 388 struct msm_mmu *mmu = gpu->aspace->mmu;
383 ret = mmu->funcs->attach(mmu, iommu_ports, 389 ret = mmu->funcs->attach(mmu, iommu_ports,
384 ARRAY_SIZE(iommu_ports)); 390 ARRAY_SIZE(iommu_ports));
385 if (ret) 391 if (ret)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 5f6cd8745dbc..c396d459a9d0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
119 119
120static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) 120static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
121{ 121{
122 int i;
123 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 122 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
124 struct drm_plane *plane;
125 struct drm_plane_state *plane_state;
126
127 for_each_plane_in_state(state, plane, plane_state, i)
128 mdp5_plane_complete_commit(plane, plane_state);
129 123
130 if (mdp5_kms->smp) 124 if (mdp5_kms->smp)
131 mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); 125 mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 17b0cc101171..cdfc63d90c7b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -104,8 +104,6 @@ struct mdp5_plane_state {
104 104
105 /* assigned by crtc blender */ 105 /* assigned by crtc blender */
106 enum mdp_mixer_stage_id stage; 106 enum mdp_mixer_stage_id stage;
107
108 bool pending : 1;
109}; 107};
110#define to_mdp5_plane_state(x) \ 108#define to_mdp5_plane_state(x) \
111 container_of(x, struct mdp5_plane_state, base) 109 container_of(x, struct mdp5_plane_state, base)
@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
232void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); 230void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
233 231
234uint32_t mdp5_plane_get_flush(struct drm_plane *plane); 232uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
235void mdp5_plane_complete_commit(struct drm_plane *plane,
236 struct drm_plane_state *state);
237enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 233enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
238struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); 234struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
239 235
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index c099da7bc212..25d9d0a97156 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
179 drm_printf(p, "\tzpos=%u\n", pstate->zpos); 179 drm_printf(p, "\tzpos=%u\n", pstate->zpos);
180 drm_printf(p, "\talpha=%u\n", pstate->alpha); 180 drm_printf(p, "\talpha=%u\n", pstate->alpha);
181 drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); 181 drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
182 drm_printf(p, "\tpending=%u\n", pstate->pending);
183} 182}
184 183
185static void mdp5_plane_reset(struct drm_plane *plane) 184static void mdp5_plane_reset(struct drm_plane *plane)
@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
220 if (mdp5_state && mdp5_state->base.fb) 219 if (mdp5_state && mdp5_state->base.fb)
221 drm_framebuffer_reference(mdp5_state->base.fb); 220 drm_framebuffer_reference(mdp5_state->base.fb);
222 221
223 mdp5_state->pending = false;
224
225 return &mdp5_state->base; 222 return &mdp5_state->base;
226} 223}
227 224
@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
288 DBG("%s: check (%d -> %d)", plane->name, 285 DBG("%s: check (%d -> %d)", plane->name,
289 plane_enabled(old_state), plane_enabled(state)); 286 plane_enabled(old_state), plane_enabled(state));
290 287
291 /* We don't allow faster-than-vblank updates.. if we did add this
292 * some day, we would need to disallow in cases where hwpipe
293 * changes
294 */
295 if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
296 return -EBUSY;
297
298 max_width = config->hw->lm.max_width << 16; 288 max_width = config->hw->lm.max_width << 16;
299 max_height = config->hw->lm.max_height << 16; 289 max_height = config->hw->lm.max_height << 16;
300 290
@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
370 struct drm_plane_state *old_state) 360 struct drm_plane_state *old_state)
371{ 361{
372 struct drm_plane_state *state = plane->state; 362 struct drm_plane_state *state = plane->state;
373 struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
374 363
375 DBG("%s: update", plane->name); 364 DBG("%s: update", plane->name);
376 365
377 mdp5_state->pending = true;
378
379 if (plane_enabled(state)) { 366 if (plane_enabled(state)) {
380 int ret; 367 int ret;
381 368
@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
851 return pstate->hwpipe->flush_mask; 838 return pstate->hwpipe->flush_mask;
852} 839}
853 840
854/* called after vsync in thread context */
855void mdp5_plane_complete_commit(struct drm_plane *plane,
856 struct drm_plane_state *state)
857{
858 struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
859
860 pstate->pending = false;
861}
862
863/* initialize plane */ 841/* initialize plane */
864struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) 842struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
865{ 843{
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8bc59c7e261..8098677a3916 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj)
294 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 294 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
295 295
296 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { 296 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
297 if (!priv->aspace[id])
298 continue;
297 msm_gem_unmap_vma(priv->aspace[id], 299 msm_gem_unmap_vma(priv->aspace[id],
298 &msm_obj->domain[id], msm_obj->sgt); 300 &msm_obj->domain[id], msm_obj->sgt);
299 } 301 }
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 166e84e4f0d4..489676568a10 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -106,7 +106,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
106 pagefault_disable(); 106 pagefault_disable();
107 } 107 }
108 108
109 if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) { 109 if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
110 !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
110 DRM_ERROR("invalid flags: %x\n", submit_bo.flags); 111 DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
111 ret = -EINVAL; 112 ret = -EINVAL;
112 goto out_unlock; 113 goto out_unlock;
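The tightened check rejects an empty flag set as well as unknown bits, since a BO submitted with neither read nor write intent has no defined use. The same predicate as a helper (submit_bo_flags_valid is a hypothetical name for illustration, not part of the patch):

    /* flags must be a non-empty subset of MSM_SUBMIT_BO_FLAGS */
    static bool submit_bo_flags_valid(uint32_t flags)
    {
            return !(flags & ~MSM_SUBMIT_BO_FLAGS) &&  /* no unknown bits */
                    (flags &  MSM_SUBMIT_BO_FLAGS);    /* at least one valid bit */
    }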
@@ -290,7 +291,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
290{ 291{
291 uint32_t i, last_offset = 0; 292 uint32_t i, last_offset = 0;
292 uint32_t *ptr; 293 uint32_t *ptr;
293 int ret; 294 int ret = 0;
294 295
295 if (offset % 4) { 296 if (offset % 4) {
296 DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset); 297 DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
@@ -318,12 +319,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
318 319
319 ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc)); 320 ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
320 if (ret) 321 if (ret)
321 return -EFAULT; 322 goto out;
322 323
323 if (submit_reloc.submit_offset % 4) { 324 if (submit_reloc.submit_offset % 4) {
324 DRM_ERROR("non-aligned reloc offset: %u\n", 325 DRM_ERROR("non-aligned reloc offset: %u\n",
325 submit_reloc.submit_offset); 326 submit_reloc.submit_offset);
326 return -EINVAL; 327 ret = -EINVAL;
328 goto out;
327 } 329 }
328 330
329 /* offset in dwords: */ 331 /* offset in dwords: */
@@ -332,12 +334,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
332 if ((off >= (obj->base.size / 4)) || 334 if ((off >= (obj->base.size / 4)) ||
333 (off < last_offset)) { 335 (off < last_offset)) {
334 DRM_ERROR("invalid offset %u at reloc %u\n", off, i); 336 DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
335 return -EINVAL; 337 ret = -EINVAL;
338 goto out;
336 } 339 }
337 340
338 ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid); 341 ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
339 if (ret) 342 if (ret)
340 return ret; 343 goto out;
341 344
342 if (valid) 345 if (valid)
343 continue; 346 continue;
@@ -354,9 +357,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
354 last_offset = off; 357 last_offset = off;
355 } 358 }
356 359
360out:
357 msm_gem_put_vaddr_locked(&obj->base); 361 msm_gem_put_vaddr_locked(&obj->base);
358 362
359 return 0; 363 return ret;
360} 364}
361 365
362static void submit_cleanup(struct msm_gem_submit *submit) 366static void submit_cleanup(struct msm_gem_submit *submit)
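The submit_reloc() rework replaces the early returns with a single exit so that msm_gem_put_vaddr_locked() always balances the earlier get; previously every error path skipped the put and leaked the vaddr reference, and the final return 0 hid real failures, hence the ret = 0 initialization. A standalone sketch of the same acquire/single-exit/release shape, using malloc/free as a stand-in for the vaddr pin:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    static int process(const char *src, size_t len)
    {
            int ret = 0;
            char *map = malloc(len);        /* stand-in for the vaddr pin */

            if (!map)
                    return -ENOMEM;         /* nothing held yet: plain return is fine */

            if (len % 4) {                  /* stand-in for the alignment checks */
                    ret = -EINVAL;
                    goto out;               /* held: must route through cleanup */
            }
            memcpy(map, src, len);
    out:
            free(map);                      /* balances the acquire on every path */
            return ret;
    }

    int main(void)
    {
            char buf[8] = { 0 };
            return process(buf, sizeof(buf)) ? 1 : 0;
    }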
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index f326cf6a32e6..67b34e069abf 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
23 struct msm_ringbuffer *ring; 23 struct msm_ringbuffer *ring;
24 int ret; 24 int ret;
25 25
26 size = ALIGN(size, 4); /* size should be dword aligned */ 26 if (WARN_ON(!is_power_of_2(size)))
27 return ERR_PTR(-EINVAL);
27 28
28 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 29 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
29 if (!ring) { 30 if (!ring) {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 74856a8b8f35..e64f52464ecf 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
222 uint32_t mpllP; 222 uint32_t mpllP;
223 223
224 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); 224 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
225 mpllP = (mpllP >> 8) & 0xf;
225 if (!mpllP) 226 if (!mpllP)
226 mpllP = 4; 227 mpllP = 4;
227 228
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
232 uint32_t clock; 233 uint32_t clock;
233 234
234 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); 235 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
235 return clock; 236 return clock / 1000;
236 } 237 }
237 238
238 ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); 239 ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
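Two unit fixes here: the nForce path was using the whole config dword where only the P divider in bits 11:8 is meant, and the nForce2 path appears to have returned Hz where the caller expects kHz, hence the division by 1000. A standalone check of the bitfield extraction, with a hypothetical register value:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t mpllP = 0x00000420;    /* hypothetical config dword */

            mpllP = (mpllP >> 8) & 0xf;     /* P divider lives in bits 11:8 -> 4 */
            assert(mpllP == 4);
            if (!mpllP)                     /* 0 is still treated as divide-by-4 */
                    mpllP = 4;
            return 0;
    }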
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cef08da1da4e..6a157763dfc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev)
411 return ret; 411 return ret;
412 412
413 /* enable polling for external displays */ 413 /* enable polling for external displays */
414 drm_kms_helper_poll_enable(dev); 414 if (!dev->mode_config.poll_enabled)
415 drm_kms_helper_poll_enable(dev);
415 416
416 /* enable hotplug interrupts */ 417 /* enable hotplug interrupts */
417 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 418 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 59348fc41c77..bc85a45f91cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
773 pci_set_master(pdev); 773 pci_set_master(pdev);
774 774
775 ret = nouveau_do_resume(drm_dev, true); 775 ret = nouveau_do_resume(drm_dev, true);
776 drm_kms_helper_poll_enable(drm_dev); 776
777 if (!drm_dev->mode_config.poll_enabled)
778 drm_kms_helper_poll_enable(drm_dev);
779
777 /* do magic */ 780 /* do magic */
778 nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); 781 nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
779 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); 782 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8d5ed5bfdacb..42c1fa53d431 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -165,6 +165,8 @@ struct nouveau_drm {
165 struct backlight_device *backlight; 165 struct backlight_device *backlight;
166 struct list_head bl_connectors; 166 struct list_head bl_connectors;
167 struct work_struct hpd_work; 167 struct work_struct hpd_work;
168 struct work_struct fbcon_work;
169 int fbcon_new_state;
168#ifdef CONFIG_ACPI 170#ifdef CONFIG_ACPI
169 struct notifier_block acpi_nb; 171 struct notifier_block acpi_nb;
170#endif 172#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 2f2a3dcd4ad7..fa2d0a978ccc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
470 .fb_probe = nouveau_fbcon_create, 470 .fb_probe = nouveau_fbcon_create,
471}; 471};
472 472
473static void
474nouveau_fbcon_set_suspend_work(struct work_struct *work)
475{
476 struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
477 int state = READ_ONCE(drm->fbcon_new_state);
478
479 if (state == FBINFO_STATE_RUNNING)
480 pm_runtime_get_sync(drm->dev->dev);
481
482 console_lock();
483 if (state == FBINFO_STATE_RUNNING)
484 nouveau_fbcon_accel_restore(drm->dev);
485 drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
486 if (state != FBINFO_STATE_RUNNING)
487 nouveau_fbcon_accel_save_disable(drm->dev);
488 console_unlock();
489
490 if (state == FBINFO_STATE_RUNNING) {
491 pm_runtime_mark_last_busy(drm->dev->dev);
492 pm_runtime_put_sync(drm->dev->dev);
493 }
494}
495
473void 496void
474nouveau_fbcon_set_suspend(struct drm_device *dev, int state) 497nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
475{ 498{
476 struct nouveau_drm *drm = nouveau_drm(dev); 499 struct nouveau_drm *drm = nouveau_drm(dev);
477 if (drm->fbcon) { 500
478 console_lock(); 501 if (!drm->fbcon)
479 if (state == FBINFO_STATE_RUNNING) 502 return;
480 nouveau_fbcon_accel_restore(dev); 503
481 drm_fb_helper_set_suspend(&drm->fbcon->helper, state); 504 drm->fbcon_new_state = state;
482 if (state != FBINFO_STATE_RUNNING) 505 /* Since runtime resume can happen as a result of a sysfs operation,
483 nouveau_fbcon_accel_save_disable(dev); 506 * it's possible we already have the console locked. So handle fbcon
 484 console_unlock(); 507 * init/deinit from a separate work thread
485 } 508 */
509 schedule_work(&drm->fbcon_work);
486} 510}
487 511
488int 512int
@@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev)
502 return -ENOMEM; 526 return -ENOMEM;
503 527
504 drm->fbcon = fbcon; 528 drm->fbcon = fbcon;
529 INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
505 530
506 drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); 531 drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
507 532
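Because a runtime resume can be triggered from sysfs with the console lock already held, taking console_lock() synchronously in set_suspend could deadlock; the target state is instead handed off to process context. A minimal sketch of the handoff, using only the fields and calls added above:

    /* producer: publish the target state, then kick the worker */
    drm->fbcon_new_state = state;
    schedule_work(&drm->fbcon_work);

    /* consumer, in the worker where console_lock() is safe to take */
    int state = READ_ONCE(drm->fbcon_new_state);
    console_lock();
    drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
    console_unlock();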
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index ccdce1b4eec4..d5e58a38f160 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -99,6 +99,7 @@ struct nv84_fence_priv {
99 struct nouveau_bo *bo; 99 struct nouveau_bo *bo;
100 struct nouveau_bo *bo_gart; 100 struct nouveau_bo *bo_gart;
101 u32 *suspend; 101 u32 *suspend;
102 struct mutex mutex;
102}; 103};
103 104
104int nv84_fence_context_new(struct nouveau_channel *); 105int nv84_fence_context_new(struct nouveau_channel *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h
index 187ecdb82002..21a5775028cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_led.h
+++ b/drivers/gpu/drm/nouveau/nouveau_led.h
@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev)
42} 42}
43 43
44/* nouveau_led.c */ 44/* nouveau_led.c */
45#if IS_ENABLED(CONFIG_LEDS_CLASS) 45#if IS_REACHABLE(CONFIG_LEDS_CLASS)
46int nouveau_led_init(struct drm_device *dev); 46int nouveau_led_init(struct drm_device *dev);
47void nouveau_led_suspend(struct drm_device *dev); 47void nouveau_led_suspend(struct drm_device *dev);
48void nouveau_led_resume(struct drm_device *dev); 48void nouveau_led_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 08f9c6fa0f7f..1fba38622744 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
313 if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) { 313 if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
314 /* block access to objects not created via this interface */ 314 /* block access to objects not created via this interface */
315 owner = argv->v0.owner; 315 owner = argv->v0.owner;
316 if (argv->v0.object == 0ULL) 316 if (argv->v0.object == 0ULL &&
317 argv->v0.type != NVIF_IOCTL_V0_DEL)
317 argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ 318 argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
318 else 319 else
319 argv->v0.owner = NVDRM_OBJECT_USIF; 320 argv->v0.owner = NVDRM_OBJECT_USIF;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2c2c64507661..32097fd615fd 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
4052 } 4052 }
4053 } 4053 }
4054 4054
4055 for_each_crtc_in_state(state, crtc, crtc_state, i) {
4056 if (crtc->state->event)
4057 drm_crtc_vblank_get(crtc);
4058 }
4059
4055 /* Update plane(s). */ 4060 /* Update plane(s). */
4056 for_each_plane_in_state(state, plane, plane_state, i) { 4061 for_each_plane_in_state(state, plane, plane_state, i) {
4057 struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state); 4062 struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
4101 drm_crtc_send_vblank_event(crtc, crtc->state->event); 4106 drm_crtc_send_vblank_event(crtc, crtc->state->event);
4102 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 4107 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4103 crtc->state->event = NULL; 4108 crtc->state->event = NULL;
4109 drm_crtc_vblank_put(crtc);
4104 } 4110 }
4105 } 4111 }
4106 4112
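Each CRTC with a pending event now holds a vblank reference from just before the flip is programmed until the event has been delivered, keeping the vblank interrupt enabled across that window. Condensing the two hunks into one sequence:

    /* before programming the update */
    if (crtc->state->event)
            drm_crtc_vblank_get(crtc);

    /* once the flip completes */
    drm_crtc_send_vblank_event(crtc, crtc->state->event);
    crtc->state->event = NULL;
    drm_crtc_vblank_put(crtc);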
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 52b87ae83e7b..f0b322bec7df 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
107 struct nv84_fence_chan *fctx = chan->fence; 107 struct nv84_fence_chan *fctx = chan->fence;
108 108
109 nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); 109 nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
110 mutex_lock(&priv->mutex);
110 nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); 111 nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
111 nouveau_bo_vma_del(priv->bo, &fctx->vma); 112 nouveau_bo_vma_del(priv->bo, &fctx->vma);
113 mutex_unlock(&priv->mutex);
112 nouveau_fence_context_del(&fctx->base); 114 nouveau_fence_context_del(&fctx->base);
113 chan->fence = NULL; 115 chan->fence = NULL;
114 nouveau_fence_context_free(&fctx->base); 116 nouveau_fence_context_free(&fctx->base);
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
134 fctx->base.sync32 = nv84_fence_sync32; 136 fctx->base.sync32 = nv84_fence_sync32;
135 fctx->base.sequence = nv84_fence_read(chan); 137 fctx->base.sequence = nv84_fence_read(chan);
136 138
139 mutex_lock(&priv->mutex);
137 ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); 140 ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
138 if (ret == 0) { 141 if (ret == 0) {
139 ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, 142 ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
140 &fctx->vma_gart); 143 &fctx->vma_gart);
141 } 144 }
145 mutex_unlock(&priv->mutex);
142 146
143 if (ret) 147 if (ret)
144 nv84_fence_context_del(chan); 148 nv84_fence_context_del(chan);
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm)
212 priv->base.context_base = dma_fence_context_alloc(priv->base.contexts); 216 priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
213 priv->base.uevent = true; 217 priv->base.uevent = true;
214 218
219 mutex_init(&priv->mutex);
220
 215 /* Use VRAM if there is any; otherwise fall back to system memory */ 221
216 domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : 222 domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
217 /* 223 /*
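The VMA add/del calls on the shared fence BOs can race when channels are created and torn down concurrently, so a driver-private mutex now serializes them. The pairing, condensed from the three hunks above:

    mutex_init(&priv->mutex);               /* once, in nv84_fence_create() */

    mutex_lock(&priv->mutex);               /* around every vma add ... */
    ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
    mutex_unlock(&priv->mutex);

    mutex_lock(&priv->mutex);               /* ... and every vma del */
    nouveau_bo_vma_del(priv->bo, &fctx->vma);
    mutex_unlock(&priv->mutex);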
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index 6f0436df0219..f8f2f16c22a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
59 ); 59 );
60 } 60 }
61 for (i = 0; i < size; i++) 61 for (i = 0; i < size; i++)
62 nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]); 62 nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
63 for (; i < 0x60; i++) 63 for (; i < 0x60; i++)
64 nvkm_wr32(device, 0x61c440 + soff, (i << 8)); 64 nvkm_wr32(device, 0x61c440 + soff, (i << 8));
65 nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003); 65 nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 567466f93cd5..0db8efbf1c2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
433 case 0x94: 433 case 0x94:
434 case 0x96: 434 case 0x96:
435 case 0x98: 435 case 0x98:
436 case 0xaa:
437 case 0xac:
438 return true; 436 return true;
439 default: 437 default:
440 break; 438 break;
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index fb16070b266e..4a4f9533c53b 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -205,8 +205,8 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
205 } 205 }
206 206
207 if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) || 207 if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
208 x >= (crtc->x + crtc->mode.crtc_hdisplay) || 208 x >= (crtc->x + crtc->mode.hdisplay) ||
209 y >= (crtc->y + crtc->mode.crtc_vdisplay)) 209 y >= (crtc->y + crtc->mode.vdisplay))
210 goto out_of_bounds; 210 goto out_of_bounds;
211 211
212 x += xorigin; 212 x += xorigin;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 00ea0002b539..30bd4a6a9d46 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -97,9 +97,10 @@
97 * 2.46.0 - Add PFP_SYNC_ME support on evergreen 97 * 2.46.0 - Add PFP_SYNC_ME support on evergreen
98 * 2.47.0 - Add UVD_NO_OP register support 98 * 2.47.0 - Add UVD_NO_OP register support
99 * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI 99 * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
100 * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
100 */ 101 */
101#define KMS_DRIVER_MAJOR 2 102#define KMS_DRIVER_MAJOR 2
102#define KMS_DRIVER_MINOR 48 103#define KMS_DRIVER_MINOR 49
103#define KMS_DRIVER_PATCHLEVEL 0 104#define KMS_DRIVER_PATCHLEVEL 0
104int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 105int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
105int radeon_driver_unload_kms(struct drm_device *dev); 106int radeon_driver_unload_kms(struct drm_device *dev);
@@ -366,11 +367,10 @@ static void
366radeon_pci_shutdown(struct pci_dev *pdev) 367radeon_pci_shutdown(struct pci_dev *pdev)
367{ 368{
368 /* if we are running in a VM, make sure the device 369 /* if we are running in a VM, make sure the device
 369 * is torn down properly on reboot/shutdown. 370 * is torn down properly on reboot/shutdown
370 * unfortunately we can't detect certain
371 * hypervisors so just do this all the time.
372 */ 371 */
373 radeon_pci_remove(pdev); 372 if (radeon_device_is_virtual())
373 radeon_pci_remove(pdev);
374} 374}
375 375
376static int radeon_pmops_suspend(struct device *dev) 376static int radeon_pmops_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0bcffd8a7bd3..96683f5b2b1b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
220 220
221 man = &rdev->mman.bdev.man[TTM_PL_VRAM]; 221 man = &rdev->mman.bdev.man[TTM_PL_VRAM];
222 222
223 args->vram_size = rdev->mc.real_vram_size; 223 args->vram_size = (u64)man->size << PAGE_SHIFT;
224 args->vram_visible = (u64)man->size << PAGE_SHIFT; 224 args->vram_visible = rdev->mc.visible_vram_size;
225 args->vram_visible -= rdev->vram_pin_size; 225 args->vram_visible -= rdev->vram_pin_size;
226 args->gart_size = rdev->mc.gtt_size; 226 args->gart_size = rdev->mc.gtt_size;
227 args->gart_size -= rdev->gart_pin_size; 227 args->gart_size -= rdev->gart_pin_size;
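Matching the 2.49.0 interface note above, vram_size now reports what the TTM VRAM manager can actually hand out, and vram_visible the CPU-visible aperture minus pinned memory. A standalone worked example with hypothetical sizes (PAGE_SHIFT of 12 assumed):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* hypothetical 2 GiB card, 256 MiB CPU-visible BAR, 64 MiB pinned */
            uint64_t man_size_pages    = 2ull << (30 - 12);  /* TTM pages, 4 KiB each */
            uint64_t visible_vram_size = 256ull << 20;
            uint64_t vram_pin_size     = 64ull << 20;

            uint64_t vram_size    = man_size_pages << 12;    /* what TTM can allocate */
            uint64_t vram_visible = visible_vram_size - vram_pin_size;

            assert(vram_size == 2ull << 30);                 /* 2 GiB */
            assert(vram_visible == 192ull << 20);            /* 192 MiB */
            return 0;
    }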
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ad4d7b8b8322..414776811e71 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -50,7 +50,6 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin");
50MODULE_FIRMWARE("radeon/tahiti_mc.bin"); 50MODULE_FIRMWARE("radeon/tahiti_mc.bin");
51MODULE_FIRMWARE("radeon/tahiti_rlc.bin"); 51MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
52MODULE_FIRMWARE("radeon/tahiti_smc.bin"); 52MODULE_FIRMWARE("radeon/tahiti_smc.bin");
53MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
54 53
55MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); 54MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
56MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); 55MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
@@ -115,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin");
115MODULE_FIRMWARE("radeon/hainan_rlc.bin"); 114MODULE_FIRMWARE("radeon/hainan_rlc.bin");
116MODULE_FIRMWARE("radeon/hainan_smc.bin"); 115MODULE_FIRMWARE("radeon/hainan_smc.bin");
117MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); 116MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
117MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
118
119MODULE_FIRMWARE("radeon/si58_mc.bin");
118 120
119static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); 121static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
120static void si_pcie_gen3_enable(struct radeon_device *rdev); 122static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1651,15 +1653,14 @@ static int si_init_microcode(struct radeon_device *rdev)
1651 int err; 1653 int err;
1652 int new_fw = 0; 1654 int new_fw = 0;
1653 bool new_smc = false; 1655 bool new_smc = false;
1656 bool si58_fw = false;
1657 bool banks2_fw = false;
1654 1658
1655 DRM_DEBUG("\n"); 1659 DRM_DEBUG("\n");
1656 1660
1657 switch (rdev->family) { 1661 switch (rdev->family) {
1658 case CHIP_TAHITI: 1662 case CHIP_TAHITI:
1659 chip_name = "TAHITI"; 1663 chip_name = "TAHITI";
1660 /* XXX: figure out which Tahitis need the new ucode */
1661 if (0)
1662 new_smc = true;
1663 new_chip_name = "tahiti"; 1664 new_chip_name = "tahiti";
1664 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1665 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1665 me_req_size = SI_PM4_UCODE_SIZE * 4; 1666 me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1671,12 +1672,9 @@ static int si_init_microcode(struct radeon_device *rdev)
1671 break; 1672 break;
1672 case CHIP_PITCAIRN: 1673 case CHIP_PITCAIRN:
1673 chip_name = "PITCAIRN"; 1674 chip_name = "PITCAIRN";
1674 if ((rdev->pdev->revision == 0x81) || 1675 if ((rdev->pdev->revision == 0x81) &&
1675 (rdev->pdev->device == 0x6810) || 1676 ((rdev->pdev->device == 0x6810) ||
1676 (rdev->pdev->device == 0x6811) || 1677 (rdev->pdev->device == 0x6811)))
1677 (rdev->pdev->device == 0x6816) ||
1678 (rdev->pdev->device == 0x6817) ||
1679 (rdev->pdev->device == 0x6806))
1680 new_smc = true; 1678 new_smc = true;
1681 new_chip_name = "pitcairn"; 1679 new_chip_name = "pitcairn";
1682 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1680 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1689,15 +1687,15 @@ static int si_init_microcode(struct radeon_device *rdev)
1689 break; 1687 break;
1690 case CHIP_VERDE: 1688 case CHIP_VERDE:
1691 chip_name = "VERDE"; 1689 chip_name = "VERDE";
1692 if ((rdev->pdev->revision == 0x81) || 1690 if (((rdev->pdev->device == 0x6820) &&
1693 (rdev->pdev->revision == 0x83) || 1691 ((rdev->pdev->revision == 0x81) ||
1694 (rdev->pdev->revision == 0x87) || 1692 (rdev->pdev->revision == 0x83))) ||
1695 (rdev->pdev->device == 0x6820) || 1693 ((rdev->pdev->device == 0x6821) &&
1696 (rdev->pdev->device == 0x6821) || 1694 ((rdev->pdev->revision == 0x83) ||
1697 (rdev->pdev->device == 0x6822) || 1695 (rdev->pdev->revision == 0x87))) ||
1698 (rdev->pdev->device == 0x6823) || 1696 ((rdev->pdev->revision == 0x87) &&
1699 (rdev->pdev->device == 0x682A) || 1697 ((rdev->pdev->device == 0x6823) ||
1700 (rdev->pdev->device == 0x682B)) 1698 (rdev->pdev->device == 0x682b))))
1701 new_smc = true; 1699 new_smc = true;
1702 new_chip_name = "verde"; 1700 new_chip_name = "verde";
1703 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1701 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1710,13 +1708,13 @@ static int si_init_microcode(struct radeon_device *rdev)
1710 break; 1708 break;
1711 case CHIP_OLAND: 1709 case CHIP_OLAND:
1712 chip_name = "OLAND"; 1710 chip_name = "OLAND";
1713 if ((rdev->pdev->revision == 0xC7) || 1711 if (((rdev->pdev->revision == 0x81) &&
1714 (rdev->pdev->revision == 0x80) || 1712 ((rdev->pdev->device == 0x6600) ||
1715 (rdev->pdev->revision == 0x81) || 1713 (rdev->pdev->device == 0x6604) ||
1716 (rdev->pdev->revision == 0x83) || 1714 (rdev->pdev->device == 0x6605) ||
1717 (rdev->pdev->revision == 0x87) || 1715 (rdev->pdev->device == 0x6610))) ||
1718 (rdev->pdev->device == 0x6604) || 1716 ((rdev->pdev->revision == 0x83) &&
1719 (rdev->pdev->device == 0x6605)) 1717 (rdev->pdev->device == 0x6610)))
1720 new_smc = true; 1718 new_smc = true;
1721 new_chip_name = "oland"; 1719 new_chip_name = "oland";
1722 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1720 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1728,13 +1726,17 @@ static int si_init_microcode(struct radeon_device *rdev)
1728 break; 1726 break;
1729 case CHIP_HAINAN: 1727 case CHIP_HAINAN:
1730 chip_name = "HAINAN"; 1728 chip_name = "HAINAN";
1731 if ((rdev->pdev->revision == 0x81) || 1729 if (((rdev->pdev->revision == 0x81) &&
1732 (rdev->pdev->revision == 0x83) || 1730 (rdev->pdev->device == 0x6660)) ||
1733 (rdev->pdev->revision == 0xC3) || 1731 ((rdev->pdev->revision == 0x83) &&
1734 (rdev->pdev->device == 0x6664) || 1732 ((rdev->pdev->device == 0x6660) ||
1735 (rdev->pdev->device == 0x6665) || 1733 (rdev->pdev->device == 0x6663) ||
1736 (rdev->pdev->device == 0x6667)) 1734 (rdev->pdev->device == 0x6665) ||
1735 (rdev->pdev->device == 0x6667))))
1737 new_smc = true; 1736 new_smc = true;
1737 else if ((rdev->pdev->revision == 0xc3) &&
1738 (rdev->pdev->device == 0x6665))
1739 banks2_fw = true;
1738 new_chip_name = "hainan"; 1740 new_chip_name = "hainan";
1739 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1741 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1740 me_req_size = SI_PM4_UCODE_SIZE * 4; 1742 me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1746,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev)
1746 default: BUG(); 1748 default: BUG();
1747 } 1749 }
1748 1750
1751 /* this memory configuration requires special firmware */
1752 if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
1753 si58_fw = true;
1754
1749 DRM_INFO("Loading %s Microcode\n", new_chip_name); 1755 DRM_INFO("Loading %s Microcode\n", new_chip_name);
1750 1756
1751 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); 1757 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
@@ -1849,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev)
1849 } 1855 }
1850 } 1856 }
1851 1857
1852 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); 1858 if (si58_fw)
1859 snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
1860 else
1861 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
1853 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 1862 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1854 if (err) { 1863 if (err) {
1855 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); 1864 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
@@ -1880,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev)
1880 } 1889 }
1881 } 1890 }
1882 1891
1883 if (new_smc) 1892 if (banks2_fw)
1893 snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
1894 else if (new_smc)
1884 snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); 1895 snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
1885 else 1896 else
1886 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); 1897 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
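Firmware choice is now probe-driven: an MC_SEQ_MISC0 read selects the si58 MC image, and one specific Hainan revision/device pair selects the banks_k_2 SMC image, with the most specific match winning. The resulting selection order, condensed from the hunks above:

    /* MC image: the 0x58 memory configuration needs its own ucode */
    if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
            snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
    else
            snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);

    /* SMC image: most specific match wins */
    if (banks2_fw)
            snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
    else if (new_smc)
            snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
    else
            snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);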
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8b5e697f2549..2944916f7102 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3008,30 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
3008 (rdev->pdev->device == 0x6817) || 3008 (rdev->pdev->device == 0x6817) ||
3009 (rdev->pdev->device == 0x6806)) 3009 (rdev->pdev->device == 0x6806))
3010 max_mclk = 120000; 3010 max_mclk = 120000;
3011 } else if (rdev->family == CHIP_VERDE) {
3012 if ((rdev->pdev->revision == 0x81) ||
3013 (rdev->pdev->revision == 0x83) ||
3014 (rdev->pdev->revision == 0x87) ||
3015 (rdev->pdev->device == 0x6820) ||
3016 (rdev->pdev->device == 0x6821) ||
3017 (rdev->pdev->device == 0x6822) ||
3018 (rdev->pdev->device == 0x6823) ||
3019 (rdev->pdev->device == 0x682A) ||
3020 (rdev->pdev->device == 0x682B)) {
3021 max_sclk = 75000;
3022 max_mclk = 80000;
3023 }
3024 } else if (rdev->family == CHIP_OLAND) {
3025 if ((rdev->pdev->revision == 0xC7) ||
3026 (rdev->pdev->revision == 0x80) ||
3027 (rdev->pdev->revision == 0x81) ||
3028 (rdev->pdev->revision == 0x83) ||
3029 (rdev->pdev->revision == 0x87) ||
3030 (rdev->pdev->device == 0x6604) ||
3031 (rdev->pdev->device == 0x6605)) {
3032 max_sclk = 75000;
3033 max_mclk = 80000;
3034 }
3035 } else if (rdev->family == CHIP_HAINAN) { 3011 } else if (rdev->family == CHIP_HAINAN) {
3036 if ((rdev->pdev->revision == 0x81) || 3012 if ((rdev->pdev->revision == 0x81) ||
3037 (rdev->pdev->revision == 0x83) || 3013 (rdev->pdev->revision == 0x83) ||
@@ -3040,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
3040 (rdev->pdev->device == 0x6665) || 3016 (rdev->pdev->device == 0x6665) ||
3041 (rdev->pdev->device == 0x6667)) { 3017 (rdev->pdev->device == 0x6667)) {
3042 max_sclk = 75000; 3018 max_sclk = 75000;
3043 max_mclk = 80000;
3044 } 3019 }
3045 } 3020 }
3046 /* Apply dpm quirks */ 3021 /* Apply dpm quirks */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 725dffad5640..6dfdb145f3bb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -856,7 +856,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
856 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 856 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
857 struct drm_device *dev = crtc->dev; 857 struct drm_device *dev = crtc->dev;
858 struct tilcdc_drm_private *priv = dev->dev_private; 858 struct tilcdc_drm_private *priv = dev->dev_private;
859 uint32_t stat; 859 uint32_t stat, reg;
860 860
861 stat = tilcdc_read_irqstatus(dev); 861 stat = tilcdc_read_irqstatus(dev);
862 tilcdc_clear_irqstatus(dev, stat); 862 tilcdc_clear_irqstatus(dev, stat);
@@ -921,17 +921,26 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
921 dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost", 921 dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
922 __func__, stat); 922 __func__, stat);
923 tilcdc_crtc->frame_intact = false; 923 tilcdc_crtc->frame_intact = false;
924 if (tilcdc_crtc->sync_lost_count++ > 924 if (priv->rev == 1) {
925 SYNC_LOST_COUNT_LIMIT) { 925 reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
926 dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, recovering", __func__, stat); 926 if (reg & LCDC_RASTER_ENABLE) {
927 queue_work(system_wq, &tilcdc_crtc->recover_work);
928 if (priv->rev == 1)
929 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, 927 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
930 LCDC_V1_SYNC_LOST_INT_ENA); 928 LCDC_RASTER_ENABLE);
931 else 929 tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
930 LCDC_RASTER_ENABLE);
931 }
932 } else {
933 if (tilcdc_crtc->sync_lost_count++ >
934 SYNC_LOST_COUNT_LIMIT) {
935 dev_err(dev->dev,
936 "%s(0x%08x): Sync lost flood detected, recovering",
937 __func__, stat);
938 queue_work(system_wq,
939 &tilcdc_crtc->recover_work);
932 tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, 940 tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
933 LCDC_SYNC_LOST); 941 LCDC_SYNC_LOST);
934 tilcdc_crtc->sync_lost_count = 0; 942 tilcdc_crtc->sync_lost_count = 0;
943 }
935 } 944 }
936 } 945 }
937 946
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index a0fd3e66bc4b..7aadce1f7e7a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
839 839
840 } 840 }
841 841
842 __drm_atomic_helper_crtc_destroy_state(state); 842 drm_atomic_helper_crtc_destroy_state(crtc, state);
843} 843}
844 844
845static const struct drm_crtc_funcs vc4_crtc_funcs = { 845static const struct drm_crtc_funcs vc4_crtc_funcs = {
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index db920771bfb5..ab3016982466 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
594 args->shader_rec_count); 594 args->shader_rec_count);
595 struct vc4_bo *bo; 595 struct vc4_bo *bo;
596 596
597 if (uniforms_offset < shader_rec_offset || 597 if (shader_rec_offset < args->bin_cl_size ||
598 uniforms_offset < shader_rec_offset ||
598 exec_size < uniforms_offset || 599 exec_size < uniforms_offset ||
599 args->shader_rec_count >= (UINT_MAX / 600 args->shader_rec_count >= (UINT_MAX /
600 sizeof(struct vc4_shader_state)) || 601 sizeof(struct vc4_shader_state)) ||
601 temp_size < exec_size) { 602 temp_size < exec_size) {
602 DRM_ERROR("overflow in exec arguments\n"); 603 DRM_ERROR("overflow in exec arguments\n");
604 ret = -EINVAL;
603 goto fail; 605 goto fail;
604 } 606 }
605 607
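The corrected guard above checks the offsets in layout order (bin CL, then shader records, then uniforms, then total size), so any unsigned wrap-around in the offset arithmetic shows up as an ordering violation; the added ret = -EINVAL also makes the failure visible to the caller instead of returning whatever ret happened to hold. A standalone illustration of the idiom (names are mine, not the driver's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Each region must start at or after the end of the previous one, and the
 * whole layout must fit in the allocation. An unsigned wrap-around in any
 * offset computation breaks the monotonic chain and is rejected here. */
static bool layout_ok(size_t bin_size, size_t shader_rec_off,
                      size_t uniforms_off, size_t exec_size,
                      size_t alloc_size)
{
        return shader_rec_off >= bin_size &&
               uniforms_off >= shader_rec_off &&
               exec_size >= uniforms_off &&
               alloc_size >= exec_size;
}

int main(void)
{
        printf("%d\n", layout_ok(64, 64, 128, 256, 256));  /* 1: valid   */
        printf("%d\n", layout_ok(64, 32, 128, 256, 256));  /* 0: overlap */
        return 0;
}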
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 881bf489478b..686cdd3c86f2 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -858,7 +858,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
858 } 858 }
859 } 859 }
860 plane = &vc4_plane->base; 860 plane = &vc4_plane->base;
861 ret = drm_universal_plane_init(dev, plane, 0xff, 861 ret = drm_universal_plane_init(dev, plane, 0,
862 &vc4_plane_funcs, 862 &vc4_plane_funcs,
863 formats, num_formats, 863 formats, num_formats,
864 type, NULL); 864 type, NULL);
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 08886a309757..5cdd003605f5 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
461 } 461 }
462 462
463 ret = vc4_full_res_bounds_check(exec, *obj, surf); 463 ret = vc4_full_res_bounds_check(exec, *obj, surf);
464 if (!ret) 464 if (ret)
465 return ret; 465 return ret;
466 466
467 return 0; 467 return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index dd21f950e129..cde9f3758106 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
331 info->fbops = &virtio_gpufb_ops; 331 info->fbops = &virtio_gpufb_ops;
332 info->pixmap.flags = FB_PIXMAP_SYSTEM; 332 info->pixmap.flags = FB_PIXMAP_SYSTEM;
333 333
334 info->screen_base = obj->vmap; 334 info->screen_buffer = obj->vmap;
335 info->screen_size = obj->gem_base.size; 335 info->screen_size = obj->gem_base.size;
336 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 336 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
337 drm_fb_helper_fill_var(info, &vfbdev->helper, 337 drm_fb_helper_fill_var(info, &vfbdev->helper,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 723fd763da8e..7a96798b9c0a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -481,8 +481,7 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
481 mode_cmd.height = var->yres; 481 mode_cmd.height = var->yres;
482 mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width; 482 mode_cmd.pitches[0] = ((var->bits_per_pixel + 7) / 8) * mode_cmd.width;
483 mode_cmd.pixel_format = 483 mode_cmd.pixel_format =
484 drm_mode_legacy_fb_format(var->bits_per_pixel, 484 drm_mode_legacy_fb_format(var->bits_per_pixel, depth);
485 ((var->bits_per_pixel + 7) / 8) * mode_cmd.width);
486 485
487 cur_fb = par->set_fb; 486 cur_fb = par->set_fb;
488 if (cur_fb && cur_fb->width == mode_cmd.width && 487 if (cur_fb && cur_fb->width == mode_cmd.width &&
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index d40ed9fdf68d..70b12f89a193 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -64,7 +64,8 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
64#define QUIRK_SKIP_INPUT_MAPPING BIT(2) 64#define QUIRK_SKIP_INPUT_MAPPING BIT(2)
65#define QUIRK_IS_MULTITOUCH BIT(3) 65#define QUIRK_IS_MULTITOUCH BIT(3)
66 66
67#define NOTEBOOK_QUIRKS QUIRK_FIX_NOTEBOOK_REPORT 67#define KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \
68 QUIRK_NO_INIT_REPORTS)
68#define TOUCHPAD_QUIRKS (QUIRK_NO_INIT_REPORTS | \ 69#define TOUCHPAD_QUIRKS (QUIRK_NO_INIT_REPORTS | \
69 QUIRK_SKIP_INPUT_MAPPING | \ 70 QUIRK_SKIP_INPUT_MAPPING | \
70 QUIRK_IS_MULTITOUCH) 71 QUIRK_IS_MULTITOUCH)
@@ -170,11 +171,11 @@ static int asus_raw_event(struct hid_device *hdev,
170 171
171static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi) 172static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
172{ 173{
174 struct input_dev *input = hi->input;
173 struct asus_drvdata *drvdata = hid_get_drvdata(hdev); 175 struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
174 176
175 if (drvdata->quirks & QUIRK_IS_MULTITOUCH) { 177 if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
176 int ret; 178 int ret;
177 struct input_dev *input = hi->input;
178 179
179 input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0); 180 input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0);
180 input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0); 181 input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0);
@@ -191,10 +192,10 @@ static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
191 hid_err(hdev, "Asus input mt init slots failed: %d\n", ret); 192 hid_err(hdev, "Asus input mt init slots failed: %d\n", ret);
192 return ret; 193 return ret;
193 } 194 }
194
195 drvdata->input = input;
196 } 195 }
197 196
197 drvdata->input = input;
198
198 return 0; 199 return 0;
199} 200}
200 201
@@ -286,7 +287,11 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
286 goto err_stop_hw; 287 goto err_stop_hw;
287 } 288 }
288 289
289 drvdata->input->name = "Asus TouchPad"; 290 if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
291 drvdata->input->name = "Asus TouchPad";
292 } else {
293 drvdata->input->name = "Asus Keyboard";
294 }
290 295
291 if (drvdata->quirks & QUIRK_IS_MULTITOUCH) { 296 if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
292 ret = asus_start_multitouch(hdev); 297 ret = asus_start_multitouch(hdev);
@@ -315,7 +320,7 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
315 320
316static const struct hid_device_id asus_devices[] = { 321static const struct hid_device_id asus_devices[] = {
317 { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, 322 { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
318 USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), NOTEBOOK_QUIRKS}, 323 USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), KEYBOARD_QUIRKS},
319 { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, 324 { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
320 USB_DEVICE_ID_ASUSTEK_TOUCHPAD), TOUCHPAD_QUIRKS }, 325 USB_DEVICE_ID_ASUSTEK_TOUCHPAD), TOUCHPAD_QUIRKS },
321 { } 326 { }
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index cff060b56da9..ea36b557d5ee 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2496,6 +2496,7 @@ static const struct hid_device_id hid_ignore_list[] = {
2496 { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) }, 2496 { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
2497 { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) }, 2497 { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
2498 { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) }, 2498 { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
2499 { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
2499 { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) }, 2500 { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
2500 { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) }, 2501 { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
2501#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB) 2502#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index 717704e9ae07..c0303f61c26a 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
148 struct usb_interface *usbif = to_usb_interface(dev->parent); 148 struct usb_interface *usbif = to_usb_interface(dev->parent);
149 struct usb_device *usbdev = interface_to_usbdev(usbif); 149 struct usb_device *usbdev = interface_to_usbdev(usbif);
150 int brightness; 150 int brightness;
151 char data[8]; 151 char *data;
152
153 data = kmalloc(8, GFP_KERNEL);
154 if (!data)
155 return -ENOMEM;
152 156
153 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 157 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
154 K90_REQUEST_STATUS, 158 K90_REQUEST_STATUS,
155 USB_DIR_IN | USB_TYPE_VENDOR | 159 USB_DIR_IN | USB_TYPE_VENDOR |
156 USB_RECIP_DEVICE, 0, 0, data, 8, 160 USB_RECIP_DEVICE, 0, 0, data, 8,
157 USB_CTRL_SET_TIMEOUT); 161 USB_CTRL_SET_TIMEOUT);
158 if (ret < 0) { 162 if (ret < 5) {
159 dev_warn(dev, "Failed to get K90 initial state (error %d).\n", 163 dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
160 ret); 164 ret);
161 return -EIO; 165 ret = -EIO;
166 goto out;
162 } 167 }
163 brightness = data[4]; 168 brightness = data[4];
164 if (brightness < 0 || brightness > 3) { 169 if (brightness < 0 || brightness > 3) {
165 dev_warn(dev, 170 dev_warn(dev,
166 "Read invalid backlight brightness: %02hhx.\n", 171 "Read invalid backlight brightness: %02hhx.\n",
167 data[4]); 172 data[4]);
168 return -EIO; 173 ret = -EIO;
174 goto out;
169 } 175 }
170 return brightness; 176 ret = brightness;
177out:
178 kfree(data);
179
180 return ret;
171} 181}
172 182
173static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev) 183static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev)
@@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev,
253 struct usb_interface *usbif = to_usb_interface(dev->parent); 263 struct usb_interface *usbif = to_usb_interface(dev->parent);
254 struct usb_device *usbdev = interface_to_usbdev(usbif); 264 struct usb_device *usbdev = interface_to_usbdev(usbif);
255 const char *macro_mode; 265 const char *macro_mode;
256 char data[8]; 266 char *data;
267
268 data = kmalloc(2, GFP_KERNEL);
269 if (!data)
270 return -ENOMEM;
257 271
258 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 272 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
259 K90_REQUEST_GET_MODE, 273 K90_REQUEST_GET_MODE,
260 USB_DIR_IN | USB_TYPE_VENDOR | 274 USB_DIR_IN | USB_TYPE_VENDOR |
261 USB_RECIP_DEVICE, 0, 0, data, 2, 275 USB_RECIP_DEVICE, 0, 0, data, 2,
262 USB_CTRL_SET_TIMEOUT); 276 USB_CTRL_SET_TIMEOUT);
263 if (ret < 0) { 277 if (ret < 1) {
264 dev_warn(dev, "Failed to get K90 initial mode (error %d).\n", 278 dev_warn(dev, "Failed to get K90 initial mode (error %d).\n",
265 ret); 279 ret);
266 return -EIO; 280 ret = -EIO;
281 goto out;
267 } 282 }
268 283
269 switch (data[0]) { 284 switch (data[0]) {
@@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev,
277 default: 292 default:
278 dev_warn(dev, "K90 in unknown mode: %02hhx.\n", 293 dev_warn(dev, "K90 in unknown mode: %02hhx.\n",
279 data[0]); 294 data[0]);
280 return -EIO; 295 ret = -EIO;
296 goto out;
281 } 297 }
282 298
283 return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode); 299 ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
300out:
301 kfree(data);
302
303 return ret;
284} 304}
285 305
286static ssize_t k90_store_macro_mode(struct device *dev, 306static ssize_t k90_store_macro_mode(struct device *dev,
@@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev,
320 struct usb_interface *usbif = to_usb_interface(dev->parent); 340 struct usb_interface *usbif = to_usb_interface(dev->parent);
321 struct usb_device *usbdev = interface_to_usbdev(usbif); 341 struct usb_device *usbdev = interface_to_usbdev(usbif);
322 int current_profile; 342 int current_profile;
323 char data[8]; 343 char *data;
344
345 data = kmalloc(8, GFP_KERNEL);
346 if (!data)
347 return -ENOMEM;
324 348
325 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 349 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
326 K90_REQUEST_STATUS, 350 K90_REQUEST_STATUS,
327 USB_DIR_IN | USB_TYPE_VENDOR | 351 USB_DIR_IN | USB_TYPE_VENDOR |
328 USB_RECIP_DEVICE, 0, 0, data, 8, 352 USB_RECIP_DEVICE, 0, 0, data, 8,
329 USB_CTRL_SET_TIMEOUT); 353 USB_CTRL_SET_TIMEOUT);
330 if (ret < 0) { 354 if (ret < 8) {
331 dev_warn(dev, "Failed to get K90 initial state (error %d).\n", 355 dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
332 ret); 356 ret);
333 return -EIO; 357 ret = -EIO;
358 goto out;
334 } 359 }
335 current_profile = data[7]; 360 current_profile = data[7];
336 if (current_profile < 1 || current_profile > 3) { 361 if (current_profile < 1 || current_profile > 3) {
337 dev_warn(dev, "Read invalid current profile: %02hhx.\n", 362 dev_warn(dev, "Read invalid current profile: %02hhx.\n",
338 data[7]); 363 data[7]);
339 return -EIO; 364 ret = -EIO;
365 goto out;
340 } 366 }
341 367
342 return snprintf(buf, PAGE_SIZE, "%d\n", current_profile); 368 ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
369out:
370 kfree(data);
371
372 return ret;
343} 373}
344 374
345static ssize_t k90_store_current_profile(struct device *dev, 375static ssize_t k90_store_current_profile(struct device *dev,
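The hid-corsair changes above apply one pattern three times: buffers passed to usb_control_msg() may be handed to DMA, so they must come from the heap rather than the stack, and a transfer that returns fewer bytes than the code goes on to read is treated as fatally as an outright error. A condensed sketch of the pattern, reusing the request constants from the driver; the helper name and the minimum length of 5 (bytes 0..4 needed) are illustrative:

static int read_status_byte(struct usb_device *udev, u8 *out)
{
        char *data;
        int ret;

        data = kmalloc(8, GFP_KERNEL);  /* heap memory is DMA-safe */
        if (!data)
                return -ENOMEM;

        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                              K90_REQUEST_STATUS,
                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                              0, 0, data, 8, USB_CTRL_SET_TIMEOUT);
        if (ret < 5) {          /* error or short read: both are failures */
                ret = ret < 0 ? ret : -EIO;
                goto out;
        }
        *out = data[4];
        ret = 0;
out:
        kfree(data);
        return ret;
}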
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index f31a778b0851..b22d0f83f8e3 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -168,7 +168,7 @@ struct cp2112_device {
168 atomic_t xfer_avail; 168 atomic_t xfer_avail;
169 struct gpio_chip gc; 169 struct gpio_chip gc;
170 u8 *in_out_buffer; 170 u8 *in_out_buffer;
171 spinlock_t lock; 171 struct mutex lock;
172 172
173 struct gpio_desc *desc[8]; 173 struct gpio_desc *desc[8];
174 bool gpio_poll; 174 bool gpio_poll;
@@ -186,10 +186,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
186 struct cp2112_device *dev = gpiochip_get_data(chip); 186 struct cp2112_device *dev = gpiochip_get_data(chip);
187 struct hid_device *hdev = dev->hdev; 187 struct hid_device *hdev = dev->hdev;
188 u8 *buf = dev->in_out_buffer; 188 u8 *buf = dev->in_out_buffer;
189 unsigned long flags;
190 int ret; 189 int ret;
191 190
192 spin_lock_irqsave(&dev->lock, flags); 191 mutex_lock(&dev->lock);
193 192
194 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 193 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
195 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, 194 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -213,8 +212,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
213 ret = 0; 212 ret = 0;
214 213
215exit: 214exit:
216 spin_unlock_irqrestore(&dev->lock, flags); 215 mutex_unlock(&dev->lock);
217 return ret <= 0 ? ret : -EIO; 216 return ret < 0 ? ret : -EIO;
218} 217}
219 218
220static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 219static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -222,10 +221,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
222 struct cp2112_device *dev = gpiochip_get_data(chip); 221 struct cp2112_device *dev = gpiochip_get_data(chip);
223 struct hid_device *hdev = dev->hdev; 222 struct hid_device *hdev = dev->hdev;
224 u8 *buf = dev->in_out_buffer; 223 u8 *buf = dev->in_out_buffer;
225 unsigned long flags;
226 int ret; 224 int ret;
227 225
228 spin_lock_irqsave(&dev->lock, flags); 226 mutex_lock(&dev->lock);
229 227
230 buf[0] = CP2112_GPIO_SET; 228 buf[0] = CP2112_GPIO_SET;
231 buf[1] = value ? 0xff : 0; 229 buf[1] = value ? 0xff : 0;
@@ -237,7 +235,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
237 if (ret < 0) 235 if (ret < 0)
238 hid_err(hdev, "error setting GPIO values: %d\n", ret); 236 hid_err(hdev, "error setting GPIO values: %d\n", ret);
239 237
240 spin_unlock_irqrestore(&dev->lock, flags); 238 mutex_unlock(&dev->lock);
241} 239}
242 240
243static int cp2112_gpio_get_all(struct gpio_chip *chip) 241static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -245,10 +243,9 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
245 struct cp2112_device *dev = gpiochip_get_data(chip); 243 struct cp2112_device *dev = gpiochip_get_data(chip);
246 struct hid_device *hdev = dev->hdev; 244 struct hid_device *hdev = dev->hdev;
247 u8 *buf = dev->in_out_buffer; 245 u8 *buf = dev->in_out_buffer;
248 unsigned long flags;
249 int ret; 246 int ret;
250 247
251 spin_lock_irqsave(&dev->lock, flags); 248 mutex_lock(&dev->lock);
252 249
253 ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, 250 ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
254 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, 251 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
@@ -262,7 +259,7 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
262 ret = buf[1]; 259 ret = buf[1];
263 260
264exit: 261exit:
265 spin_unlock_irqrestore(&dev->lock, flags); 262 mutex_unlock(&dev->lock);
266 263
267 return ret; 264 return ret;
268} 265}
@@ -284,10 +281,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
284 struct cp2112_device *dev = gpiochip_get_data(chip); 281 struct cp2112_device *dev = gpiochip_get_data(chip);
285 struct hid_device *hdev = dev->hdev; 282 struct hid_device *hdev = dev->hdev;
286 u8 *buf = dev->in_out_buffer; 283 u8 *buf = dev->in_out_buffer;
287 unsigned long flags;
288 int ret; 284 int ret;
289 285
290 spin_lock_irqsave(&dev->lock, flags); 286 mutex_lock(&dev->lock);
291 287
292 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 288 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
293 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, 289 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -308,7 +304,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
308 goto fail; 304 goto fail;
309 } 305 }
310 306
311 spin_unlock_irqrestore(&dev->lock, flags); 307 mutex_unlock(&dev->lock);
312 308
313 /* 309 /*
314 * Set gpio value when output direction is already set, 310 * Set gpio value when output direction is already set,
@@ -319,7 +315,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
319 return 0; 315 return 0;
320 316
321fail: 317fail:
322 spin_unlock_irqrestore(&dev->lock, flags); 318 mutex_unlock(&dev->lock);
323 return ret < 0 ? ret : -EIO; 319 return ret < 0 ? ret : -EIO;
324} 320}
325 321
@@ -1235,7 +1231,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
1235 if (!dev->in_out_buffer) 1231 if (!dev->in_out_buffer)
1236 return -ENOMEM; 1232 return -ENOMEM;
1237 1233
1238 spin_lock_init(&dev->lock); 1234 mutex_init(&dev->lock);
1239 1235
1240 ret = hid_parse(hdev); 1236 ret = hid_parse(hdev);
1241 if (ret) { 1237 if (ret) {
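The lock conversion above is a correctness fix, not a style one: hid_hw_raw_request() may sleep (on USB transports it issues synchronous control transfers), and sleeping is forbidden while holding a spinlock with interrupts disabled. A mutex still serializes access to the shared in_out_buffer while allowing the transfer to block. Sketched against the driver's own fields:

static int cp2112_read_gpio_config(struct cp2112_device *dev)
{
        int ret;

        /* hid_hw_raw_request() can sleep, so the shared in_out_buffer is
         * guarded by a mutex, never by spin_lock_irqsave(). */
        mutex_lock(&dev->lock);
        ret = hid_hw_raw_request(dev->hdev, CP2112_GPIO_CONFIG,
                                 dev->in_out_buffer,
                                 CP2112_GPIO_CONFIG_LENGTH,
                                 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
        mutex_unlock(&dev->lock);

        return ret;
}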
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 1b764d1745f3..1689568b597d 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
39 if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX)) 39 if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
40 return rdesc; 40 return rdesc;
41 41
42 if (*rsize < 4)
43 return rdesc;
44
42 for (i = 0; i < *rsize - 4; i++) 45 for (i = 0; i < *rsize - 4; i++)
43 if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) { 46 if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
44 rdesc[i] = 0x19; 47 rdesc[i] = 0x19;
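The added length guard is not pedantry: *rsize is an unsigned int, so for a report descriptor shorter than 4 bytes the loop bound *rsize - 4 wraps around to a value near UINT_MAX and the fixup scans far past the buffer. A two-line user-space demonstration of the wrap:

#include <stdio.h>

int main(void)
{
        unsigned int rsize = 2;

        /* 2 - 4 wraps in unsigned arithmetic, so a loop bounded by
         * "i < rsize - 4" would run (almost) forever past the buffer. */
        printf("rsize - 4 = %u\n", rsize - 4);
        return 0;
}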
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index ec277b96eaa1..350accfee8e8 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -76,6 +76,9 @@
76#define USB_VENDOR_ID_ALPS_JP 0x044E 76#define USB_VENDOR_ID_ALPS_JP 0x044E
77#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B 77#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
78 78
79#define USB_VENDOR_ID_AMI 0x046b
80#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
81
79#define USB_VENDOR_ID_ANTON 0x1130 82#define USB_VENDOR_ID_ANTON 0x1130
80#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101 83#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
81 84
@@ -319,6 +322,7 @@
319#define USB_VENDOR_ID_DRAGONRISE 0x0079 322#define USB_VENDOR_ID_DRAGONRISE 0x0079
320#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800 323#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
321#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801 324#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
325#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803
322#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE 0x1843 326#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE 0x1843
323 327
324#define USB_VENDOR_ID_DWAV 0x0eef 328#define USB_VENDOR_ID_DWAV 0x0eef
@@ -365,6 +369,9 @@
365#define USB_VENDOR_ID_FLATFROG 0x25b5 369#define USB_VENDOR_ID_FLATFROG 0x25b5
366#define USB_DEVICE_ID_MULTITOUCH_3200 0x0002 370#define USB_DEVICE_ID_MULTITOUCH_3200 0x0002
367 371
372#define USB_VENDOR_ID_FUTABA 0x0547
373#define USB_DEVICE_ID_LED_DISPLAY 0x7000
374
368#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f 375#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f
369#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 376#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
370 377
@@ -812,6 +819,9 @@
812#define USB_VENDOR_ID_PETALYNX 0x18b1 819#define USB_VENDOR_ID_PETALYNX 0x18b1
813#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037 820#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
814 821
822#define USB_VENDOR_ID_PETZL 0x2122
823#define USB_DEVICE_ID_PETZL_HEADLAMP 0x1234
824
815#define USB_VENDOR_ID_PHILIPS 0x0471 825#define USB_VENDOR_ID_PHILIPS 0x0471
816#define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617 826#define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617
817 827
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index c5c5fbe9d605..52026dc94d5c 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
872 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG), 872 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
873 .driver_data = LG_NOGET | LG_FF4 }, 873 .driver_data = LG_NOGET | LG_FF4 },
874 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), 874 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
875 .driver_data = LG_FF2 }, 875 .driver_data = LG_NOGET | LG_FF2 },
876 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940), 876 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
877 .driver_data = LG_FF3 }, 877 .driver_data = LG_FF3 },
878 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR), 878 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 5c925228847c..4ef73374a8f9 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -212,7 +212,6 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
212 __s32 value; 212 __s32 value;
213 int ret = 0; 213 int ret = 0;
214 214
215 memset(buffer, 0, buffer_size);
216 mutex_lock(&data->mutex); 215 mutex_lock(&data->mutex);
217 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); 216 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
218 if (!report || (field_index >= report->maxfield)) { 217 if (!report || (field_index >= report->maxfield)) {
@@ -256,6 +255,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
256 int buffer_index = 0; 255 int buffer_index = 0;
257 int i; 256 int i;
258 257
258 memset(buffer, 0, buffer_size);
259
259 mutex_lock(&data->mutex); 260 mutex_lock(&data->mutex);
260 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); 261 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
261 if (!report || (field_index >= report->maxfield) || 262 if (!report || (field_index >= report->maxfield) ||
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 7687c0875395..f405b07d0381 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1099,8 +1099,11 @@ struct sony_sc {
1099 u8 led_delay_on[MAX_LEDS]; 1099 u8 led_delay_on[MAX_LEDS];
1100 u8 led_delay_off[MAX_LEDS]; 1100 u8 led_delay_off[MAX_LEDS];
1101 u8 led_count; 1101 u8 led_count;
1102 bool ds4_dongle_connected;
1102}; 1103};
1103 1104
1105static void sony_set_leds(struct sony_sc *sc);
1106
1104static inline void sony_schedule_work(struct sony_sc *sc) 1107static inline void sony_schedule_work(struct sony_sc *sc)
1105{ 1108{
1106 if (!sc->defer_initialization) 1109 if (!sc->defer_initialization)
@@ -1430,6 +1433,31 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
1430 return -EILSEQ; 1433 return -EILSEQ;
1431 } 1434 }
1432 } 1435 }
1436
1437 /*
1438 * In the case of a DS4 USB dongle, bit[2] of byte 31 indicates
1439 * if a DS4 is actually connected (indicated by '0').
1440 * For non-dongle, this bit is always 0 (connected).
1441 */
1442 if (sc->hdev->vendor == USB_VENDOR_ID_SONY &&
1443 sc->hdev->product == USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) {
1444 bool connected = (rd[31] & 0x04) ? false : true;
1445
1446 if (!sc->ds4_dongle_connected && connected) {
1447 hid_info(sc->hdev, "DualShock 4 USB dongle: controller connected\n");
1448 sony_set_leds(sc);
1449 sc->ds4_dongle_connected = true;
1450 } else if (sc->ds4_dongle_connected && !connected) {
1451 hid_info(sc->hdev, "DualShock 4 USB dongle: controller disconnected\n");
1452 sc->ds4_dongle_connected = false;
1453 /* Return 0, so hidraw can get the report. */
1454 return 0;
1455 } else if (!sc->ds4_dongle_connected) {
1456 /* Return 0, so hidraw can get the report. */
1457 return 0;
1458 }
1459 }
1460
1433 dualshock4_parse_report(sc, rd, size); 1461 dualshock4_parse_report(sc, rd, size);
1434 } 1462 }
1435 1463
@@ -2390,6 +2418,12 @@ static int sony_check_add(struct sony_sc *sc)
2390 } 2418 }
2391 2419
2392 memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address)); 2420 memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
2421
2422 snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
2423 "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
2424 sc->mac_address[5], sc->mac_address[4],
2425 sc->mac_address[3], sc->mac_address[2],
2426 sc->mac_address[1], sc->mac_address[0]);
2393 } else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) || 2427 } else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
2394 (sc->quirks & NAVIGATION_CONTROLLER_USB)) { 2428 (sc->quirks & NAVIGATION_CONTROLLER_USB)) {
2395 buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL); 2429 buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
@@ -2548,7 +2582,7 @@ static int sony_input_configured(struct hid_device *hdev,
2548 hid_err(sc->hdev, 2582 hid_err(sc->hdev,
2549 "Unable to initialize multi-touch slots: %d\n", 2583 "Unable to initialize multi-touch slots: %d\n",
2550 ret); 2584 ret);
2551 return ret; 2585 goto err_stop;
2552 } 2586 }
2553 2587
2554 sony_init_output_report(sc, dualshock4_send_output_report); 2588 sony_init_output_report(sc, dualshock4_send_output_report);
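The dongle handling above reduces to a single status bit plus edge detection: reports are forwarded to the DualShock 4 parser only while a pad is attached, and the LEDs are reprogrammed exactly once per connect. The core of the logic, assuming report byte 31 carries the connection bit as the comment in the hunk states:

/* Bit 2 of byte 31 is set while no controller is paired to the dongle. */
bool connected = !(rd[31] & 0x04);

if (connected && !sc->ds4_dongle_connected) {
        sony_set_leds(sc);                      /* pad just appeared */
        sc->ds4_dongle_connected = true;
} else if (!connected && sc->ds4_dongle_connected) {
        sc->ds4_dongle_connected = false;       /* pad just vanished */
}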
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 78fb32a7b103..ea3c3546cef7 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -426,6 +426,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
426 if (ret) 426 if (ret)
427 goto out_unlock; 427 goto out_unlock;
428 428
429 /*
430 * The HID over I2C specification states that if a DEVICE needs time
431 * after the PWR_ON request, it should utilise CLOCK stretching.
432 * However, it has been observed that the Windows driver provides a
433 * 1ms sleep between the PWR_ON and RESET requests and that some devices
434 * rely on this.
435 */
436 usleep_range(1000, 5000);
437
429 i2c_hid_dbg(ihid, "resetting...\n"); 438 i2c_hid_dbg(ihid, "resetting...\n");
430 439
431 ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); 440 ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
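The new delay uses usleep_range() rather than msleep() or udelay() because a 1 ms process-context wait sits in the range where msleep() can overshoot badly (up to a full jiffy, roughly 10-20 ms at low HZ) and a busy-wait would burn the CPU; the 1000-5000 us window also lets the timer core coalesce the wakeup with others:

/* Minimum 1 ms for the device, up to 5 ms of slack for timer coalescing. */
usleep_range(1000, 5000);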
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index b3e01c82af05..30a2977e2645 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -57,6 +57,7 @@ static const struct hid_blacklist {
57 { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET }, 57 { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
58 { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS }, 58 { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
59 { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS }, 59 { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
60 { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
60 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET }, 61 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
61 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET }, 62 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
62 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET }, 63 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
@@ -83,11 +84,13 @@ static const struct hid_blacklist {
83 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 84 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
84 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, 85 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
85 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT }, 86 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
87 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
86 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT }, 88 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
87 { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL }, 89 { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
88 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 90 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
89 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 91 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
90 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 92 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
93 { USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY, HID_QUIRK_NO_INIT_REPORTS },
91 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL }, 94 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
92 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, 95 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
93 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, 96 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index b9779bcbd140..8aeca038cc73 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -740,6 +740,11 @@ static int wacom_add_shared_data(struct hid_device *hdev)
740 return retval; 740 return retval;
741 } 741 }
742 742
743 if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
744 wacom_wac->shared->touch = hdev;
745 else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
746 wacom_wac->shared->pen = hdev;
747
743out: 748out:
744 mutex_unlock(&wacom_udev_list_lock); 749 mutex_unlock(&wacom_udev_list_lock);
745 return retval; 750 return retval;
@@ -2036,10 +2041,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
2036 if (error) 2041 if (error)
2037 goto fail; 2042 goto fail;
2038 2043
2039 error = wacom_add_shared_data(hdev);
2040 if (error)
2041 goto fail;
2042
2043 /* 2044 /*
2044 * Bamboo Pad has a generic hid handling for the Pen, and we switch it 2045 * Bamboo Pad has a generic hid handling for the Pen, and we switch it
2045 * into debug mode for the touch part. 2046 * into debug mode for the touch part.
@@ -2080,10 +2081,9 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
2080 2081
2081 wacom_update_name(wacom, wireless ? " (WL)" : ""); 2082 wacom_update_name(wacom, wireless ? " (WL)" : "");
2082 2083
2083 if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH) 2084 error = wacom_add_shared_data(hdev);
2084 wacom_wac->shared->touch = hdev; 2085 if (error)
2085 else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN) 2086 goto fail;
2086 wacom_wac->shared->pen = hdev;
2087 2087
2088 if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) && 2088 if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
2089 (features->quirks & WACOM_QUIRK_BATTERY)) { 2089 (features->quirks & WACOM_QUIRK_BATTERY)) {
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index b1a9a3ca6d56..672145b0d8f5 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -166,19 +166,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
166 wacom->id[0] = STYLUS_DEVICE_ID; 166 wacom->id[0] = STYLUS_DEVICE_ID;
167 } 167 }
168 168
169 pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1)); 169 if (prox) {
170 if (features->pressure_max > 255) 170 pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
171 pressure = (pressure << 1) | ((data[4] >> 6) & 1); 171 if (features->pressure_max > 255)
172 pressure += (features->pressure_max + 1) / 2; 172 pressure = (pressure << 1) | ((data[4] >> 6) & 1);
173 pressure += (features->pressure_max + 1) / 2;
173 174
174 input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14)); 175 input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
175 input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14)); 176 input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
176 input_report_abs(input, ABS_PRESSURE, pressure); 177 input_report_abs(input, ABS_PRESSURE, pressure);
177 178
178 input_report_key(input, BTN_TOUCH, data[4] & 0x08); 179 input_report_key(input, BTN_TOUCH, data[4] & 0x08);
179 input_report_key(input, BTN_STYLUS, data[4] & 0x10); 180 input_report_key(input, BTN_STYLUS, data[4] & 0x10);
180 /* Only allow the stylus2 button to be reported for the pen tool. */ 181 /* Only allow the stylus2 button to be reported for the pen tool. */
181 input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20)); 182 input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
183 }
182 184
183 if (!prox) 185 if (!prox)
184 wacom->id[0] = 0; 186 wacom->id[0] = 0;
@@ -2187,6 +2189,16 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
2187 2189
2188 wacom_report_events(hdev, report); 2190 wacom_report_events(hdev, report);
2189 2191
2192 /*
2193 * Non-input reports may be sent prior to the device being
2194 * completely initialized. Since only their events need
2195 * to be processed, exit after 'wacom_report_events' has
2196 * been called to prevent potential crashes in the report-
2197 * processing functions.
2198 */
2199 if (report->type != HID_INPUT_REPORT)
2200 return;
2201
2190 if (WACOM_PAD_FIELD(field)) { 2202 if (WACOM_PAD_FIELD(field)) {
2191 wacom_wac_pad_battery_report(hdev, report); 2203 wacom_wac_pad_battery_report(hdev, report);
2192 if (wacom->wacom_wac.pad_input) 2204 if (wacom->wacom_wac.pad_input)
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index cd49cb17eb7f..308dbda700eb 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
383 return ret; 383 return ret;
384 } 384 }
385 385
386 init_cached_read_index(channel);
386 next_read_location = hv_get_next_read_location(inring_info); 387 next_read_location = hv_get_next_read_location(inring_info);
387 next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc, 388 next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
388 sizeof(desc), 389 sizeof(desc),
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 322ed9272811..841f2428e84a 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1036,7 +1036,7 @@ static const u8 lm90_temp_emerg_index[3] = {
1036}; 1036};
1037 1037
1038static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 }; 1038static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 };
1039static const u8 lm90_max_alarm_bits[3] = { 0, 4, 12 }; 1039static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 };
1040static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 }; 1040static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 };
1041static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 }; 1041static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 };
1042static const u8 lm90_fault_bits[3] = { 0, 2, 10 }; 1042static const u8 lm90_fault_bits[3] = { 0, 2, 10 };
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 686971263bef..45d6771fac8c 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev)
962 goto err_clk_dis; 962 goto err_clk_dis;
963 } 963 }
964 964
965 ret = i2c_add_adapter(&id->adap);
966 if (ret < 0)
967 goto err_clk_dis;
968
969 /* 965 /*
970 * Cadence I2C controller has a bug wherein it generates 966 * Cadence I2C controller has a bug wherein it generates
971 * invalid read transaction after HW timeout in master receiver mode. 967 * invalid read transaction after HW timeout in master receiver mode.
@@ -975,6 +971,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
975 */ 971 */
976 cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); 972 cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
977 973
974 ret = i2c_add_adapter(&id->adap);
975 if (ret < 0)
976 goto err_clk_dis;
977
978 dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", 978 dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
979 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq); 979 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
980 980
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 6d81c56184d3..e9db857c6226 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -475,30 +475,28 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
475static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) 475static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
476{ 476{
477 struct i2c_msg *msgs = dev->msgs; 477 struct i2c_msg *msgs = dev->msgs;
478 u32 ic_tar = 0; 478 u32 ic_con, ic_tar = 0;
479 479
480 /* Disable the adapter */ 480 /* Disable the adapter */
481 __i2c_dw_enable_and_wait(dev, false); 481 __i2c_dw_enable_and_wait(dev, false);
482 482
483 /* if the slave address is ten bit address, enable 10BITADDR */ 483 /* if the slave address is ten bit address, enable 10BITADDR */
484 if (dev->dynamic_tar_update_enabled) { 484 ic_con = dw_readl(dev, DW_IC_CON);
485 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
486 ic_con |= DW_IC_CON_10BITADDR_MASTER;
485 /* 487 /*
486 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing 488 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
487 * mode has to be enabled via bit 12 of IC_TAR register, 489 * mode has to be enabled via bit 12 of IC_TAR register.
488 * otherwise bit 4 of IC_CON is used. 490 * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
491 * detected from registers.
489 */ 492 */
490 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) 493 ic_tar = DW_IC_TAR_10BITADDR_MASTER;
491 ic_tar = DW_IC_TAR_10BITADDR_MASTER;
492 } else { 494 } else {
493 u32 ic_con = dw_readl(dev, DW_IC_CON); 495 ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
494
495 if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
496 ic_con |= DW_IC_CON_10BITADDR_MASTER;
497 else
498 ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
499 dw_writel(dev, ic_con, DW_IC_CON);
500 } 496 }
501 497
498 dw_writel(dev, ic_con, DW_IC_CON);
499
502 /* 500 /*
503 * Set the slave (target) address and enable 10-bit addressing mode 501 * Set the slave (target) address and enable 10-bit addressing mode
504 * if applicable. 502 * if applicable.
@@ -963,7 +961,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
963{ 961{
964 struct i2c_adapter *adap = &dev->adapter; 962 struct i2c_adapter *adap = &dev->adapter;
965 int r; 963 int r;
966 u32 reg;
967 964
968 init_completion(&dev->cmd_complete); 965 init_completion(&dev->cmd_complete);
969 966
@@ -971,26 +968,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
971 if (r) 968 if (r)
972 return r; 969 return r;
973 970
974 r = i2c_dw_acquire_lock(dev);
975 if (r)
976 return r;
977
978 /*
979 * Test if dynamic TAR update is enabled in this controller by writing
980 * to IC_10BITADDR_MASTER field in IC_CON: when it is enabled this
981 * field is read-only so it should not succeed
982 */
983 reg = dw_readl(dev, DW_IC_CON);
984 dw_writel(dev, reg ^ DW_IC_CON_10BITADDR_MASTER, DW_IC_CON);
985
986 if ((dw_readl(dev, DW_IC_CON) & DW_IC_CON_10BITADDR_MASTER) ==
987 (reg & DW_IC_CON_10BITADDR_MASTER)) {
988 dev->dynamic_tar_update_enabled = true;
989 dev_dbg(dev->dev, "Dynamic TAR update enabled");
990 }
991
992 i2c_dw_release_lock(dev);
993
994 snprintf(adap->name, sizeof(adap->name), 971 snprintf(adap->name, sizeof(adap->name),
995 "Synopsys DesignWare I2C adapter"); 972 "Synopsys DesignWare I2C adapter");
996 adap->retries = 3; 973 adap->retries = 3;
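The rework drops the probe-time test for I2C_DYNAMIC_TAR_UPDATE, which could not be detected reliably from the registers, and instead programs 10-bit addressing redundantly: the IC_CON bit is honoured when the feature is disabled, the IC_TAR bit when it is enabled, and setting both is harmless either way. The resulting xfer-init logic, condensed from the hunk:

u32 ic_con = dw_readl(dev, DW_IC_CON);
u32 ic_tar = 0;

if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
        /* IC_CON bit 4 is used when dynamic TAR update is disabled,
         * IC_TAR bit 12 when it is enabled; set both unconditionally. */
        ic_con |= DW_IC_CON_10BITADDR_MASTER;
        ic_tar = DW_IC_TAR_10BITADDR_MASTER;
} else {
        ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
}
dw_writel(dev, ic_con, DW_IC_CON);
/* ic_tar is then OR-ed into the target-address write that follows */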
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 26250b425e2f..c1db3a5a340f 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -125,7 +125,6 @@ struct dw_i2c_dev {
125 int (*acquire_lock)(struct dw_i2c_dev *dev); 125 int (*acquire_lock)(struct dw_i2c_dev *dev);
126 void (*release_lock)(struct dw_i2c_dev *dev); 126 void (*release_lock)(struct dw_i2c_dev *dev);
127 bool pm_runtime_disabled; 127 bool pm_runtime_disabled;
128 bool dynamic_tar_update_enabled;
129}; 128};
130 129
131#define ACCESS_SWAP 0x00000001 130#define ACCESS_SWAP 0x00000001
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index c62b7cd475f8..3310f2e0dbd3 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -28,6 +28,7 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/of.h> 29#include <linux/of.h>
30#include <linux/of_device.h> 30#include <linux/of_device.h>
31#include <linux/pinctrl/consumer.h>
31#include <linux/platform_device.h> 32#include <linux/platform_device.h>
32#include <linux/sched.h> 33#include <linux/sched.h>
33#include <linux/slab.h> 34#include <linux/slab.h>
@@ -636,12 +637,31 @@ static int lpi2c_imx_remove(struct platform_device *pdev)
636 return 0; 637 return 0;
637} 638}
638 639
640#ifdef CONFIG_PM_SLEEP
641static int lpi2c_imx_suspend(struct device *dev)
642{
643 pinctrl_pm_select_sleep_state(dev);
644
645 return 0;
646}
647
648static int lpi2c_imx_resume(struct device *dev)
649{
650 pinctrl_pm_select_default_state(dev);
651
652 return 0;
653}
654#endif
655
656static SIMPLE_DEV_PM_OPS(imx_lpi2c_pm, lpi2c_imx_suspend, lpi2c_imx_resume);
657
639static struct platform_driver lpi2c_imx_driver = { 658static struct platform_driver lpi2c_imx_driver = {
640 .probe = lpi2c_imx_probe, 659 .probe = lpi2c_imx_probe,
641 .remove = lpi2c_imx_remove, 660 .remove = lpi2c_imx_remove,
642 .driver = { 661 .driver = {
643 .name = DRIVER_NAME, 662 .name = DRIVER_NAME,
644 .of_match_table = lpi2c_imx_of_match, 663 .of_match_table = lpi2c_imx_of_match,
664 .pm = &imx_lpi2c_pm,
645 }, 665 },
646}; 666};
647 667
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index c2268cdf38e8..c21ca7bf2efe 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -58,7 +58,7 @@
58#define SMBSLVDAT (0xC + piix4_smba) 58#define SMBSLVDAT (0xC + piix4_smba)
59 59
60/* count for request_region */ 60/* count for request_region */
61#define SMBIOSIZE 8 61#define SMBIOSIZE 9
62 62
63/* PCI Address Constants */ 63/* PCI Address Constants */
64#define SMBBA 0x090 64#define SMBBA 0x090
@@ -585,12 +585,33 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
585 u8 command, int size, union i2c_smbus_data *data) 585 u8 command, int size, union i2c_smbus_data *data)
586{ 586{
587 struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap); 587 struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
588 unsigned short piix4_smba = adapdata->smba;
589 int retries = MAX_TIMEOUT;
590 int smbslvcnt;
588 u8 smba_en_lo; 591 u8 smba_en_lo;
589 u8 port; 592 u8 port;
590 int retval; 593 int retval;
591 594
592 mutex_lock(&piix4_mutex_sb800); 595 mutex_lock(&piix4_mutex_sb800);
593 596
597 /* Request the SMBUS semaphore, avoid conflicts with the IMC */
598 smbslvcnt = inb_p(SMBSLVCNT);
599 do {
600 outb_p(smbslvcnt | 0x10, SMBSLVCNT);
601
602 /* Check the semaphore status */
603 smbslvcnt = inb_p(SMBSLVCNT);
604 if (smbslvcnt & 0x10)
605 break;
606
607 usleep_range(1000, 2000);
608 } while (--retries);
609 /* SMBus is still owned by the IMC, we give up */
610 if (!retries) {
611 mutex_unlock(&piix4_mutex_sb800);
612 return -EBUSY;
613 }
614
594 outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); 615 outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
595 smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); 616 smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
596 617
@@ -604,6 +625,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
604 625
605 outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1); 626 outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
606 627
628 /* Release the semaphore */
629 outb_p(smbslvcnt | 0x20, SMBSLVCNT);
630
607 mutex_unlock(&piix4_mutex_sb800); 631 mutex_unlock(&piix4_mutex_sb800);
608 632
609 return retval; 633 return retval;
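The semaphore dance above keeps the host driver from racing the IMC, the integrated micro-controller that also masters the SMBus on SB800-class chipsets: request ownership by setting bit 4 of SMBSLVCNT, poll until the hardware latches it, and hand it back through bit 5 when done. The bounded retry keeps a wedged IMC from hanging the kernel. The acquire/release skeleton, using the register names from the hunk:

int retries = MAX_TIMEOUT;
int smbslvcnt;

do {
        outb_p(inb_p(SMBSLVCNT) | 0x10, SMBSLVCNT);     /* request bit */
        smbslvcnt = inb_p(SMBSLVCNT);
        if (smbslvcnt & 0x10)                           /* granted?    */
                break;
        usleep_range(1000, 2000);
} while (--retries);
if (!retries)
        return -EBUSY;          /* the IMC never released the bus */

/* ... perform the SMBus transaction ... */

outb_p(smbslvcnt | 0x20, SMBSLVCNT);                    /* release bit */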
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index cf9e396d7702..583e95042a21 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -931,7 +931,10 @@ static int i2c_device_probe(struct device *dev)
931 if (!client->irq) { 931 if (!client->irq) {
932 int irq = -ENOENT; 932 int irq = -ENOENT;
933 933
934 if (dev->of_node) { 934 if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
935 dev_dbg(dev, "Using Host Notify IRQ\n");
936 irq = i2c_smbus_host_notify_to_irq(client);
937 } else if (dev->of_node) {
935 irq = of_irq_get_byname(dev->of_node, "irq"); 938 irq = of_irq_get_byname(dev->of_node, "irq");
936 if (irq == -EINVAL || irq == -ENODATA) 939 if (irq == -EINVAL || irq == -ENODATA)
937 irq = of_irq_get(dev->of_node, 0); 940 irq = of_irq_get(dev->of_node, 0);
@@ -940,14 +943,7 @@ static int i2c_device_probe(struct device *dev)
940 } 943 }
941 if (irq == -EPROBE_DEFER) 944 if (irq == -EPROBE_DEFER)
942 return irq; 945 return irq;
943 /* 946
944 * ACPI and OF did not find any useful IRQ, try to see
945 * if Host Notify can be used.
946 */
947 if (irq < 0) {
948 dev_dbg(dev, "Using Host Notify IRQ\n");
949 irq = i2c_smbus_host_notify_to_irq(client);
950 }
951 if (irq < 0) 947 if (irq < 0)
952 irq = 0; 948 irq = 0;
953 949
@@ -1708,7 +1704,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
1708 1704
1709 if (i2c_check_addr_validity(addr, info.flags)) { 1705 if (i2c_check_addr_validity(addr, info.flags)) {
1710 dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n", 1706 dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
1711 info.addr, node->full_name); 1707 addr, node->full_name);
1712 return ERR_PTR(-EINVAL); 1708 return ERR_PTR(-EINVAL);
1713 } 1709 }
1714 1710
@@ -1716,6 +1712,9 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
1716 info.of_node = of_node_get(node); 1712 info.of_node = of_node_get(node);
1717 info.archdata = &dev_ad; 1713 info.archdata = &dev_ad;
1718 1714
1715 if (of_property_read_bool(node, "host-notify"))
1716 info.flags |= I2C_CLIENT_HOST_NOTIFY;
1717
1719 if (of_get_property(node, "wakeup-source", NULL)) 1718 if (of_get_property(node, "wakeup-source", NULL))
1720 info.flags |= I2C_CLIENT_WAKE; 1719 info.flags |= I2C_CLIENT_WAKE;
1721 1720
@@ -3633,7 +3632,7 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
3633 int ret; 3632 int ret;
3634 3633
3635 if (!client || !slave_cb) { 3634 if (!client || !slave_cb) {
3636 WARN(1, "insufficent data\n"); 3635 WARN(1, "insufficient data\n");
3637 return -EINVAL; 3636 return -EINVAL;
3638 } 3637 }
3639 3638
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 66f323fd3982..6f638bbc922d 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -331,7 +331,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
331 unsigned long arg) 331 unsigned long arg)
332{ 332{
333 struct i2c_smbus_ioctl_data data_arg; 333 struct i2c_smbus_ioctl_data data_arg;
334 union i2c_smbus_data temp; 334 union i2c_smbus_data temp = {};
335 int datasize, res; 335 int datasize, res;
336 336
337 if (copy_from_user(&data_arg, 337 if (copy_from_user(&data_arg,
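The one-character i2c-dev change above (= {}) closes an information leak: the union lives on the kernel stack, a byte- or word-sized SMBus transaction writes only part of it, and the subsequent copy_to_user() would otherwise ship whatever stale stack bytes occupy the rest. A user-space illustration of why zero-initialization suffices:

#include <stdio.h>

union smbus_data {
        unsigned char byte;
        unsigned short word;
        unsigned char block[34];
};

int main(void)
{
        /* An empty/zero initializer ("= {}" in kernel style, "= {0}" in
         * strict ISO C) clears the whole union, so members a short
         * transaction never wrote stay 0 instead of holding stale stack
         * contents that copy_to_user() would leak. */
        union smbus_data temp = {0};

        temp.byte = 0x5a;       /* a byte-sized read fills 1 of 34 bytes */
        printf("block[20] = %u\n", temp.block[20]);     /* always 0 */
        return 0;
}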
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index f6b6d42385e1..784670e2736b 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -353,12 +353,12 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
353 [0] = { 353 [0] = {
354 .num = ST_ACCEL_FS_AVL_2G, 354 .num = ST_ACCEL_FS_AVL_2G,
355 .value = 0x00, 355 .value = 0x00,
356 .gain = IIO_G_TO_M_S_2(1024), 356 .gain = IIO_G_TO_M_S_2(1000),
357 }, 357 },
358 [1] = { 358 [1] = {
359 .num = ST_ACCEL_FS_AVL_6G, 359 .num = ST_ACCEL_FS_AVL_6G,
360 .value = 0x01, 360 .value = 0x01,
361 .gain = IIO_G_TO_M_S_2(340), 361 .gain = IIO_G_TO_M_S_2(3000),
362 }, 362 },
363 }, 363 },
364 }, 364 },
@@ -366,6 +366,14 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
366 .addr = 0x21, 366 .addr = 0x21,
367 .mask = 0x40, 367 .mask = 0x40,
368 }, 368 },
369 /*
370 * Data Alignment Setting - needs to be set to get
371 * left-justified data like all other sensors.
372 */
373 .das = {
374 .addr = 0x21,
375 .mask = 0x01,
376 },
369 .drdy_irq = { 377 .drdy_irq = {
370 .addr = 0x21, 378 .addr = 0x21,
371 .mask_int1 = 0x04, 379 .mask_int1 = 0x04,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 38bc319904c4..9c8b558ba19e 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -561,7 +561,7 @@ config TI_ADS8688
561 561
562config TI_AM335X_ADC 562config TI_AM335X_ADC
563 tristate "TI's AM335X ADC driver" 563 tristate "TI's AM335X ADC driver"
564 depends on MFD_TI_AM335X_TSCADC 564 depends on MFD_TI_AM335X_TSCADC && HAS_DMA
565 select IIO_BUFFER 565 select IIO_BUFFER
566 select IIO_KFIFO_BUF 566 select IIO_KFIFO_BUF
567 help 567 help
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 2bbf0c521beb..7d61b566e148 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
775 775
776static int palmas_gpadc_suspend(struct device *dev) 776static int palmas_gpadc_suspend(struct device *dev)
777{ 777{
778 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 778 struct iio_dev *indio_dev = dev_get_drvdata(dev);
779 struct palmas_gpadc *adc = iio_priv(indio_dev); 779 struct palmas_gpadc *adc = iio_priv(indio_dev);
780 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; 780 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
781 int ret; 781 int ret;
@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
798 798
799static int palmas_gpadc_resume(struct device *dev) 799static int palmas_gpadc_resume(struct device *dev)
800{ 800{
801 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 801 struct iio_dev *indio_dev = dev_get_drvdata(dev);
802 struct palmas_gpadc *adc = iio_priv(indio_dev); 802 struct palmas_gpadc *adc = iio_priv(indio_dev);
803 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable; 803 int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
804 int ret; 804 int ret;
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index fe7775bb3740..df4045203a07 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -30,7 +30,9 @@ static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
30 30
31 for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) { 31 for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) {
32 const struct iio_chan_spec *channel = &indio_dev->channels[i]; 32 const struct iio_chan_spec *channel = &indio_dev->channels[i];
33 unsigned int bytes_to_read = channel->scan_type.realbits >> 3; 33 unsigned int bytes_to_read =
34 DIV_ROUND_UP(channel->scan_type.realbits +
35 channel->scan_type.shift, 8);
34 unsigned int storage_bytes = 36 unsigned int storage_bytes =
35 channel->scan_type.storagebits >> 3; 37 channel->scan_type.storagebits >> 3;
36 38
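The DIV_ROUND_UP change matters for channels whose significant bits are not byte-aligned once the register shift is included: realbits >> 3 truncates, while rounding realbits + shift up to whole bytes reads everything the sample actually occupies. A worked example (the 12-bit/4-shift channel is hypothetical but matches the shape of the bug):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* e.g. a 12-bit sample left-shifted by 4 inside 16-bit registers */
        unsigned int realbits = 12, shift = 4;

        printf("old: %u bytes\n", realbits >> 3);                    /* 1: too few  */
        printf("new: %u bytes\n", DIV_ROUND_UP(realbits + shift, 8)); /* 2: correct */
        return 0;
}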
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 975a1f19f747..79c8c7cd70d5 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -401,6 +401,15 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
401 return err; 401 return err;
402 } 402 }
403 403
404 /* set DAS */
405 if (sdata->sensor_settings->das.addr) {
406 err = st_sensors_write_data_with_mask(indio_dev,
407 sdata->sensor_settings->das.addr,
408 sdata->sensor_settings->das.mask, 1);
409 if (err < 0)
410 return err;
411 }
412
404 if (sdata->int_pin_open_drain) { 413 if (sdata->int_pin_open_drain) {
405 dev_info(&indio_dev->dev, 414 dev_info(&indio_dev->dev,
406 "set interrupt line to open drain mode\n"); 415 "set interrupt line to open drain mode\n");
@@ -483,8 +492,10 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
483 int err; 492 int err;
484 u8 *outdata; 493 u8 *outdata;
485 struct st_sensor_data *sdata = iio_priv(indio_dev); 494 struct st_sensor_data *sdata = iio_priv(indio_dev);
486 unsigned int byte_for_channel = ch->scan_type.realbits >> 3; 495 unsigned int byte_for_channel;
487 496
497 byte_for_channel = DIV_ROUND_UP(ch->scan_type.realbits +
498 ch->scan_type.shift, 8);
488 outdata = kmalloc(byte_for_channel, GFP_KERNEL); 499 outdata = kmalloc(byte_for_channel, GFP_KERNEL);
489 if (!outdata) 500 if (!outdata)
490 return -ENOMEM; 501 return -ENOMEM;
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
index 2d2ee353dde7..a5913e97945e 100644
--- a/drivers/iio/counter/104-quad-8.c
+++ b/drivers/iio/counter/104-quad-8.c
@@ -153,7 +153,7 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
153 ior_cfg = val | priv->preset_enable[chan->channel] << 1; 153 ior_cfg = val | priv->preset_enable[chan->channel] << 1;
154 154
155 /* Load I/O control configuration */ 155 /* Load I/O control configuration */
156 outb(0x40 | ior_cfg, base_offset); 156 outb(0x40 | ior_cfg, base_offset + 1);
157 157
158 return 0; 158 return 0;
159 case IIO_CHAN_INFO_SCALE: 159 case IIO_CHAN_INFO_SCALE:
@@ -233,7 +233,7 @@ static ssize_t quad8_read_set_to_preset_on_index(struct iio_dev *indio_dev,
233 const struct quad8_iio *const priv = iio_priv(indio_dev); 233 const struct quad8_iio *const priv = iio_priv(indio_dev);
234 234
235 return snprintf(buf, PAGE_SIZE, "%u\n", 235 return snprintf(buf, PAGE_SIZE, "%u\n",
236 priv->preset_enable[chan->channel]); 236 !priv->preset_enable[chan->channel]);
237} 237}
238 238
239static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev, 239static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
@@ -241,7 +241,7 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
241 size_t len) 241 size_t len)
242{ 242{
243 struct quad8_iio *const priv = iio_priv(indio_dev); 243 struct quad8_iio *const priv = iio_priv(indio_dev);
244 const int base_offset = priv->base + 2 * chan->channel; 244 const int base_offset = priv->base + 2 * chan->channel + 1;
245 bool preset_enable; 245 bool preset_enable;
246 int ret; 246 int ret;
247 unsigned int ior_cfg; 247 unsigned int ior_cfg;
@@ -250,6 +250,9 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
250 if (ret) 250 if (ret)
251 return ret; 251 return ret;
252 252
253 /* Preset enable is active low in Input/Output Control register */
254 preset_enable = !preset_enable;
255
253 priv->preset_enable[chan->channel] = preset_enable; 256 priv->preset_enable[chan->channel] = preset_enable;
254 257
255 ior_cfg = priv->ab_enable[chan->channel] | 258 ior_cfg = priv->ab_enable[chan->channel] |
@@ -362,7 +365,7 @@ static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
362 priv->synchronous_mode[chan->channel] = synchronous_mode; 365 priv->synchronous_mode[chan->channel] = synchronous_mode;
363 366
364 /* Load Index Control configuration to Index Control Register */ 367 /* Load Index Control configuration to Index Control Register */
365 outb(0x40 | idr_cfg, base_offset); 368 outb(0x60 | idr_cfg, base_offset);
366 369
367 return 0; 370 return 0;
368} 371}
@@ -444,7 +447,7 @@ static int quad8_set_index_polarity(struct iio_dev *indio_dev,
444 priv->index_polarity[chan->channel] = index_polarity; 447 priv->index_polarity[chan->channel] = index_polarity;
445 448
446 /* Load Index Control configuration to Index Control Register */ 449 /* Load Index Control configuration to Index Control Register */
447 outb(0x40 | idr_cfg, base_offset); 450 outb(0x60 | idr_cfg, base_offset);
448 451
449 return 0; 452 return 0;
450} 453}
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 9a081465c42f..6bb23a49e81e 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
422 422
423static int __maybe_unused afe4403_suspend(struct device *dev) 423static int __maybe_unused afe4403_suspend(struct device *dev)
424{ 424{
425 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 425 struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
426 struct afe4403_data *afe = iio_priv(indio_dev); 426 struct afe4403_data *afe = iio_priv(indio_dev);
427 int ret; 427 int ret;
428 428
@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
443 443
444static int __maybe_unused afe4403_resume(struct device *dev) 444static int __maybe_unused afe4403_resume(struct device *dev)
445{ 445{
446 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 446 struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
447 struct afe4403_data *afe = iio_priv(indio_dev); 447 struct afe4403_data *afe = iio_priv(indio_dev);
448 int ret; 448 int ret;
449 449
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 45266404f7e3..964f5231a831 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
428 428
429static int __maybe_unused afe4404_suspend(struct device *dev) 429static int __maybe_unused afe4404_suspend(struct device *dev)
430{ 430{
431 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 431 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
432 struct afe4404_data *afe = iio_priv(indio_dev); 432 struct afe4404_data *afe = iio_priv(indio_dev);
433 int ret; 433 int ret;
434 434
@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
449 449
450static int __maybe_unused afe4404_resume(struct device *dev) 450static int __maybe_unused afe4404_resume(struct device *dev)
451{ 451{
452 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 452 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
453 struct afe4404_data *afe = iio_priv(indio_dev); 453 struct afe4404_data *afe = iio_priv(indio_dev);
454 int ret; 454 int ret;
455 455
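
In both AFE440x drivers the `struct device` passed to the PM callbacks is the SPI/I2C client's device, not the one embedded in the `iio_dev`, so `dev_to_iio_dev()` computed a bogus container pointer; the fix follows the driver data stored at probe time instead. A userspace model with simplified stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct device { void *driver_data; };
    struct spi_device { struct device dev; };
    struct iio_dev { int id; struct device dev; };

    int main(void)
    {
        struct iio_dev indio = { .id = 42 };
        struct spi_device spi = { .dev.driver_data = &indio };

        /* wrong: this dev is embedded in spi_device, not in an iio_dev */
        struct iio_dev *bad = container_of(&spi.dev, struct iio_dev, dev);
        /* right: follow the driver data set at probe time */
        struct iio_dev *good = spi.dev.driver_data;

        printf("bad=%p good=%p good->id=%d\n",
               (void *)bad, (void *)good, good->id);
        return 0;
    }
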
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 90ab8a2d2846..183c14329d6e 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
238 238
239 mutex_lock(&data->lock); 239 mutex_lock(&data->lock);
240 240
241 while (cnt || (cnt = max30100_fifo_count(data) > 0)) { 241 while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
242 ret = max30100_read_measurement(data); 242 ret = max30100_read_measurement(data);
243 if (ret) 243 if (ret)
244 break; 244 break;
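
The MAX30100 fix is pure operator precedence: `>` binds tighter than `=`, so the old condition stored the comparison result (0 or 1) in `cnt` instead of the FIFO count. A compilable demonstration with a stub counter:

    #include <stdio.h>

    static int fifo_count(void) { return 7; }   /* stub hardware read */

    int main(void)
    {
        int cnt;

        cnt = fifo_count() > 0;                 /* buggy: '>' wins, cnt == 1 */
        printf("buggy: cnt=%d\n", cnt);

        if ((cnt = fifo_count()) > 0)           /* fixed grouping: cnt == 7 */
            printf("fixed: cnt=%d\n", cnt);
        return 0;
    }
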
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 9c47bc98f3ac..2a22ad920333 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -71,7 +71,8 @@
71 * a) select an implementation using busy loop polling on those systems 71 * a) select an implementation using busy loop polling on those systems
72 * b) use the checksum to do some probabilistic decoding 72 * b) use the checksum to do some probabilistic decoding
73 */ 73 */
74#define DHT11_START_TRANSMISSION 18 /* ms */ 74#define DHT11_START_TRANSMISSION_MIN 18000 /* us */
75#define DHT11_START_TRANSMISSION_MAX 20000 /* us */
75#define DHT11_MIN_TIMERES 34000 /* ns */ 76#define DHT11_MIN_TIMERES 34000 /* ns */
76#define DHT11_THRESHOLD 49000 /* ns */ 77#define DHT11_THRESHOLD 49000 /* ns */
77#define DHT11_AMBIG_LOW 23000 /* ns */ 78#define DHT11_AMBIG_LOW 23000 /* ns */
@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
228 ret = gpio_direction_output(dht11->gpio, 0); 229 ret = gpio_direction_output(dht11->gpio, 0);
229 if (ret) 230 if (ret)
230 goto err; 231 goto err;
231 msleep(DHT11_START_TRANSMISSION); 232 usleep_range(DHT11_START_TRANSMISSION_MIN,
233 DHT11_START_TRANSMISSION_MAX);
232 ret = gpio_direction_input(dht11->gpio); 234 ret = gpio_direction_input(dht11->gpio);
233 if (ret) 235 if (ret)
234 goto err; 236 goto err;
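
`msleep()` is jiffy-based and can oversleep well past the DHT11's 20 ms maximum start-pulse width, while `usleep_range()` uses hrtimers with an explicit upper bound, keeping the pulse inside the 18-20 ms window of the hunk above. A rough userspace analogy using `nanosleep()` (the helper below is illustrative, not the kernel API):

    #include <stdio.h>
    #include <time.h>

    /* crude stand-in for usleep_range(): sleep at least 'us' microseconds */
    static void sleep_us(long us)
    {
        struct timespec ts = { us / 1000000L, (us % 1000000L) * 1000L };

        nanosleep(&ts, NULL);
    }

    int main(void)
    {
        struct timespec a, b;

        clock_gettime(CLOCK_MONOTONIC, &a);
        sleep_us(18000);            /* aim inside the 18000-20000 us window */
        clock_gettime(CLOCK_MONOTONIC, &b);
        printf("slept ~%ld us\n",
               (b.tv_sec - a.tv_sec) * 1000000L +
               (b.tv_nsec - a.tv_nsec) / 1000L);
        return 0;
    }
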
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 5355507f8fa1..c9e319bff58b 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -66,10 +66,8 @@
66 66
67#define BMI160_REG_DUMMY 0x7F 67#define BMI160_REG_DUMMY 0x7F
68 68
69#define BMI160_ACCEL_PMU_MIN_USLEEP 3200 69#define BMI160_ACCEL_PMU_MIN_USLEEP 3800
70#define BMI160_ACCEL_PMU_MAX_USLEEP 3800 70#define BMI160_GYRO_PMU_MIN_USLEEP 80000
71#define BMI160_GYRO_PMU_MIN_USLEEP 55000
72#define BMI160_GYRO_PMU_MAX_USLEEP 80000
73#define BMI160_SOFTRESET_USLEEP 1000 71#define BMI160_SOFTRESET_USLEEP 1000
74 72
75#define BMI160_CHANNEL(_type, _axis, _index) { \ 73#define BMI160_CHANNEL(_type, _axis, _index) { \
@@ -151,20 +149,9 @@ static struct bmi160_regs bmi160_regs[] = {
151 }, 149 },
152}; 150};
153 151
154struct bmi160_pmu_time { 152static unsigned long bmi160_pmu_time[] = {
155 unsigned long min; 153 [BMI160_ACCEL] = BMI160_ACCEL_PMU_MIN_USLEEP,
156 unsigned long max; 154 [BMI160_GYRO] = BMI160_GYRO_PMU_MIN_USLEEP,
157};
158
159static struct bmi160_pmu_time bmi160_pmu_time[] = {
160 [BMI160_ACCEL] = {
161 .min = BMI160_ACCEL_PMU_MIN_USLEEP,
162 .max = BMI160_ACCEL_PMU_MAX_USLEEP
163 },
164 [BMI160_GYRO] = {
165 .min = BMI160_GYRO_PMU_MIN_USLEEP,
166 .max = BMI160_GYRO_PMU_MIN_USLEEP,
167 },
168}; 155};
169 156
170struct bmi160_scale { 157struct bmi160_scale {
@@ -289,7 +276,7 @@ int bmi160_set_mode(struct bmi160_data *data, enum bmi160_sensor_type t,
289 if (ret < 0) 276 if (ret < 0)
290 return ret; 277 return ret;
291 278
292 usleep_range(bmi160_pmu_time[t].min, bmi160_pmu_time[t].max); 279 usleep_range(bmi160_pmu_time[t], bmi160_pmu_time[t] + 1000);
293 280
294 return 0; 281 return 0;
295} 282}
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index a144ca3461fc..81bd8e8da4a6 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -113,7 +113,7 @@ static const char max44000_int_time_avail_str[] =
113 "0.100 " 113 "0.100 "
114 "0.025 " 114 "0.025 "
115 "0.00625 " 115 "0.00625 "
116 "0.001625"; 116 "0.0015625";
117 117
118/* Available scales (internal to ulux) with pretty manual alignment: */ 118/* Available scales (internal to ulux) with pretty manual alignment: */
119static const int max44000_scale_avail_ulux_array[] = { 119static const int max44000_scale_avail_ulux_array[] = {
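
The MAX44000 integration times listed in the string quarter at each step, so the entry after 0.00625 s is 0.00625 / 4 = 0.0015625 s; the old string had dropped a digit. The sequence, computed:

    #include <stdio.h>

    int main(void)
    {
        double t = 0.100;               /* longest integration time, seconds */

        for (int i = 0; i < 4; i++, t /= 4)
            printf("%.7f\n", t);        /* 0.1000000 ... 0.0015625 */
        return 0;
    }
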
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e7dcfac877ca..3e70a9c5d79d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2811,7 +2811,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2811 if (!src_addr || !src_addr->sa_family) { 2811 if (!src_addr || !src_addr->sa_family) {
2812 src_addr = (struct sockaddr *) &id->route.addr.src_addr; 2812 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
2813 src_addr->sa_family = dst_addr->sa_family; 2813 src_addr->sa_family = dst_addr->sa_family;
2814 if (dst_addr->sa_family == AF_INET6) { 2814 if (IS_ENABLED(CONFIG_IPV6) &&
2815 dst_addr->sa_family == AF_INET6) {
2815 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; 2816 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
2816 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; 2817 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
2817 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; 2818 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
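
`IS_ENABLED(CONFIG_IPV6)` evaluates to a compile-time 0 or 1, so on kernels without IPv6 the `sin6_scope_id` branch is discarded by the compiler while still being parsed and type-checked. A stripped-down model (the real kernel macro resolves CONFIG symbols and also handles =m; this is a plain define for illustration):

    #include <stdio.h>

    #define MY_CONFIG_IPV6 0            /* flip to 1 to keep the branch */
    #define IS_ENABLED(x)  (x)          /* simplified stand-in */

    int main(void)
    {
        int family = 10;                /* AF_INET6 */

        if (IS_ENABLED(MY_CONFIG_IPV6) && family == 10)
            printf("copy sin6_scope_id\n");
        else
            printf("IPv6 branch optimized out\n");
        return 0;
    }
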
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 1e62a5f0cb28..4609b921f899 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
134 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); 134 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
135 135
136 if (access & IB_ACCESS_ON_DEMAND) { 136 if (access & IB_ACCESS_ON_DEMAND) {
137 put_pid(umem->pid);
137 ret = ib_umem_odp_get(context, umem); 138 ret = ib_umem_odp_get(context, umem);
138 if (ret) { 139 if (ret) {
139 kfree(umem); 140 kfree(umem);
@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
149 150
150 page_list = (struct page **) __get_free_page(GFP_KERNEL); 151 page_list = (struct page **) __get_free_page(GFP_KERNEL);
151 if (!page_list) { 152 if (!page_list) {
153 put_pid(umem->pid);
152 kfree(umem); 154 kfree(umem);
153 return ERR_PTR(-ENOMEM); 155 return ERR_PTR(-ENOMEM);
154 } 156 }
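
`ib_umem_get()` takes a pid reference before the first failure point, so both early-return paths above must drop it or it leaks. Skeleton of the pattern, with `get_ref()`/`put_ref()` standing in for `get_task_pid()`/`put_pid()`:

    #include <stdio.h>
    #include <stdlib.h>

    static int refs;                    /* models the pid reference count */
    static void get_ref(void) { refs++; }
    static void put_ref(void) { refs--; }

    static void *setup(int fail)
    {
        void *obj = malloc(16);

        if (!obj)
            return NULL;

        get_ref();                      /* taken early, like umem->pid */
        if (fail) {
            put_ref();                  /* the fix: drop it before bailing out */
            free(obj);
            return NULL;
        }
        return obj;
    }

    int main(void)
    {
        free(setup(1));
        printf("leaked refs: %d\n", refs);      /* 0 with the put in place */
        return 0;
    }
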
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9d5fe1853da4..6262dc035f3c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev,
1135 1135
1136 memset(props, 0, sizeof(struct ib_port_attr)); 1136 memset(props, 0, sizeof(struct ib_port_attr));
1137 props->max_mtu = IB_MTU_4096; 1137 props->max_mtu = IB_MTU_4096;
1138 if (netdev->mtu >= 4096) 1138 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
1139 props->active_mtu = IB_MTU_4096;
1140 else if (netdev->mtu >= 2048)
1141 props->active_mtu = IB_MTU_2048;
1142 else if (netdev->mtu >= 1024)
1143 props->active_mtu = IB_MTU_1024;
1144 else if (netdev->mtu >= 512)
1145 props->active_mtu = IB_MTU_512;
1146 else
1147 props->active_mtu = IB_MTU_256;
1148 1139
1149 if (!netif_carrier_ok(netdev)) 1140 if (!netif_carrier_ok(netdev))
1150 props->state = IB_PORT_DOWN; 1141 props->state = IB_PORT_DOWN;
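
This five-way MTU ladder was duplicated across iw_cxgb3, iw_cxgb4, i40iw and nes; the series replaces each copy with `ib_mtu_int_to_enum()`. A plausible shape for the helper, reconstructed from the ladder it replaces (the enum values are the rdma ones; the body is a sketch, not the kernel's exact implementation):

    #include <stdio.h>

    enum ib_mtu { IB_MTU_256 = 1, IB_MTU_512, IB_MTU_1024,
                  IB_MTU_2048, IB_MTU_4096 };

    static enum ib_mtu ib_mtu_int_to_enum(int mtu)
    {
        if (mtu >= 4096) return IB_MTU_4096;
        if (mtu >= 2048) return IB_MTU_2048;
        if (mtu >= 1024) return IB_MTU_1024;
        if (mtu >= 512)  return IB_MTU_512;
        return IB_MTU_256;
    }

    int main(void)
    {
        printf("mtu 1500 -> enum %d\n", ib_mtu_int_to_enum(1500));
        return 0;
    }
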
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f1510cc76d2d..9398143d7c5e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1804 skb_trim(skb, dlen); 1804 skb_trim(skb, dlen);
1805 mutex_lock(&ep->com.mutex); 1805 mutex_lock(&ep->com.mutex);
1806 1806
1807 /* update RX credits */
1808 update_rx_credits(ep, dlen);
1809
1810 switch (ep->com.state) { 1807 switch (ep->com.state) {
1811 case MPA_REQ_SENT: 1808 case MPA_REQ_SENT:
1809 update_rx_credits(ep, dlen);
1812 ep->rcv_seq += dlen; 1810 ep->rcv_seq += dlen;
1813 disconnect = process_mpa_reply(ep, skb); 1811 disconnect = process_mpa_reply(ep, skb);
1814 break; 1812 break;
1815 case MPA_REQ_WAIT: 1813 case MPA_REQ_WAIT:
1814 update_rx_credits(ep, dlen);
1816 ep->rcv_seq += dlen; 1815 ep->rcv_seq += dlen;
1817 disconnect = process_mpa_request(ep, skb); 1816 disconnect = process_mpa_request(ep, skb);
1818 break; 1817 break;
1819 case FPDU_MODE: { 1818 case FPDU_MODE: {
1820 struct c4iw_qp_attributes attrs; 1819 struct c4iw_qp_attributes attrs;
1820
1821 update_rx_credits(ep, dlen);
1821 BUG_ON(!ep->com.qp); 1822 BUG_ON(!ep->com.qp);
1822 if (status) 1823 if (status)
1823 pr_err("%s Unexpected streaming data." \ 1824 pr_err("%s Unexpected streaming data." \
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 19c6477af19f..bec82a600d77 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -505,6 +505,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
505 } 505 }
506 506
507 /* 507 /*
508 * Special cqe for drain WR completions...
509 */
510 if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
511 *cookie = CQE_DRAIN_COOKIE(hw_cqe);
512 *cqe = *hw_cqe;
513 goto skip_cqe;
514 }
515
516 /*
508 * Gotta tweak READ completions: 517 * Gotta tweak READ completions:
509 * 1) the cqe doesn't contain the sq_wptr from the wr. 518 * 1) the cqe doesn't contain the sq_wptr from the wr.
510 * 2) opcode not reflected from the wr. 519 * 2) opcode not reflected from the wr.
@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
753 c4iw_invalidate_mr(qhp->rhp, 762 c4iw_invalidate_mr(qhp->rhp,
754 CQE_WRID_FR_STAG(&cqe)); 763 CQE_WRID_FR_STAG(&cqe));
755 break; 764 break;
765 case C4IW_DRAIN_OPCODE:
766 wc->opcode = IB_WC_SEND;
767 break;
756 default: 768 default:
757 printk(KERN_ERR MOD "Unexpected opcode %d " 769 printk(KERN_ERR MOD "Unexpected opcode %d "
758 "in the CQE received for QPID=0x%0x\n", 770 "in the CQE received for QPID=0x%0x\n",
@@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
817 } 829 }
818 } 830 }
819out: 831out:
820 if (wq) { 832 if (wq)
821 if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
822 if (t4_sq_empty(wq))
823 complete(&qhp->sq_drained);
824 if (t4_rq_empty(wq))
825 complete(&qhp->rq_drained);
826 }
827 spin_unlock(&qhp->lock); 833 spin_unlock(&qhp->lock);
828 }
829 return ret; 834 return ret;
830} 835}
831 836
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 516b0ae6dc3f..40c0e7b9fc6e 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
846 } 846 }
847 } 847 }
848 848
849 rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
850 if (!rdev->free_workq) {
851 err = -ENOMEM;
852 goto err_free_status_page;
853 }
854
849 rdev->status_page->db_off = 0; 855 rdev->status_page->db_off = 0;
850 856
851 return 0; 857 return 0;
858err_free_status_page:
859 free_page((unsigned long)rdev->status_page);
852destroy_ocqp_pool: 860destroy_ocqp_pool:
853 c4iw_ocqp_pool_destroy(rdev); 861 c4iw_ocqp_pool_destroy(rdev);
854destroy_rqtpool: 862destroy_rqtpool:
@@ -862,6 +870,7 @@ destroy_resource:
862 870
863static void c4iw_rdev_close(struct c4iw_rdev *rdev) 871static void c4iw_rdev_close(struct c4iw_rdev *rdev)
864{ 872{
873 destroy_workqueue(rdev->free_workq);
865 kfree(rdev->wr_log); 874 kfree(rdev->wr_log);
866 free_page((unsigned long)rdev->status_page); 875 free_page((unsigned long)rdev->status_page);
867 c4iw_pblpool_destroy(rdev); 876 c4iw_pblpool_destroy(rdev);
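
Adding the `free_workq` allocation mid-way through `c4iw_rdev_open()` requires a matching error label so the already-allocated status page still unwinds in reverse order, and `c4iw_rdev_close()` gains the corresponding `destroy_workqueue()`. Generic shape of that unwind (names invented):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *status_page, *free_wq;

    static int rdev_open(void)
    {
        int err;

        status_page = malloc(4096);
        if (!status_page) {
            err = -ENOMEM;
            goto out;
        }

        free_wq = malloc(64);           /* the newly inserted resource */
        if (!free_wq) {
            err = -ENOMEM;
            goto err_free_status_page;  /* new label mirrors the new step */
        }
        return 0;

    err_free_status_page:
        free(status_page);
    out:
        return err;
    }

    int main(void)
    {
        printf("rdev_open=%d\n", rdev_open());
        return 0;
    }
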
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 4788e1a46fde..8cd4d054a87e 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -45,6 +45,7 @@
45#include <linux/kref.h> 45#include <linux/kref.h>
46#include <linux/timer.h> 46#include <linux/timer.h>
47#include <linux/io.h> 47#include <linux/io.h>
48#include <linux/workqueue.h>
48 49
49#include <asm/byteorder.h> 50#include <asm/byteorder.h>
50 51
@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
107 struct list_head qpids; 108 struct list_head qpids;
108 struct list_head cqids; 109 struct list_head cqids;
109 struct mutex lock; 110 struct mutex lock;
111 struct kref kref;
110}; 112};
111 113
112enum c4iw_rdev_flags { 114enum c4iw_rdev_flags {
@@ -183,6 +185,7 @@ struct c4iw_rdev {
183 atomic_t wr_log_idx; 185 atomic_t wr_log_idx;
184 struct wr_log_entry *wr_log; 186 struct wr_log_entry *wr_log;
185 int wr_log_size; 187 int wr_log_size;
188 struct workqueue_struct *free_workq;
186}; 189};
187 190
188static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) 191static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -480,8 +483,8 @@ struct c4iw_qp {
480 wait_queue_head_t wait; 483 wait_queue_head_t wait;
481 struct timer_list timer; 484 struct timer_list timer;
482 int sq_sig_all; 485 int sq_sig_all;
483 struct completion rq_drained; 486 struct work_struct free_work;
484 struct completion sq_drained; 487 struct c4iw_ucontext *ucontext;
485}; 488};
486 489
487static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) 490static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -495,6 +498,7 @@ struct c4iw_ucontext {
495 u32 key; 498 u32 key;
496 spinlock_t mmap_lock; 499 spinlock_t mmap_lock;
497 struct list_head mmaps; 500 struct list_head mmaps;
501 struct kref kref;
498}; 502};
499 503
500static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) 504static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
502 return container_of(c, struct c4iw_ucontext, ibucontext); 506 return container_of(c, struct c4iw_ucontext, ibucontext);
503} 507}
504 508
509void _c4iw_free_ucontext(struct kref *kref);
510
511static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
512{
513 kref_put(&ucontext->kref, _c4iw_free_ucontext);
514}
515
516static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
517{
518 kref_get(&ucontext->kref);
519}
520
505struct c4iw_mm_entry { 521struct c4iw_mm_entry {
506 struct list_head entry; 522 struct list_head entry;
507 u64 addr; 523 u64 addr;
@@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
615 return IB_QPS_ERR; 631 return IB_QPS_ERR;
616} 632}
617 633
634#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
635
618static inline u32 c4iw_ib_to_tpt_access(int a) 636static inline u32 c4iw_ib_to_tpt_access(int a)
619{ 637{
620 return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) | 638 return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -997,8 +1015,6 @@ extern int c4iw_wr_log;
997extern int db_fc_threshold; 1015extern int db_fc_threshold;
998extern int db_coalescing_threshold; 1016extern int db_coalescing_threshold;
999extern int use_dsgl; 1017extern int use_dsgl;
1000void c4iw_drain_rq(struct ib_qp *qp);
1001void c4iw_drain_sq(struct ib_qp *qp);
1002void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey); 1018void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
1003 1019
1004#endif 1020#endif
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 49b51b7e0fd7..3345e1c312f7 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
93 return -ENOSYS; 93 return -ENOSYS;
94} 94}
95 95
96static int c4iw_dealloc_ucontext(struct ib_ucontext *context) 96void _c4iw_free_ucontext(struct kref *kref)
97{ 97{
98 struct c4iw_dev *rhp = to_c4iw_dev(context->device); 98 struct c4iw_ucontext *ucontext;
99 struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); 99 struct c4iw_dev *rhp;
100 struct c4iw_mm_entry *mm, *tmp; 100 struct c4iw_mm_entry *mm, *tmp;
101 101
102 PDBG("%s context %p\n", __func__, context); 102 ucontext = container_of(kref, struct c4iw_ucontext, kref);
103 rhp = to_c4iw_dev(ucontext->ibucontext.device);
104
105 PDBG("%s ucontext %p\n", __func__, ucontext);
103 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) 106 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
104 kfree(mm); 107 kfree(mm);
105 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); 108 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
106 kfree(ucontext); 109 kfree(ucontext);
110}
111
112static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
113{
114 struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
115
116 PDBG("%s context %p\n", __func__, context);
117 c4iw_put_ucontext(ucontext);
107 return 0; 118 return 0;
108} 119}
109 120
@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
127 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); 138 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
128 INIT_LIST_HEAD(&context->mmaps); 139 INIT_LIST_HEAD(&context->mmaps);
129 spin_lock_init(&context->mmap_lock); 140 spin_lock_init(&context->mmap_lock);
141 kref_init(&context->kref);
130 142
131 if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) { 143 if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
132 if (!warned++) 144 if (!warned++)
@@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
361 373
362 memset(props, 0, sizeof(struct ib_port_attr)); 374 memset(props, 0, sizeof(struct ib_port_attr));
363 props->max_mtu = IB_MTU_4096; 375 props->max_mtu = IB_MTU_4096;
364 if (netdev->mtu >= 4096) 376 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
365 props->active_mtu = IB_MTU_4096;
366 else if (netdev->mtu >= 2048)
367 props->active_mtu = IB_MTU_2048;
368 else if (netdev->mtu >= 1024)
369 props->active_mtu = IB_MTU_1024;
370 else if (netdev->mtu >= 512)
371 props->active_mtu = IB_MTU_512;
372 else
373 props->active_mtu = IB_MTU_256;
374 377
375 if (!netif_carrier_ok(netdev)) 378 if (!netif_carrier_ok(netdev))
376 props->state = IB_PORT_DOWN; 379 props->state = IB_PORT_DOWN;
@@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
607 dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; 610 dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
608 dev->ibdev.get_port_immutable = c4iw_port_immutable; 611 dev->ibdev.get_port_immutable = c4iw_port_immutable;
609 dev->ibdev.get_dev_fw_str = get_dev_fw_str; 612 dev->ibdev.get_dev_fw_str = get_dev_fw_str;
610 dev->ibdev.drain_sq = c4iw_drain_sq;
611 dev->ibdev.drain_rq = c4iw_drain_rq;
612 613
613 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); 614 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
614 if (!dev->ibdev.iwcm) 615 if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cda5542e13a2..04c1c382dedb 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
715 return 0; 715 return 0;
716} 716}
717 717
718static void _free_qp(struct kref *kref) 718static void free_qp_work(struct work_struct *work)
719{
720 struct c4iw_ucontext *ucontext;
721 struct c4iw_qp *qhp;
722 struct c4iw_dev *rhp;
723
724 qhp = container_of(work, struct c4iw_qp, free_work);
725 ucontext = qhp->ucontext;
726 rhp = qhp->rhp;
727
728 PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
729 destroy_qp(&rhp->rdev, &qhp->wq,
730 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
731
732 if (ucontext)
733 c4iw_put_ucontext(ucontext);
734 kfree(qhp);
735}
736
737static void queue_qp_free(struct kref *kref)
719{ 738{
720 struct c4iw_qp *qhp; 739 struct c4iw_qp *qhp;
721 740
722 qhp = container_of(kref, struct c4iw_qp, kref); 741 qhp = container_of(kref, struct c4iw_qp, kref);
723 PDBG("%s qhp %p\n", __func__, qhp); 742 PDBG("%s qhp %p\n", __func__, qhp);
724 kfree(qhp); 743 queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
725} 744}
726 745
727void c4iw_qp_add_ref(struct ib_qp *qp) 746void c4iw_qp_add_ref(struct ib_qp *qp)
@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
733void c4iw_qp_rem_ref(struct ib_qp *qp) 752void c4iw_qp_rem_ref(struct ib_qp *qp)
734{ 753{
735 PDBG("%s ib_qp %p\n", __func__, qp); 754 PDBG("%s ib_qp %p\n", __func__, qp);
736 kref_put(&to_c4iw_qp(qp)->kref, _free_qp); 755 kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
737} 756}
738 757
739static void add_to_fc_list(struct list_head *head, struct list_head *entry) 758static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
776 return 0; 795 return 0;
777} 796}
778 797
798static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
799{
800 struct t4_cqe cqe = {};
801 struct c4iw_cq *schp;
802 unsigned long flag;
803 struct t4_cq *cq;
804
805 schp = to_c4iw_cq(qhp->ibqp.send_cq);
806 cq = &schp->cq;
807
808 cqe.u.drain_cookie = wr->wr_id;
809 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
810 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
811 CQE_TYPE_V(1) |
812 CQE_SWCQE_V(1) |
813 CQE_QPID_V(qhp->wq.sq.qid));
814
815 spin_lock_irqsave(&schp->lock, flag);
816 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
817 cq->sw_queue[cq->sw_pidx] = cqe;
818 t4_swcq_produce(cq);
819 spin_unlock_irqrestore(&schp->lock, flag);
820
821 spin_lock_irqsave(&schp->comp_handler_lock, flag);
822 (*schp->ibcq.comp_handler)(&schp->ibcq,
823 schp->ibcq.cq_context);
824 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
825}
826
827static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
828{
829 struct t4_cqe cqe = {};
830 struct c4iw_cq *rchp;
831 unsigned long flag;
832 struct t4_cq *cq;
833
834 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
835 cq = &rchp->cq;
836
837 cqe.u.drain_cookie = wr->wr_id;
838 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
839 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
840 CQE_TYPE_V(0) |
841 CQE_SWCQE_V(1) |
842 CQE_QPID_V(qhp->wq.sq.qid));
843
844 spin_lock_irqsave(&rchp->lock, flag);
845 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
846 cq->sw_queue[cq->sw_pidx] = cqe;
847 t4_swcq_produce(cq);
848 spin_unlock_irqrestore(&rchp->lock, flag);
849
850 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
851 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
852 rchp->ibcq.cq_context);
853 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
854}
855
779int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 856int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
780 struct ib_send_wr **bad_wr) 857 struct ib_send_wr **bad_wr)
781{ 858{
@@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
794 spin_lock_irqsave(&qhp->lock, flag); 871 spin_lock_irqsave(&qhp->lock, flag);
795 if (t4_wq_in_error(&qhp->wq)) { 872 if (t4_wq_in_error(&qhp->wq)) {
796 spin_unlock_irqrestore(&qhp->lock, flag); 873 spin_unlock_irqrestore(&qhp->lock, flag);
797 *bad_wr = wr; 874 complete_sq_drain_wr(qhp, wr);
798 return -EINVAL; 875 return err;
799 } 876 }
800 num_wrs = t4_sq_avail(&qhp->wq); 877 num_wrs = t4_sq_avail(&qhp->wq);
801 if (num_wrs == 0) { 878 if (num_wrs == 0) {
@@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
937 spin_lock_irqsave(&qhp->lock, flag); 1014 spin_lock_irqsave(&qhp->lock, flag);
938 if (t4_wq_in_error(&qhp->wq)) { 1015 if (t4_wq_in_error(&qhp->wq)) {
939 spin_unlock_irqrestore(&qhp->lock, flag); 1016 spin_unlock_irqrestore(&qhp->lock, flag);
940 *bad_wr = wr; 1017 complete_rq_drain_wr(qhp, wr);
941 return -EINVAL; 1018 return err;
942 } 1019 }
943 num_wrs = t4_rq_avail(&qhp->wq); 1020 num_wrs = t4_rq_avail(&qhp->wq);
944 if (num_wrs == 0) { 1021 if (num_wrs == 0) {
@@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1550 } 1627 }
1551 break; 1628 break;
1552 case C4IW_QP_STATE_CLOSING: 1629 case C4IW_QP_STATE_CLOSING:
1553 if (!internal) { 1630
1631 /*
1632 * Allow kernel users to move to ERROR for qp draining.
1633 */
1634 if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
1635 C4IW_QP_STATE_ERROR)) {
1554 ret = -EINVAL; 1636 ret = -EINVAL;
1555 goto out; 1637 goto out;
1556 } 1638 }
@@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
1643 struct c4iw_dev *rhp; 1725 struct c4iw_dev *rhp;
1644 struct c4iw_qp *qhp; 1726 struct c4iw_qp *qhp;
1645 struct c4iw_qp_attributes attrs; 1727 struct c4iw_qp_attributes attrs;
1646 struct c4iw_ucontext *ucontext;
1647 1728
1648 qhp = to_c4iw_qp(ib_qp); 1729 qhp = to_c4iw_qp(ib_qp);
1649 rhp = qhp->rhp; 1730 rhp = qhp->rhp;
@@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
1663 spin_unlock_irq(&rhp->lock); 1744 spin_unlock_irq(&rhp->lock);
1664 free_ird(rhp, qhp->attr.max_ird); 1745 free_ird(rhp, qhp->attr.max_ird);
1665 1746
1666 ucontext = ib_qp->uobject ?
1667 to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
1668 destroy_qp(&rhp->rdev, &qhp->wq,
1669 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
1670
1671 c4iw_qp_rem_ref(ib_qp); 1747 c4iw_qp_rem_ref(ib_qp);
1672 1748
1673 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); 1749 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
@@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1763 qhp->attr.max_ird = 0; 1839 qhp->attr.max_ird = 0;
1764 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; 1840 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
1765 spin_lock_init(&qhp->lock); 1841 spin_lock_init(&qhp->lock);
1766 init_completion(&qhp->sq_drained);
1767 init_completion(&qhp->rq_drained);
1768 mutex_init(&qhp->mutex); 1842 mutex_init(&qhp->mutex);
1769 init_waitqueue_head(&qhp->wait); 1843 init_waitqueue_head(&qhp->wait);
1770 kref_init(&qhp->kref); 1844 kref_init(&qhp->kref);
1845 INIT_WORK(&qhp->free_work, free_qp_work);
1771 1846
1772 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); 1847 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1773 if (ret) 1848 if (ret)
@@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1854 ma_sync_key_mm->len = PAGE_SIZE; 1929 ma_sync_key_mm->len = PAGE_SIZE;
1855 insert_mmap(ucontext, ma_sync_key_mm); 1930 insert_mmap(ucontext, ma_sync_key_mm);
1856 } 1931 }
1932
1933 c4iw_get_ucontext(ucontext);
1934 qhp->ucontext = ucontext;
1857 } 1935 }
1858 qhp->ibqp.qp_num = qhp->wq.sq.qid; 1936 qhp->ibqp.qp_num = qhp->wq.sq.qid;
1859 init_timer(&(qhp->timer)); 1937 init_timer(&(qhp->timer));
@@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1958 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; 2036 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
1959 return 0; 2037 return 0;
1960} 2038}
1961
1962static void move_qp_to_err(struct c4iw_qp *qp)
1963{
1964 struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
1965
1966 (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1967}
1968
1969void c4iw_drain_sq(struct ib_qp *ibqp)
1970{
1971 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1972 unsigned long flag;
1973 bool need_to_wait;
1974
1975 move_qp_to_err(qp);
1976 spin_lock_irqsave(&qp->lock, flag);
1977 need_to_wait = !t4_sq_empty(&qp->wq);
1978 spin_unlock_irqrestore(&qp->lock, flag);
1979
1980 if (need_to_wait)
1981 wait_for_completion(&qp->sq_drained);
1982}
1983
1984void c4iw_drain_rq(struct ib_qp *ibqp)
1985{
1986 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1987 unsigned long flag;
1988 bool need_to_wait;
1989
1990 move_qp_to_err(qp);
1991 spin_lock_irqsave(&qp->lock, flag);
1992 need_to_wait = !t4_rq_empty(&qp->wq);
1993 spin_unlock_irqrestore(&qp->lock, flag);
1994
1995 if (need_to_wait)
1996 wait_for_completion(&qp->rq_drained);
1997}
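
The qp.c rework above replaces the drain-completion machinery with a deferred free: the final `kref_put()` can happen in atomic context, where `destroy_qp()` must not run, so the release callback only queues `free_qp_work()` on the new `free_workq`, and the sleepable teardown (plus the ucontext put) runs in worker context. A pthread model of that shape (compile with -pthread; threads stand in for the kernel workqueue):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct qp { int refcount; };        /* refcount models the kref */

    static pthread_t worker;            /* models the dedicated workqueue */

    static void *free_qp_work(void *arg)
    {
        /* sleepable teardown (destroy_qp, ucontext put) belongs here */
        free(arg);
        printf("qp freed in worker context\n");
        return NULL;
    }

    static void qp_put(struct qp *qp)
    {
        if (--qp->refcount == 0)        /* release path: only queue the work */
            pthread_create(&worker, NULL, free_qp_work, qp);
    }

    int main(void)
    {
        struct qp *qp = malloc(sizeof(*qp));

        qp->refcount = 1;
        qp_put(qp);                     /* safe even from non-sleeping context */
        pthread_join(worker, NULL);
        return 0;
    }
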
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 862381aa83c8..640d22148a3e 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -179,6 +179,7 @@ struct t4_cqe {
179 __be32 wrid_hi; 179 __be32 wrid_hi;
180 __be32 wrid_low; 180 __be32 wrid_low;
181 } gen; 181 } gen;
182 u64 drain_cookie;
182 } u; 183 } u;
183 __be64 reserved; 184 __be64 reserved;
184 __be64 bits_type_ts; 185 __be64 bits_type_ts;
@@ -238,6 +239,7 @@ struct t4_cqe {
238/* generic accessor macros */ 239/* generic accessor macros */
239#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi)) 240#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
240#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low)) 241#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
242#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie)
241 243
242/* macros for flit 3 of the cqe */ 244/* macros for flit 3 of the cqe */
243#define CQE_GENBIT_S 63 245#define CQE_GENBIT_S 63
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 29e97df9e1a7..4c000d60d5c6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
100 memset(props, 0, sizeof(*props)); 100 memset(props, 0, sizeof(*props));
101 101
102 props->max_mtu = IB_MTU_4096; 102 props->max_mtu = IB_MTU_4096;
103 if (netdev->mtu >= 4096) 103 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
104 props->active_mtu = IB_MTU_4096;
105 else if (netdev->mtu >= 2048)
106 props->active_mtu = IB_MTU_2048;
107 else if (netdev->mtu >= 1024)
108 props->active_mtu = IB_MTU_1024;
109 else if (netdev->mtu >= 512)
110 props->active_mtu = IB_MTU_512;
111 else
112 props->active_mtu = IB_MTU_256;
113 104
114 props->lid = 1; 105 props->lid = 1;
115 if (netif_carrier_ok(iwdev->netdev)) 106 if (netif_carrier_ok(iwdev->netdev))
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c8413fc120e6..7031a8dd4d14 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1682,9 +1682,19 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
1682 size += ret; 1682 size += ret;
1683 } 1683 }
1684 1684
1685 if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
1686 flow_attr->num_of_specs == 1) {
1687 struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
1688 enum ib_flow_spec_type header_spec =
1689 ((union ib_flow_spec *)(flow_attr + 1))->type;
1690
1691 if (header_spec == IB_FLOW_SPEC_ETH)
1692 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
1693 }
1694
1685 ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0, 1695 ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1686 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 1696 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
1687 MLX4_CMD_WRAPPED); 1697 MLX4_CMD_NATIVE);
1688 if (ret == -ENOMEM) 1698 if (ret == -ENOMEM)
1689 pr_err("mcg table is full. Fail to register network rule.\n"); 1699 pr_err("mcg table is full. Fail to register network rule.\n");
1690 else if (ret == -ENXIO) 1700 else if (ret == -ENXIO)
@@ -1701,7 +1711,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1701 int err; 1711 int err;
1702 err = mlx4_cmd(dev, reg_id, 0, 0, 1712 err = mlx4_cmd(dev, reg_id, 0, 0,
1703 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 1713 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1704 MLX4_CMD_WRAPPED); 1714 MLX4_CMD_NATIVE);
1705 if (err) 1715 if (err)
1706 pr_err("Fail to detach network rule. registration id = 0x%llx\n", 1716 pr_err("Fail to detach network rule. registration id = 0x%llx\n",
1707 reg_id); 1717 reg_id);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index aff9fb14768b..5a31f3c6a421 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
478 memset(props, 0, sizeof(*props)); 478 memset(props, 0, sizeof(*props));
479 479
480 props->max_mtu = IB_MTU_4096; 480 props->max_mtu = IB_MTU_4096;
481 481 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
482 if (netdev->mtu >= 4096)
483 props->active_mtu = IB_MTU_4096;
484 else if (netdev->mtu >= 2048)
485 props->active_mtu = IB_MTU_2048;
486 else if (netdev->mtu >= 1024)
487 props->active_mtu = IB_MTU_1024;
488 else if (netdev->mtu >= 512)
489 props->active_mtu = IB_MTU_512;
490 else
491 props->active_mtu = IB_MTU_256;
492 482
493 props->lid = 1; 483 props->lid = 1;
494 props->lmc = 0; 484 props->lmc = 0;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 7b74d09a8217..3ac8aa5ef37d 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
576 return 0; 576 return 0;
577} 577}
578 578
579void qedr_unaffiliated_event(void *context, 579void qedr_unaffiliated_event(void *context, u8 event_code)
580 u8 event_code)
581{ 580{
582 pr_err("unaffiliated event not implemented yet\n"); 581 pr_err("unaffiliated event not implemented yet\n");
583} 582}
@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
792 if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) 791 if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
793 goto sysfs_err; 792 goto sysfs_err;
794 793
794 if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
795 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
796
795 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); 797 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
796 return dev; 798 return dev;
797 799
@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
824 ib_dealloc_device(&dev->ibdev); 826 ib_dealloc_device(&dev->ibdev);
825} 827}
826 828
827static int qedr_close(struct qedr_dev *dev) 829static void qedr_close(struct qedr_dev *dev)
828{ 830{
829 qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); 831 if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
830 832 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
831 return 0;
832} 833}
833 834
834static void qedr_shutdown(struct qedr_dev *dev) 835static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
837 qedr_remove(dev); 838 qedr_remove(dev);
838} 839}
839 840
841static void qedr_open(struct qedr_dev *dev)
842{
843 if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
844 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
845}
846
840static void qedr_mac_address_change(struct qedr_dev *dev) 847static void qedr_mac_address_change(struct qedr_dev *dev)
841{ 848{
842 union ib_gid *sgid = &dev->sgid_tbl[0]; 849 union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
863 870
864 ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); 871 ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
865 872
866 qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE); 873 qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
867 874
868 if (rc) 875 if (rc)
869 DP_ERR(dev, "Error updating mac filter\n"); 876 DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
877{ 884{
878 switch (event) { 885 switch (event) {
879 case QEDE_UP: 886 case QEDE_UP:
880 qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); 887 qedr_open(dev);
881 break; 888 break;
882 case QEDE_DOWN: 889 case QEDE_DOWN:
883 qedr_close(dev); 890 qedr_close(dev);
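
qedr now dispatches `IB_EVENT_PORT_ACTIVE`/`IB_EVENT_PORT_ERR` only on real edges of `enet_state`: the test-and-set/test-and-clear pair makes repeated `QEDE_UP`/`QEDE_DOWN` notifications idempotent. Userspace model of the bitops (non-atomic, for illustration only):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long enet_state;    /* bit 0 plays QEDR_ENET_STATE_BIT */

    static bool test_and_set_bit(int nr, unsigned long *p)
    {
        bool old = *p & (1UL << nr);

        *p |= 1UL << nr;
        return old;
    }

    static bool test_and_clear_bit(int nr, unsigned long *p)
    {
        bool old = *p & (1UL << nr);

        *p &= ~(1UL << nr);
        return old;
    }

    int main(void)
    {
        for (int i = 0; i < 2; i++)     /* duplicate QEDE_UP notifications */
            if (!test_and_set_bit(0, &enet_state))
                printf("dispatch IB_EVENT_PORT_ACTIVE\n");

        for (int i = 0; i < 2; i++)     /* duplicate QEDE_DOWN notifications */
            if (test_and_clear_bit(0, &enet_state))
                printf("dispatch IB_EVENT_PORT_ERR\n");
        return 0;
    }
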
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 620badd7d4fb..bb32e4792ec9 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -113,6 +113,8 @@ struct qedr_device_attr {
113 struct qed_rdma_events events; 113 struct qed_rdma_events events;
114}; 114};
115 115
116#define QEDR_ENET_STATE_BIT (0)
117
116struct qedr_dev { 118struct qedr_dev {
117 struct ib_device ibdev; 119 struct ib_device ibdev;
118 struct qed_dev *cdev; 120 struct qed_dev *cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
153 struct qedr_cq *gsi_sqcq; 155 struct qedr_cq *gsi_sqcq;
154 struct qedr_cq *gsi_rqcq; 156 struct qedr_cq *gsi_rqcq;
155 struct qedr_qp *gsi_qp; 157 struct qedr_qp *gsi_qp;
158
159 unsigned long enet_state;
156}; 160};
157 161
158#define QEDR_MAX_SQ_PBL (0x8000) 162#define QEDR_MAX_SQ_PBL (0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
188#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) 192#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
189 193
190#define QEDR_MAX_PORT (1) 194#define QEDR_MAX_PORT (1)
195#define QEDR_PORT (1)
191 196
192#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) 197#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
193 198
@@ -251,9 +256,6 @@ struct qedr_cq {
251 256
252 u16 icid; 257 u16 icid;
253 258
254 /* Lock to protect completion handler */
255 spinlock_t comp_handler_lock;
256
257 /* Lock to protect multiplem CQ's */ 259 /* Lock to protect multiplem CQ's */
258 spinlock_t cq_lock; 260 spinlock_t cq_lock;
259 u8 arm_flags; 261 u8 arm_flags;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 63890ebb72bd..a9a8d8745d2e 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
87 qedr_inc_sw_gsi_cons(&qp->sq); 87 qedr_inc_sw_gsi_cons(&qp->sq);
88 spin_unlock_irqrestore(&qp->q_lock, flags); 88 spin_unlock_irqrestore(&qp->q_lock, flags);
89 89
90 if (cq->ibcq.comp_handler) { 90 if (cq->ibcq.comp_handler)
91 spin_lock_irqsave(&cq->comp_handler_lock, flags);
92 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); 91 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
93 spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
94 }
95} 92}
96 93
97void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, 94void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
113 110
114 spin_unlock_irqrestore(&qp->q_lock, flags); 111 spin_unlock_irqrestore(&qp->q_lock, flags);
115 112
116 if (cq->ibcq.comp_handler) { 113 if (cq->ibcq.comp_handler)
117 spin_lock_irqsave(&cq->comp_handler_lock, flags);
118 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); 114 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
119 spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
120 }
121} 115}
122 116
123static void qedr_destroy_gsi_cq(struct qedr_dev *dev, 117static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
404 } 398 }
405 399
406 if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h)) 400 if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
407 packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
408 else
409 packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB; 401 packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
402 else
403 packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
410 404
411 packet->roce_mode = roce_mode; 405 packet->roce_mode = roce_mode;
412 memcpy(packet->header.vaddr, ud_header_buffer, header_size); 406 memcpy(packet->header.vaddr, ud_header_buffer, header_size);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 57c8de208077..c7d6c9a783bd 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
471 struct ib_ucontext *context, struct ib_udata *udata) 471 struct ib_ucontext *context, struct ib_udata *udata)
472{ 472{
473 struct qedr_dev *dev = get_qedr_dev(ibdev); 473 struct qedr_dev *dev = get_qedr_dev(ibdev);
474 struct qedr_ucontext *uctx = NULL;
475 struct qedr_alloc_pd_uresp uresp;
476 struct qedr_pd *pd; 474 struct qedr_pd *pd;
477 u16 pd_id; 475 u16 pd_id;
478 int rc; 476 int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
489 if (!pd) 487 if (!pd)
490 return ERR_PTR(-ENOMEM); 488 return ERR_PTR(-ENOMEM);
491 489
492 dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); 490 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
491 if (rc)
492 goto err;
493 493
494 uresp.pd_id = pd_id;
495 pd->pd_id = pd_id; 494 pd->pd_id = pd_id;
496 495
497 if (udata && context) { 496 if (udata && context) {
497 struct qedr_alloc_pd_uresp uresp;
498
499 uresp.pd_id = pd_id;
500
498 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 501 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
499 if (rc) 502 if (rc) {
500 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); 503 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
501 uctx = get_qedr_ucontext(context); 504 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
502 uctx->pd = pd; 505 goto err;
503 pd->uctx = uctx; 506 }
507
508 pd->uctx = get_qedr_ucontext(context);
509 pd->uctx->pd = pd;
504 } 510 }
505 511
506 return &pd->ibpd; 512 return &pd->ibpd;
513
514err:
515 kfree(pd);
516 return ERR_PTR(rc);
507} 517}
508 518
509int qedr_dealloc_pd(struct ib_pd *ibpd) 519int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1600,7 +1610,7 @@ err0:
1600 return ERR_PTR(-EFAULT); 1610 return ERR_PTR(-EFAULT);
1601} 1611}
1602 1612
1603enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) 1613static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1604{ 1614{
1605 switch (qp_state) { 1615 switch (qp_state) {
1606 case QED_ROCE_QP_STATE_RESET: 1616 case QED_ROCE_QP_STATE_RESET:
@@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1621 return IB_QPS_ERR; 1631 return IB_QPS_ERR;
1622} 1632}
1623 1633
1624enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state) 1634static enum qed_roce_qp_state qedr_get_state_from_ibqp(
1635 enum ib_qp_state qp_state)
1625{ 1636{
1626 switch (qp_state) { 1637 switch (qp_state) {
1627 case IB_QPS_RESET: 1638 case IB_QPS_RESET:
@@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
1657 int status = 0; 1668 int status = 0;
1658 1669
1659 if (new_state == qp->state) 1670 if (new_state == qp->state)
1660 return 1; 1671 return 0;
1661 1672
1662 switch (qp->state) { 1673 switch (qp->state) {
1663 case QED_ROCE_QP_STATE_RESET: 1674 case QED_ROCE_QP_STATE_RESET:
@@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
1733 /* ERR->XXX */ 1744 /* ERR->XXX */
1734 switch (new_state) { 1745 switch (new_state) {
1735 case QED_ROCE_QP_STATE_RESET: 1746 case QED_ROCE_QP_STATE_RESET:
1747 if ((qp->rq.prod != qp->rq.cons) ||
1748 (qp->sq.prod != qp->sq.cons)) {
1749 DP_NOTICE(dev,
1750 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
1751 qp->rq.prod, qp->rq.cons, qp->sq.prod,
1752 qp->sq.cons);
1753 status = -EINVAL;
1754 }
1736 break; 1755 break;
1737 default: 1756 default:
1738 status = -EINVAL; 1757 status = -EINVAL;
@@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1865 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]); 1884 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1866 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n", 1885 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1867 qp_params.remote_mac_addr); 1886 qp_params.remote_mac_addr);
1868;
1869 1887
1870 qp_params.mtu = qp->mtu; 1888 qp_params.mtu = qp->mtu;
1871 qp_params.lb_indication = false; 1889 qp_params.lb_indication = false;
@@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
2016 2034
2017 qp_attr->qp_state = qedr_get_ibqp_state(params.state); 2035 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2018 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state); 2036 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2019 qp_attr->path_mtu = iboe_get_mtu(params.mtu); 2037 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2020 qp_attr->path_mig_state = IB_MIG_MIGRATED; 2038 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2021 qp_attr->rq_psn = params.rq_psn; 2039 qp_attr->rq_psn = params.rq_psn;
2022 qp_attr->sq_psn = params.sq_psn; 2040 qp_attr->sq_psn = params.sq_psn;
@@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
2028 qp_attr->cap.max_recv_wr = qp->rq.max_wr; 2046 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2029 qp_attr->cap.max_send_sge = qp->sq.max_sges; 2047 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2030 qp_attr->cap.max_recv_sge = qp->rq.max_sges; 2048 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2031 qp_attr->cap.max_inline_data = qp->max_inline_data; 2049 qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
2032 qp_init_attr->cap = qp_attr->cap; 2050 qp_init_attr->cap = qp_attr->cap;
2033 2051
2034 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0], 2052 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
2302 return rc; 2320 return rc;
2303} 2321}
2304 2322
2305struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len) 2323static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
2324 int max_page_list_len)
2306{ 2325{
2307 struct qedr_pd *pd = get_qedr_pd(ibpd); 2326 struct qedr_pd *pd = get_qedr_pd(ibpd);
2308 struct qedr_dev *dev = get_qedr_dev(ibpd->device); 2327 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
@@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
2704 return 0; 2723 return 0;
2705} 2724}
2706 2725
2707enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) 2726static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
2708{ 2727{
2709 switch (opcode) { 2728 switch (opcode) {
2710 case IB_WR_RDMA_WRITE: 2729 case IB_WR_RDMA_WRITE:
@@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
2729 } 2748 }
2730} 2749}
2731 2750
2732inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) 2751static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
2733{ 2752{
2734 int wq_is_full, err_wr, pbl_is_full; 2753 int wq_is_full, err_wr, pbl_is_full;
2735 struct qedr_dev *dev = qp->dev; 2754 struct qedr_dev *dev = qp->dev;
@@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
2766 return true; 2785 return true;
2767} 2786}
2768 2787
2769int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 2788static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2770 struct ib_send_wr **bad_wr) 2789 struct ib_send_wr **bad_wr)
2771{ 2790{
2772 struct qedr_dev *dev = get_qedr_dev(ibqp->device); 2791 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
@@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
3234 IB_WC_SUCCESS, 0); 3253 IB_WC_SUCCESS, 0);
3235 break; 3254 break;
3236 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR: 3255 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
3237 DP_ERR(dev, 3256 if (qp->state != QED_ROCE_QP_STATE_ERR)
3238 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", 3257 DP_ERR(dev,
3239 cq->icid, qp->icid); 3258 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
3259 cq->icid, qp->icid);
3240 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, 3260 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
3241 IB_WC_WR_FLUSH_ERR, 1); 3261 IB_WC_WR_FLUSH_ERR, 1);
3242 break; 3262 break;
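
Among the verbs.c changes, `qedr_alloc_pd()` previously ignored the firmware allocation result and leaked the pd when the copy to userspace failed; every failure path now unwinds what was taken and returns an encoded error. Stand-alone model of the `ERR_PTR` unwind pattern (the helpers below are invented stand-ins for the rdma ops and udata copy):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ERR_PTR(err) ((void *)(intptr_t)(err))
    #define IS_ERR(p)    ((uintptr_t)(p) >= (uintptr_t)-4095)

    static int hw_alloc_pd(int *id) { *id = 7; return 0; }
    static void hw_dealloc_pd(int id) { (void)id; }
    static int copy_to_udata(void) { return -EFAULT; }  /* forced failure */

    static void *alloc_pd(int from_user)
    {
        int *pd = malloc(sizeof(*pd));
        int rc;

        if (!pd)
            return ERR_PTR(-ENOMEM);

        rc = hw_alloc_pd(pd);           /* result is checked now */
        if (rc)
            goto err;

        if (from_user) {
            rc = copy_to_udata();
            if (rc) {
                hw_dealloc_pd(*pd);     /* undo before bailing out */
                goto err;
            }
        }
        return pd;
    err:
        free(pd);
        return ERR_PTR(rc);
    }

    int main(void)
    {
        printf("IS_ERR=%d\n", IS_ERR(alloc_pd(1)));
        return 0;
    }
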
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 231a1ce1f4be..bd8fbd3d2032 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
1029 if (ret) { 1029 if (ret) {
1030 dev_err(&pdev->dev, "failed to allocate interrupts\n"); 1030 dev_err(&pdev->dev, "failed to allocate interrupts\n");
1031 ret = -ENOMEM; 1031 ret = -ENOMEM;
1032 goto err_netdevice; 1032 goto err_free_cq_ring;
1033 } 1033 }
1034 1034
1035 /* Allocate UAR table. */ 1035 /* Allocate UAR table. */
@@ -1092,8 +1092,6 @@ err_free_uar_table:
1092err_free_intrs: 1092err_free_intrs:
1093 pvrdma_free_irq(dev); 1093 pvrdma_free_irq(dev);
1094 pvrdma_disable_msi_all(dev); 1094 pvrdma_disable_msi_all(dev);
1095err_netdevice:
1096 unregister_netdevice_notifier(&dev->nb_netdev);
1097err_free_cq_ring: 1095err_free_cq_ring:
1098 pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); 1096 pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
1099err_free_async_ring: 1097err_free_async_ring:
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 54891370d18a..c2aa52638dcb 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
306 union pvrdma_cmd_resp rsp; 306 union pvrdma_cmd_resp rsp;
307 struct pvrdma_cmd_create_uc *cmd = &req.create_uc; 307 struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
308 struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp; 308 struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
309 struct pvrdma_alloc_ucontext_resp uresp; 309 struct pvrdma_alloc_ucontext_resp uresp = {0};
310 int ret; 310 int ret;
311 void *ptr; 311 void *ptr;
312 312
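
`uresp` is later copied out to userspace; without the `= {0}`, any member the function never sets carries stale kernel stack bytes (and in practice the initializer zeroes padding too). A small demonstration of why piecemeal assignment alone is not enough (`memcpy()` stands in for the copy to userspace; the struct layout is illustrative):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct resp { int a; char flag; int b; };

    int main(void)
    {
        struct resp r = {0};            /* without this, unset bytes are stale */
        unsigned char buf[sizeof(r)];

        r.a = 1;                        /* only some fields get filled */
        memcpy(buf, &r, sizeof(r));     /* stands in for ib_copy_to_udata() */
        for (size_t i = 0; i < sizeof(buf); i++)
            printf("%02x ", buf[i]);
        printf("\n");
        return 0;
    }
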
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index d0faca294006..86a6585b847d 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -59,9 +59,11 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
59 59
60 case RXE_MEM_TYPE_MR: 60 case RXE_MEM_TYPE_MR:
61 case RXE_MEM_TYPE_FMR: 61 case RXE_MEM_TYPE_FMR:
62 return ((iova < mem->iova) || 62 if (iova < mem->iova ||
63 ((iova + length) > (mem->iova + mem->length))) ? 63 length > mem->length ||
64 -EFAULT : 0; 64 iova > mem->iova + mem->length - length)
65 return -EFAULT;
66 return 0;
65 67
66 default: 68 default:
67 return -EFAULT; 69 return -EFAULT;
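
The old rxe check computed `iova + length` and `mem->iova + mem->length`, both of which can wrap for values near the top of the address space, letting an out-of-range request pass. The rewritten test never forms a sum that can overflow. Compilable version of the fixed predicate with illustrative bounds:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    static int check(uint64_t base, uint64_t len,
                     uint64_t iova, uint64_t length)
    {
        if (iova < base || length > len ||
            iova > base + len - length)
            return -EFAULT;
        return 0;
    }

    int main(void)
    {
        uint64_t base = 0x1000, len = 0x1000;

        /* old test: iova + length wraps past zero and looks in range */
        printf("wrapping request: %d\n",
               check(base, len, UINT64_MAX - 8, 16));
        printf("valid request:    %d\n",
               check(base, len, 0x1800, 0x100));
        return 0;
    }
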
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 342e78163613..4abdeb359fb4 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
555 } 555 }
556 556
557 spin_lock_bh(&dev_list_lock); 557 spin_lock_bh(&dev_list_lock);
558 list_add_tail(&rxe_dev_list, &rxe->list); 558 list_add_tail(&rxe->list, &rxe_dev_list);
559 spin_unlock_bh(&dev_list_lock); 559 spin_unlock_bh(&dev_list_lock);
560 return rxe; 560 return rxe;
561} 561}
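
`list_add_tail(new, head)` takes the entry first and the list head second; the swapped call above spliced the global `rxe_dev_list` head onto the new device's own node, leaving the global list effectively empty. Minimal model of the corrected call:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
    }

    int main(void)
    {
        struct list_head dev_list = LIST_HEAD_INIT(dev_list);
        struct list_head rxe = LIST_HEAD_INIT(rxe);

        list_add_tail(&rxe, &dev_list);     /* entry first, list second */
        printf("list %s\n",
               dev_list.next == &rxe ? "contains rxe" : "is broken");
        return 0;
    }
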
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 486d576e55bc..44b2108253bd 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
813 del_timer_sync(&qp->rnr_nak_timer); 813 del_timer_sync(&qp->rnr_nak_timer);
814 814
815 rxe_cleanup_task(&qp->req.task); 815 rxe_cleanup_task(&qp->req.task);
816 if (qp_type(qp) == IB_QPT_RC) 816 rxe_cleanup_task(&qp->comp.task);
817 rxe_cleanup_task(&qp->comp.task);
818 817
819 /* flush out any receive wr's or pending requests */ 818 /* flush out any receive wr's or pending requests */
820 __rxe_do_task(&qp->req.task); 819 __rxe_do_task(&qp->req.task);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 3435efff8799..5bcf07328972 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -479,7 +479,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
479 goto err2; 479 goto err2;
480 } 480 }
481 481
482 resid = mtu; 482 qp->resp.resid = mtu;
483 } else { 483 } else {
484 if (pktlen != resid) { 484 if (pktlen != resid) {
485 state = RESPST_ERR_LENGTH; 485 state = RESPST_ERR_LENGTH;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9104e6b8cac9..e71af717e71b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
651 SHOST_DIX_GUARD_CRC); 651 SHOST_DIX_GUARD_CRC);
652 } 652 }
653 653
654 /*
655 * Limit the sg_tablesize and max_sectors based on the device
656 * max fastreg page list length.
657 */
658 shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
659 ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
660
661 if (iscsi_host_add(shost, 654 if (iscsi_host_add(shost,
662 ib_conn->device->ib_device->dma_device)) { 655 ib_conn->device->ib_device->dma_device)) {
663 mutex_unlock(&iser_conn->state_mutex); 656 mutex_unlock(&iser_conn->state_mutex);
@@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
679 max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9; 672 max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
680 shost->max_sectors = min(iser_max_sectors, max_fr_sectors); 673 shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
681 674
675 iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
676 iser_conn, shost->sg_tablesize,
677 shost->max_sectors);
678
682 if (cmds_max > max_cmds) { 679 if (cmds_max > max_cmds) {
683 iser_info("cmds_max changed from %u to %u\n", 680 iser_info("cmds_max changed from %u to %u\n",
684 cmds_max, max_cmds); 681 cmds_max, max_cmds);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0be6a7c5ddb5..9d0b22ad58c1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -496,7 +496,6 @@ struct ib_conn {
496 * @rx_descs: rx buffers array (cyclic buffer) 496 * @rx_descs: rx buffers array (cyclic buffer)
497 * @num_rx_descs: number of rx descriptors 497 * @num_rx_descs: number of rx descriptors
498 * @scsi_sg_tablesize: scsi host sg_tablesize 498 * @scsi_sg_tablesize: scsi host sg_tablesize
499 * @scsi_max_sectors: scsi host max sectors
500 */ 499 */
501struct iser_conn { 500struct iser_conn {
502 struct ib_conn ib_conn; 501 struct ib_conn ib_conn;
@@ -519,7 +518,6 @@ struct iser_conn {
519 struct iser_rx_desc *rx_descs; 518 struct iser_rx_desc *rx_descs;
520 u32 num_rx_descs; 519 u32 num_rx_descs;
521 unsigned short scsi_sg_tablesize; 520 unsigned short scsi_sg_tablesize;
522 unsigned int scsi_max_sectors;
523 bool snd_w_inv; 521 bool snd_w_inv;
524}; 522};
525 523
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 8ae7a3beddb7..6a9d1cb548ee 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
707 sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, 707 sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
708 device->ib_device->attrs.max_fast_reg_page_list_len); 708 device->ib_device->attrs.max_fast_reg_page_list_len);
709 709
710 if (sg_tablesize > sup_sg_tablesize) { 710 iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
711 sg_tablesize = sup_sg_tablesize;
712 iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
713 } else {
714 iser_conn->scsi_max_sectors = max_sectors;
715 }
716
717 iser_conn->scsi_sg_tablesize = sg_tablesize;
718
719 iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
720 iser_conn, iser_conn->scsi_sg_tablesize,
721 iser_conn->scsi_max_sectors);
722} 711}
723 712
724/** 713/**
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8ddc07123193..79bf48477ddb 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
371 struct srp_fr_desc *d; 371 struct srp_fr_desc *d;
372 struct ib_mr *mr; 372 struct ib_mr *mr;
373 int i, ret = -EINVAL; 373 int i, ret = -EINVAL;
374 enum ib_mr_type mr_type;
374 375
375 if (pool_size <= 0) 376 if (pool_size <= 0)
376 goto err; 377 goto err;
@@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
384 spin_lock_init(&pool->lock); 385 spin_lock_init(&pool->lock);
385 INIT_LIST_HEAD(&pool->free_list); 386 INIT_LIST_HEAD(&pool->free_list);
386 387
388 if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
389 mr_type = IB_MR_TYPE_SG_GAPS;
390 else
391 mr_type = IB_MR_TYPE_MEM_REG;
392
387 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { 393 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
388 mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 394 mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
389 max_page_list_len);
390 if (IS_ERR(mr)) { 395 if (IS_ERR(mr)) {
391 ret = PTR_ERR(mr); 396 ret = PTR_ERR(mr);
392 if (ret == -ENOMEM) 397 if (ret == -ENOMEM)
@@ -3694,6 +3699,12 @@ static int __init srp_init_module(void)
3694 indirect_sg_entries = cmd_sg_entries; 3699 indirect_sg_entries = cmd_sg_entries;
3695 } 3700 }
3696 3701
3702 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
3703 pr_warn("Clamping indirect_sg_entries to %u\n",
3704 SG_MAX_SEGMENTS);
3705 indirect_sg_entries = SG_MAX_SEGMENTS;
3706 }
3707
3697 srp_remove_wq = create_workqueue("srp_remove"); 3708 srp_remove_wq = create_workqueue("srp_remove");
3698 if (!srp_remove_wq) { 3709 if (!srp_remove_wq) {
3699 ret = -ENOMEM; 3710 ret = -ENOMEM;
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index f3135ae22df4..abd18f31b24f 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -22,7 +22,6 @@
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/miscdevice.h>
26#include <linux/module.h> 25#include <linux/module.h>
27#include <linux/poll.h> 26#include <linux/poll.h>
28#include <linux/init.h> 27#include <linux/init.h>
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 6d9499658671..c7d5b2b643d1 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1377,6 +1377,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
1377 input_dev->name = xpad->name; 1377 input_dev->name = xpad->name;
1378 input_dev->phys = xpad->phys; 1378 input_dev->phys = xpad->phys;
1379 usb_to_input_id(xpad->udev, &input_dev->id); 1379 usb_to_input_id(xpad->udev, &input_dev->id);
1380
1381 if (xpad->xtype == XTYPE_XBOX360W) {
1382 /* x360w controllers and the receiver have different ids */
1383 input_dev->id.product = 0x02a1;
1384 }
1385
1380 input_dev->dev.parent = &xpad->intf->dev; 1386 input_dev->dev.parent = &xpad->intf->dev;
1381 1387
1382 input_set_drvdata(input_dev, xpad); 1388 input_set_drvdata(input_dev, xpad);
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index a8b0a2eec344..7fed92fb8cc1 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -136,7 +136,6 @@ static const struct i2c_device_id adxl34x_id[] = {
136 136
137MODULE_DEVICE_TABLE(i2c, adxl34x_id); 137MODULE_DEVICE_TABLE(i2c, adxl34x_id);
138 138
139#ifdef CONFIG_OF
140static const struct of_device_id adxl34x_of_id[] = { 139static const struct of_device_id adxl34x_of_id[] = {
141 /* 140 /*
142 * The ADXL346 is backward-compatible with the ADXL345. Differences are 141 * The ADXL346 is backward-compatible with the ADXL345. Differences are
@@ -153,13 +152,12 @@ static const struct of_device_id adxl34x_of_id[] = {
153}; 152};
154 153
155MODULE_DEVICE_TABLE(of, adxl34x_of_id); 154MODULE_DEVICE_TABLE(of, adxl34x_of_id);
156#endif
157 155
158static struct i2c_driver adxl34x_driver = { 156static struct i2c_driver adxl34x_driver = {
159 .driver = { 157 .driver = {
160 .name = "adxl34x", 158 .name = "adxl34x",
161 .pm = &adxl34x_i2c_pm, 159 .pm = &adxl34x_i2c_pm,
162 .of_match_table = of_match_ptr(adxl34x_of_id), 160 .of_match_table = adxl34x_of_id,
163 }, 161 },
164 .probe = adxl34x_i2c_probe, 162 .probe = adxl34x_i2c_probe,
165 .remove = adxl34x_i2c_remove, 163 .remove = adxl34x_i2c_remove,
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 92595b98e7ed..022be0e22eba 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -263,13 +263,21 @@ static int uinput_create_device(struct uinput_device *udev)
263 return -EINVAL; 263 return -EINVAL;
264 } 264 }
265 265
266 if (test_bit(ABS_MT_SLOT, dev->absbit)) { 266 if (test_bit(EV_ABS, dev->evbit)) {
267 nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1; 267 input_alloc_absinfo(dev);
268 error = input_mt_init_slots(dev, nslot, 0); 268 if (!dev->absinfo) {
269 if (error) 269 error = -EINVAL;
270 goto fail1; 270 goto fail1;
271 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { 271 }
272 input_set_events_per_packet(dev, 60); 272
273 if (test_bit(ABS_MT_SLOT, dev->absbit)) {
274 nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
275 error = input_mt_init_slots(dev, nslot, 0);
276 if (error)
277 goto fail1;
278 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
279 input_set_events_per_packet(dev, 60);
280 }
273 } 281 }
274 282
275 if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) { 283 if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) {
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index cde6f4bd8ea2..6d279aa27cb9 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -114,7 +114,7 @@ enum SS4_PACKET_ID {
114 (_b[1] & 0x7F) \ 114 (_b[1] & 0x7F) \
115 ) 115 )
116 116
117#define SS4_TS_Y_V2(_b) (s8)( \ 117#define SS4_TS_Y_V2(_b) -(s8)( \
118 ((_b[3] & 0x01) << 7) | \ 118 ((_b[3] & 0x01) << 7) | \
119 (_b[2] & 0x7F) \ 119 (_b[2] & 0x7F) \
120 ) 120 )
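
The SS4_TS_Y_V2() fix negates the assembled 8-bit field: casting to s8 yields the raw signed value, and per the change the trackstick Y axis needs its inverse. A standalone sketch with a made-up packet (the byte layout follows the macro above):

    #include <stdint.h>
    #include <stdio.h>

    typedef int8_t s8;

    static int ts_y_old(const uint8_t *b)
    {
            return (s8)(((b[3] & 0x01) << 7) | (b[2] & 0x7F));
    }

    static int ts_y_new(const uint8_t *b)
    {
            return -(s8)(((b[3] & 0x01) << 7) | (b[2] & 0x7F));
    }

    int main(void)
    {
            /* sample packet: sign bit set, raw s8 value is -123 */
            uint8_t pkt[4] = { 0, 0, 0x05, 0x01 };

            printf("old: %d  new: %d\n", ts_y_old(pkt), ts_y_new(pkt));
            return 0;
    }
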
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index fa598f7f4372..1e1d0ad406f2 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1231,6 +1231,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
1231 { "ELAN0000", 0 }, 1231 { "ELAN0000", 0 },
1232 { "ELAN0100", 0 }, 1232 { "ELAN0100", 0 },
1233 { "ELAN0600", 0 }, 1233 { "ELAN0600", 0 },
1234 { "ELAN0605", 0 },
1234 { "ELAN1000", 0 }, 1235 { "ELAN1000", 0 },
1235 { } 1236 { }
1236}; 1237};
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index aa7c5da60800..cb2bf203f4ca 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -29,7 +29,7 @@
29 * after soft reset, we should wait for 1 ms 29 * after soft reset, we should wait for 1 ms
30 * before the device becomes operational 30 * before the device becomes operational
31 */ 31 */
32#define SOFT_RESET_DELAY_MS 3 32#define SOFT_RESET_DELAY_US 3000
33/* and after hard reset, we should wait for max 500ms */ 33/* and after hard reset, we should wait for max 500ms */
34#define HARD_RESET_DELAY_MS 500 34#define HARD_RESET_DELAY_MS 500
35 35
@@ -311,7 +311,7 @@ static int synaptics_i2c_reset_config(struct i2c_client *client)
311 if (ret) { 311 if (ret) {
312 dev_err(&client->dev, "Unable to reset device\n"); 312 dev_err(&client->dev, "Unable to reset device\n");
313 } else { 313 } else {
314 msleep(SOFT_RESET_DELAY_MS); 314 usleep_range(SOFT_RESET_DELAY_US, SOFT_RESET_DELAY_US + 100);
315 ret = synaptics_i2c_config(client); 315 ret = synaptics_i2c_config(client);
316 if (ret) 316 if (ret)
317 dev_err(&client->dev, "Unable to config device\n"); 317 dev_err(&client->dev, "Unable to config device\n");
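
Replacing msleep(3) with usleep_range() follows the usual kernel guidance: msleep() is jiffy-based and can overshoot badly for delays under roughly 20 ms. A kernel-style sketch of the pattern (illustrative helper, not compilable on its own):

    #include <linux/delay.h>

    static void short_settle_delay(void)
    {
            /* ~3 ms settle time; the 100 us slack lets the timer
             * subsystem coalesce nearby wakeups */
            usleep_range(3000, 3100);
    }
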
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index 30cc627a4f45..bb7762bf2879 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -41,13 +41,20 @@ config RMI4_SMB
41 41
42config RMI4_F03 42config RMI4_F03
43 bool "RMI4 Function 03 (PS2 Guest)" 43 bool "RMI4 Function 03 (PS2 Guest)"
44 depends on RMI4_CORE && SERIO 44 depends on RMI4_CORE
45 help 45 help
46 Say Y here if you want to add support for RMI4 function 03. 46 Say Y here if you want to add support for RMI4 function 03.
47 47
48 Function 03 provides PS2 guest support for RMI4 devices. This 48 Function 03 provides PS2 guest support for RMI4 devices. This
49 includes support for TrackPoints on TouchPads. 49 includes support for TrackPoints on TouchPads.
50 50
51config RMI4_F03_SERIO
52 tristate
53 depends on RMI4_CORE
54 depends on RMI4_F03
55 default RMI4_CORE
56 select SERIO
57
51config RMI4_2D_SENSOR 58config RMI4_2D_SENSOR
52 bool 59 bool
53 depends on RMI4_CORE 60 depends on RMI4_CORE
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 11447ab1055c..bf5c36e229ba 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -901,7 +901,7 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
901 data->enabled = true; 901 data->enabled = true;
902 if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) { 902 if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
903 retval = disable_irq_wake(irq); 903 retval = disable_irq_wake(irq);
904 if (!retval) 904 if (retval)
905 dev_warn(&rmi_dev->dev, 905 dev_warn(&rmi_dev->dev,
906 "Failed to disable irq for wake: %d\n", 906 "Failed to disable irq for wake: %d\n",
907 retval); 907 retval);
@@ -936,7 +936,7 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
936 disable_irq(irq); 936 disable_irq(irq);
937 if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) { 937 if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
938 retval = enable_irq_wake(irq); 938 retval = enable_irq_wake(irq);
939 if (!retval) 939 if (retval)
940 dev_warn(&rmi_dev->dev, 940 dev_warn(&rmi_dev->dev,
941 "Failed to enable irq for wake: %d\n", 941 "Failed to enable irq for wake: %d\n",
942 retval); 942 retval);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 77551f522202..a7618776705a 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
211 DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), 211 DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
212 }, 212 },
213 }, 213 },
214 {
215 .matches = {
216 DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
217 DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
218 },
219 },
214 { } 220 { }
215}; 221};
216 222
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 02aec284deca..3e6003d32e56 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -914,9 +914,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
914 914
915 case QUEUE_HEADER_NORMAL: 915 case QUEUE_HEADER_NORMAL:
916 report_count = ts->buf[FW_HDR_COUNT]; 916 report_count = ts->buf[FW_HDR_COUNT];
917 if (report_count > 3) { 917 if (report_count == 0 || report_count > 3) {
918 dev_err(&client->dev, 918 dev_err(&client->dev,
919 "too large report count: %*ph\n", 919 "bad report count: %*ph\n",
920 HEADER_SIZE, ts->buf); 920 HEADER_SIZE, ts->buf);
921 break; 921 break;
922 } 922 }
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 83cf11312fd9..c9d1c91e1887 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
682 } 682 }
683 platform_set_drvdata(wm->battery_dev, wm); 683 platform_set_drvdata(wm->battery_dev, wm);
684 wm->battery_dev->dev.parent = dev; 684 wm->battery_dev->dev.parent = dev;
685 wm->battery_dev->dev.platform_data = pdata->batt_pdata; 685 wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
686 ret = platform_device_add(wm->battery_dev); 686 ret = platform_device_add(wm->battery_dev);
687 if (ret < 0) 687 if (ret < 0)
688 goto batt_reg_err; 688 goto batt_reg_err;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 019e02707cd5..3ef0f42984f2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1023,7 +1023,7 @@ again:
1023 next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; 1023 next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
1024 left = (head - next_tail) % CMD_BUFFER_SIZE; 1024 left = (head - next_tail) % CMD_BUFFER_SIZE;
1025 1025
1026 if (left <= 2) { 1026 if (left <= 0x20) {
1027 struct iommu_cmd sync_cmd; 1027 struct iommu_cmd sync_cmd;
1028 int ret; 1028 int ret;
1029 1029
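
The amd_iommu change raises the low-water mark on the command ring from 2 bytes to 0x20, i.e. room for two 16-byte commands rather than a sliver, so the COMPLETION_WAIT injected when the ring fills always fits. A userspace sketch of the free-space arithmetic (ring and command sizes are illustrative values):

    #include <stdio.h>

    #define CMD_BUFFER_SIZE 4096u
    #define CMD_SIZE        16u

    static unsigned int ring_left(unsigned int head, unsigned int tail)
    {
            unsigned int next_tail = (tail + CMD_SIZE) % CMD_BUFFER_SIZE;

            /* free space in a power-of-two ring */
            return (head - next_tail) % CMD_BUFFER_SIZE;
    }

    int main(void)
    {
            /* nearly full ring: head just ahead of tail */
            printf("left = %u\n", ring_left(48, 16));
            return 0;
    }
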
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index a88576d50740..8ccbd7023194 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -903,8 +903,10 @@ int __init detect_intel_iommu(void)
903 x86_init.iommu.iommu_init = intel_iommu_init; 903 x86_init.iommu.iommu_init = intel_iommu_init;
904#endif 904#endif
905 905
906 acpi_put_table(dmar_tbl); 906 if (dmar_tbl) {
907 dmar_tbl = NULL; 907 acpi_put_table(dmar_tbl);
908 dmar_tbl = NULL;
909 }
908 up_write(&dmar_global_lock); 910 up_write(&dmar_global_lock);
909 911
910 return ret ? 1 : -ENODEV; 912 return ret ? 1 : -ENODEV;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c66c273dfd8a..8a185250ae5a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2037,6 +2037,25 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
2037 if (context_present(context)) 2037 if (context_present(context))
2038 goto out_unlock; 2038 goto out_unlock;
2039 2039
2040 /*
2041 * For kdump cases, old valid entries may be cached due to the
2042 * in-flight DMA and copied pgtable, but there is no unmapping
2043 * behaviour for them, thus we need an explicit cache flush for
2044 * the newly-mapped device. For kdump, at this point, the device
2045 * is supposed to finish reset at its driver probe stage, so no
 2046 * in-flight DMA will exist, and we need not worry about it
 2047 * hereafter.
2048 */
2049 if (context_copied(context)) {
2050 u16 did_old = context_domain_id(context);
2051
2052 if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
2053 iommu->flush.flush_context(iommu, did_old,
2054 (((u16)bus) << 8) | devfn,
2055 DMA_CCMD_MASK_NOBIT,
2056 DMA_CCMD_DEVICE_INVL);
2057 }
2058
2040 pgd = domain->pgd; 2059 pgd = domain->pgd;
2041 2060
2042 context_clear_entry(context); 2061 context_clear_entry(context);
@@ -5185,6 +5204,25 @@ static void intel_iommu_remove_device(struct device *dev)
5185} 5204}
5186 5205
5187#ifdef CONFIG_INTEL_IOMMU_SVM 5206#ifdef CONFIG_INTEL_IOMMU_SVM
5207#define MAX_NR_PASID_BITS (20)
5208static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
5209{
5210 /*
 5211 * Convert ecap_pss to extended context entry pts encoding, also
5212 * respect the soft pasid_max value set by the iommu.
5213 * - number of PASID bits = ecap_pss + 1
5214 * - number of PASID table entries = 2^(pts + 5)
5215 * Therefore, pts = ecap_pss - 4
5216 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
5217 */
5218 if (ecap_pss(iommu->ecap) < 5)
5219 return 0;
5220
 5221 /* pasid_max is encoded as the actual number of entries, not the bits */
5222 return find_first_bit((unsigned long *)&iommu->pasid_max,
5223 MAX_NR_PASID_BITS) - 5;
5224}
5225
5188int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev) 5226int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
5189{ 5227{
5190 struct device_domain_info *info; 5228 struct device_domain_info *info;
@@ -5217,7 +5255,9 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
5217 5255
5218 if (!(ctx_lo & CONTEXT_PASIDE)) { 5256 if (!(ctx_lo & CONTEXT_PASIDE)) {
5219 context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table); 5257 context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
5220 context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | ecap_pss(iommu->ecap); 5258 context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
5259 intel_iommu_get_pts(iommu);
5260
5221 wmb(); 5261 wmb();
5222 /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both 5262 /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
5223 * extended to permit requests-with-PASID if the PASIDE bit 5263 * extended to permit requests-with-PASID if the PASIDE bit
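
The pts arithmetic in intel_iommu_get_pts() is easy to sanity-check: the comment gives entries = 2^(pts + 5), so for a power-of-two pasid_max the encoding is log2(pasid_max) - 5, and the KBL example (ecap_pss = 0x13, 20 PASID bits, 2^20 entries) yields pts = 15. A userspace sketch using ffs() as a stand-in for find_first_bit():

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    static int pts_from_pasid_max(unsigned int pasid_max)
    {
            return ffs(pasid_max) - 1 - 5;  /* ffs() is 1-based */
    }

    int main(void)
    {
            printf("2^20 entries -> pts %d\n", pts_from_pasid_max(1u << 20));
            printf("2^15 entries -> pts %d\n", pts_from_pasid_max(1u << 15));
            return 0;
    }
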
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index 54a5e870a8f5..efbcf8435185 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -19,9 +19,9 @@
19#include <linux/bitops.h> 19#include <linux/bitops.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/moduleparam.h> 21#include <linux/moduleparam.h>
22#include <linux/interrupt.h>
22#include <linux/irqdomain.h> 23#include <linux/irqdomain.h>
23#include <linux/irqchip.h> 24#include <linux/irqchip.h>
24#include <linux/irqchip/chained_irq.h>
25#include <linux/of.h> 25#include <linux/of.h>
26#include <linux/of_platform.h> 26#include <linux/of_platform.h>
27#include <linux/mfd/syscon.h> 27#include <linux/mfd/syscon.h>
@@ -39,6 +39,7 @@ struct keystone_irq_device {
39 struct irq_domain *irqd; 39 struct irq_domain *irqd;
40 struct regmap *devctrl_regs; 40 struct regmap *devctrl_regs;
41 u32 devctrl_offset; 41 u32 devctrl_offset;
42 raw_spinlock_t wa_lock;
42}; 43};
43 44
44static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq) 45static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq)
@@ -83,17 +84,15 @@ static void keystone_irq_ack(struct irq_data *d)
83 /* nothing to do here */ 84 /* nothing to do here */
84} 85}
85 86
86static void keystone_irq_handler(struct irq_desc *desc) 87static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq)
87{ 88{
88 unsigned int irq = irq_desc_get_irq(desc); 89 struct keystone_irq_device *kirq = keystone_irq;
89 struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc); 90 unsigned long wa_lock_flags;
90 unsigned long pending; 91 unsigned long pending;
91 int src, virq; 92 int src, virq;
92 93
93 dev_dbg(kirq->dev, "start irq %d\n", irq); 94 dev_dbg(kirq->dev, "start irq %d\n", irq);
94 95
95 chained_irq_enter(irq_desc_get_chip(desc), desc);
96
97 pending = keystone_irq_readl(kirq); 96 pending = keystone_irq_readl(kirq);
98 keystone_irq_writel(kirq, pending); 97 keystone_irq_writel(kirq, pending);
99 98
@@ -111,13 +110,15 @@ static void keystone_irq_handler(struct irq_desc *desc)
111 if (!virq) 110 if (!virq)
112 dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n", 111 dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n",
113 src, virq); 112 src, virq);
113 raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags);
114 generic_handle_irq(virq); 114 generic_handle_irq(virq);
115 raw_spin_unlock_irqrestore(&kirq->wa_lock,
116 wa_lock_flags);
115 } 117 }
116 } 118 }
117 119
118 chained_irq_exit(irq_desc_get_chip(desc), desc);
119
120 dev_dbg(kirq->dev, "end irq %d\n", irq); 120 dev_dbg(kirq->dev, "end irq %d\n", irq);
121 return IRQ_HANDLED;
121} 122}
122 123
123static int keystone_irq_map(struct irq_domain *h, unsigned int virq, 124static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
@@ -182,9 +183,16 @@ static int keystone_irq_probe(struct platform_device *pdev)
182 return -ENODEV; 183 return -ENODEV;
183 } 184 }
184 185
186 raw_spin_lock_init(&kirq->wa_lock);
187
185 platform_set_drvdata(pdev, kirq); 188 platform_set_drvdata(pdev, kirq);
186 189
187 irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq); 190 ret = request_irq(kirq->irq, keystone_irq_handler,
191 0, dev_name(dev), kirq);
192 if (ret) {
193 irq_domain_remove(kirq->irqd);
194 return ret;
195 }
188 196
189 /* clear all source bits */ 197 /* clear all source bits */
190 keystone_irq_writel(kirq, ~0x0); 198 keystone_irq_writel(kirq, ~0x0);
@@ -199,6 +207,8 @@ static int keystone_irq_remove(struct platform_device *pdev)
199 struct keystone_irq_device *kirq = platform_get_drvdata(pdev); 207 struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
200 int hwirq; 208 int hwirq;
201 209
210 free_irq(kirq->irq, kirq);
211
202 for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++) 212 for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++)
203 irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq)); 213 irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
204 214
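
The keystone conversion drops the chained-handler setup in favour of an ordinary request_irq()/free_irq() pair, with a raw spinlock taken around generic_handle_irq() as the wa_lock name suggests. A kernel-style sketch of the registration half (names are illustrative, not the driver's):

    #include <linux/interrupt.h>

    static irqreturn_t demo_demux_handler(int irq, void *data)
    {
            /* read the pending bits, ack them, then dispatch each
             * source via generic_handle_irq() under the wa_lock */
            return IRQ_HANDLED;
    }

    static int demo_register(unsigned int irq, void *kirq)
    {
            return request_irq(irq, demo_demux_handler, 0,
                               "keystone-irq-demo", kirq);
    }

One upside of this shape is symmetry: the remove path can simply call free_irq(), where tearing down a chained handler is messier.
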
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 17304705f2cf..05fa9f7af53c 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -131,12 +131,16 @@ static struct irq_chip mxs_icoll_chip = {
131 .irq_ack = icoll_ack_irq, 131 .irq_ack = icoll_ack_irq,
132 .irq_mask = icoll_mask_irq, 132 .irq_mask = icoll_mask_irq,
133 .irq_unmask = icoll_unmask_irq, 133 .irq_unmask = icoll_unmask_irq,
134 .flags = IRQCHIP_MASK_ON_SUSPEND |
135 IRQCHIP_SKIP_SET_WAKE,
134}; 136};
135 137
136static struct irq_chip asm9260_icoll_chip = { 138static struct irq_chip asm9260_icoll_chip = {
137 .irq_ack = icoll_ack_irq, 139 .irq_ack = icoll_ack_irq,
138 .irq_mask = asm9260_mask_irq, 140 .irq_mask = asm9260_mask_irq,
139 .irq_unmask = asm9260_unmask_irq, 141 .irq_unmask = asm9260_unmask_irq,
142 .flags = IRQCHIP_MASK_ON_SUSPEND |
143 IRQCHIP_SKIP_SET_WAKE,
140}; 144};
141 145
142asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs) 146asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 1a1d99704fe6..296f1411fe84 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -11297,7 +11297,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
11297 ((CAPI_MSG *) msg)->header.ncci = 0; 11297 ((CAPI_MSG *) msg)->header.ncci = 0;
11298 ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT; 11298 ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
11299 ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3; 11299 ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
11300 PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE); 11300 ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
11301 ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
11301 ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0; 11302 ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
11302 w = api_put(notify_plci->appl, (CAPI_MSG *) msg); 11303 w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
11303 if (w != _QUEUE_FULL) 11304 if (w != _QUEUE_FULL)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 7c6c57216bf2..8a9f742d8ed7 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1534,18 +1534,18 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
1534 return PTR_ERR(key); 1534 return PTR_ERR(key);
1535 } 1535 }
1536 1536
1537 rcu_read_lock(); 1537 down_read(&key->sem);
1538 1538
1539 ukp = user_key_payload(key); 1539 ukp = user_key_payload(key);
1540 if (!ukp) { 1540 if (!ukp) {
1541 rcu_read_unlock(); 1541 up_read(&key->sem);
1542 key_put(key); 1542 key_put(key);
1543 kzfree(new_key_string); 1543 kzfree(new_key_string);
1544 return -EKEYREVOKED; 1544 return -EKEYREVOKED;
1545 } 1545 }
1546 1546
1547 if (cc->key_size != ukp->datalen) { 1547 if (cc->key_size != ukp->datalen) {
1548 rcu_read_unlock(); 1548 up_read(&key->sem);
1549 key_put(key); 1549 key_put(key);
1550 kzfree(new_key_string); 1550 kzfree(new_key_string);
1551 return -EINVAL; 1551 return -EINVAL;
@@ -1553,7 +1553,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
1553 1553
1554 memcpy(cc->key, ukp->data, cc->key_size); 1554 memcpy(cc->key, ukp->data, cc->key_size);
1555 1555
1556 rcu_read_unlock(); 1556 up_read(&key->sem);
1557 key_put(key); 1557 key_put(key);
1558 1558
1559 /* clear the flag since following operations may invalidate previously valid key */ 1559 /* clear the flag since following operations may invalidate previously valid key */
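
The dm-crypt change swaps RCU for the key's own semaphore: the payload returned by user_key_payload() is only stable while key->sem is held, since a concurrent key update can reallocate it mid-copy. A kernel-style sketch of the locking shape (illustrative helper, not the dm-crypt code):

    #include <linux/errno.h>
    #include <linux/key.h>
    #include <linux/string.h>
    #include <keys/user-type.h>

    static int copy_user_key(struct key *key, void *dst, size_t len)
    {
            const struct user_key_payload *ukp;
            int ret = 0;

            down_read(&key->sem);
            ukp = user_key_payload(key);
            if (!ukp)                        /* key was revoked */
                    ret = -EKEYREVOKED;
            else if (ukp->datalen != len)
                    ret = -EINVAL;
            else
                    memcpy(dst, ukp->data, len);
            up_read(&key->sem);
            return ret;
    }
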
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6400cffb986d..3570bcb7a4a4 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -427,7 +427,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
427 unsigned long flags; 427 unsigned long flags;
428 struct priority_group *pg; 428 struct priority_group *pg;
429 struct pgpath *pgpath; 429 struct pgpath *pgpath;
430 bool bypassed = true; 430 unsigned bypassed = 1;
431 431
432 if (!atomic_read(&m->nr_valid_paths)) { 432 if (!atomic_read(&m->nr_valid_paths)) {
433 clear_bit(MPATHF_QUEUE_IO, &m->flags); 433 clear_bit(MPATHF_QUEUE_IO, &m->flags);
@@ -466,7 +466,7 @@ check_current_pg:
466 */ 466 */
467 do { 467 do {
468 list_for_each_entry(pg, &m->priority_groups, list) { 468 list_for_each_entry(pg, &m->priority_groups, list) {
469 if (pg->bypassed == bypassed) 469 if (pg->bypassed == !!bypassed)
470 continue; 470 continue;
471 pgpath = choose_path_in_pg(m, pg, nr_bytes); 471 pgpath = choose_path_in_pg(m, pg, nr_bytes);
472 if (!IS_ERR_OR_NULL(pgpath)) { 472 if (!IS_ERR_OR_NULL(pgpath)) {
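
The dm-mpath revert from bool back to unsigned matters because the retry loop surrounding this hunk (not shown here) decrements bypassed: with C's _Bool, false - 1 converts back to true, so a bool counter can keep the loop from ever terminating. A two-decrement standalone demonstration:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            bool b = true;

            b--;    /* true -> false */
            printf("after one decrement:  %d\n", b);   /* 0 */
            b--;    /* false - 1 == -1, converts back to true */
            printf("after two decrements: %d\n", b);   /* 1 */
            return 0;
    }
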
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
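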
index 9d7275fb541a..6e702fc69a83 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -779,6 +779,10 @@ static void dm_old_request_fn(struct request_queue *q)
779 int srcu_idx; 779 int srcu_idx;
780 struct dm_table *map = dm_get_live_table(md, &srcu_idx); 780 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
781 781
782 if (unlikely(!map)) {
783 dm_put_live_table(md, srcu_idx);
784 return;
785 }
782 ti = dm_table_find_target(map, pos); 786 ti = dm_table_find_target(map, pos);
783 dm_put_live_table(md, srcu_idx); 787 dm_put_live_table(md, srcu_idx);
784 } 788 }
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 82821ee0d57f..01175dac0db6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5291,6 +5291,11 @@ int md_run(struct mddev *mddev)
5291 if (start_readonly && mddev->ro == 0) 5291 if (start_readonly && mddev->ro == 0)
5292 mddev->ro = 2; /* read-only, but switch on first write */ 5292 mddev->ro = 2; /* read-only, but switch on first write */
5293 5293
5294 /*
5295 * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
5296 * up mddev->thread. It is important to initialize critical
5297 * resources for mddev->thread BEFORE calling pers->run().
5298 */
5294 err = pers->run(mddev); 5299 err = pers->run(mddev);
5295 if (err) 5300 if (err)
5296 pr_warn("md: pers->run() failed ...\n"); 5301 pr_warn("md: pers->run() failed ...\n");
diff --git a/drivers/md/md.h b/drivers/md/md.h
index e38936d05df1..2a514036a83d 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -212,6 +212,7 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
212 int is_new); 212 int is_new);
213struct md_cluster_info; 213struct md_cluster_info;
214 214
 215/* change UNSUPPORTED_MDDEV_FLAGS for each array type if a new flag is added */
215enum mddev_flags { 216enum mddev_flags {
216 MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */ 217 MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */
217 MD_CLOSING, /* If set, we are closing the array, do not open 218 MD_CLOSING, /* If set, we are closing the array, do not open
@@ -702,4 +703,11 @@ static inline int mddev_is_clustered(struct mddev *mddev)
702{ 703{
703 return mddev->cluster_info && mddev->bitmap_info.nodes > 1; 704 return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
704} 705}
706
707/* clear unsupported mddev_flags */
708static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
709 unsigned long unsupported_flags)
710{
711 mddev->flags &= ~unsupported_flags;
712}
705#endif /* _MD_MD_H */ 713#endif /* _MD_MD_H */
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a162fedeb51a..848365d474f3 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -26,6 +26,11 @@
26#include "raid0.h" 26#include "raid0.h"
27#include "raid5.h" 27#include "raid5.h"
28 28
29#define UNSUPPORTED_MDDEV_FLAGS \
30 ((1L << MD_HAS_JOURNAL) | \
31 (1L << MD_JOURNAL_CLEAN) | \
32 (1L << MD_FAILFAST_SUPPORTED))
33
29static int raid0_congested(struct mddev *mddev, int bits) 34static int raid0_congested(struct mddev *mddev, int bits)
30{ 35{
31 struct r0conf *conf = mddev->private; 36 struct r0conf *conf = mddev->private;
@@ -539,8 +544,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
539 mddev->delta_disks = -1; 544 mddev->delta_disks = -1;
540 /* make sure it will be not marked as dirty */ 545 /* make sure it will be not marked as dirty */
541 mddev->recovery_cp = MaxSector; 546 mddev->recovery_cp = MaxSector;
542 clear_bit(MD_HAS_JOURNAL, &mddev->flags); 547 mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
543 clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
544 548
545 create_strip_zones(mddev, &priv_conf); 549 create_strip_zones(mddev, &priv_conf);
546 550
@@ -583,7 +587,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
583 mddev->degraded = 0; 587 mddev->degraded = 0;
584 /* make sure it will be not marked as dirty */ 588 /* make sure it will be not marked as dirty */
585 mddev->recovery_cp = MaxSector; 589 mddev->recovery_cp = MaxSector;
586 clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); 590 mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
587 591
588 create_strip_zones(mddev, &priv_conf); 592 create_strip_zones(mddev, &priv_conf);
589 return priv_conf; 593 return priv_conf;
@@ -626,7 +630,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
626 mddev->raid_disks = 1; 630 mddev->raid_disks = 1;
627 /* make sure it will be not marked as dirty */ 631 /* make sure it will be not marked as dirty */
628 mddev->recovery_cp = MaxSector; 632 mddev->recovery_cp = MaxSector;
629 clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); 633 mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
630 634
631 create_strip_zones(mddev, &priv_conf); 635 create_strip_zones(mddev, &priv_conf);
632 return priv_conf; 636 return priv_conf;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a1f3fbed9100..7b0f647bcccb 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -42,6 +42,10 @@
42#include "raid1.h" 42#include "raid1.h"
43#include "bitmap.h" 43#include "bitmap.h"
44 44
45#define UNSUPPORTED_MDDEV_FLAGS \
46 ((1L << MD_HAS_JOURNAL) | \
47 (1L << MD_JOURNAL_CLEAN))
48
45/* 49/*
46 * Number of guaranteed r1bios in case of extreme VM load: 50 * Number of guaranteed r1bios in case of extreme VM load:
47 */ 51 */
@@ -1066,17 +1070,107 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1066 kfree(plug); 1070 kfree(plug);
1067} 1071}
1068 1072
1069static void raid1_make_request(struct mddev *mddev, struct bio * bio) 1073static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1074 struct r1bio *r1_bio)
1070{ 1075{
1071 struct r1conf *conf = mddev->private; 1076 struct r1conf *conf = mddev->private;
1072 struct raid1_info *mirror; 1077 struct raid1_info *mirror;
1073 struct r1bio *r1_bio;
1074 struct bio *read_bio; 1078 struct bio *read_bio;
1079 struct bitmap *bitmap = mddev->bitmap;
1080 const int op = bio_op(bio);
1081 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1082 int sectors_handled;
1083 int max_sectors;
1084 int rdisk;
1085
1086 wait_barrier(conf, bio);
1087
1088read_again:
1089 rdisk = read_balance(conf, r1_bio, &max_sectors);
1090
1091 if (rdisk < 0) {
1092 /* couldn't find anywhere to read from */
1093 raid_end_bio_io(r1_bio);
1094 return;
1095 }
1096 mirror = conf->mirrors + rdisk;
1097
1098 if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1099 bitmap) {
1100 /*
1101 * Reading from a write-mostly device must take care not to
1102 * over-take any writes that are 'behind'
1103 */
1104 raid1_log(mddev, "wait behind writes");
1105 wait_event(bitmap->behind_wait,
1106 atomic_read(&bitmap->behind_writes) == 0);
1107 }
1108 r1_bio->read_disk = rdisk;
1109 r1_bio->start_next_window = 0;
1110
1111 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1112 bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
1113 max_sectors);
1114
1115 r1_bio->bios[rdisk] = read_bio;
1116
1117 read_bio->bi_iter.bi_sector = r1_bio->sector +
1118 mirror->rdev->data_offset;
1119 read_bio->bi_bdev = mirror->rdev->bdev;
1120 read_bio->bi_end_io = raid1_end_read_request;
1121 bio_set_op_attrs(read_bio, op, do_sync);
1122 if (test_bit(FailFast, &mirror->rdev->flags) &&
1123 test_bit(R1BIO_FailFast, &r1_bio->state))
1124 read_bio->bi_opf |= MD_FAILFAST;
1125 read_bio->bi_private = r1_bio;
1126
1127 if (mddev->gendisk)
1128 trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
1129 read_bio, disk_devt(mddev->gendisk),
1130 r1_bio->sector);
1131
1132 if (max_sectors < r1_bio->sectors) {
1133 /*
1134 * could not read all from this device, so we will need another
1135 * r1_bio.
1136 */
1137 sectors_handled = (r1_bio->sector + max_sectors
1138 - bio->bi_iter.bi_sector);
1139 r1_bio->sectors = max_sectors;
1140 spin_lock_irq(&conf->device_lock);
1141 if (bio->bi_phys_segments == 0)
1142 bio->bi_phys_segments = 2;
1143 else
1144 bio->bi_phys_segments++;
1145 spin_unlock_irq(&conf->device_lock);
1146
1147 /*
1148 * Cannot call generic_make_request directly as that will be
1149 * queued in __make_request and subsequent mempool_alloc might
1150 * block waiting for it. So hand bio over to raid1d.
1151 */
1152 reschedule_retry(r1_bio);
1153
1154 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1155
1156 r1_bio->master_bio = bio;
1157 r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1158 r1_bio->state = 0;
1159 r1_bio->mddev = mddev;
1160 r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1161 goto read_again;
1162 } else
1163 generic_make_request(read_bio);
1164}
1165
1166static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1167 struct r1bio *r1_bio)
1168{
1169 struct r1conf *conf = mddev->private;
1075 int i, disks; 1170 int i, disks;
1076 struct bitmap *bitmap; 1171 struct bitmap *bitmap = mddev->bitmap;
1077 unsigned long flags; 1172 unsigned long flags;
1078 const int op = bio_op(bio); 1173 const int op = bio_op(bio);
1079 const int rw = bio_data_dir(bio);
1080 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); 1174 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1081 const unsigned long do_flush_fua = (bio->bi_opf & 1175 const unsigned long do_flush_fua = (bio->bi_opf &
1082 (REQ_PREFLUSH | REQ_FUA)); 1176 (REQ_PREFLUSH | REQ_FUA));
@@ -1096,15 +1190,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
1096 1190
1097 md_write_start(mddev, bio); /* wait on superblock update early */ 1191 md_write_start(mddev, bio); /* wait on superblock update early */
1098 1192
1099 if (bio_data_dir(bio) == WRITE && 1193 if ((bio_end_sector(bio) > mddev->suspend_lo &&
1100 ((bio_end_sector(bio) > mddev->suspend_lo &&
1101 bio->bi_iter.bi_sector < mddev->suspend_hi) || 1194 bio->bi_iter.bi_sector < mddev->suspend_hi) ||
1102 (mddev_is_clustered(mddev) && 1195 (mddev_is_clustered(mddev) &&
1103 md_cluster_ops->area_resyncing(mddev, WRITE, 1196 md_cluster_ops->area_resyncing(mddev, WRITE,
1104 bio->bi_iter.bi_sector, bio_end_sector(bio))))) { 1197 bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
1105 /* As the suspend_* range is controlled by 1198
1106 * userspace, we want an interruptible 1199 /*
1107 * wait. 1200 * As the suspend_* range is controlled by userspace, we want
1201 * an interruptible wait.
1108 */ 1202 */
1109 DEFINE_WAIT(w); 1203 DEFINE_WAIT(w);
1110 for (;;) { 1204 for (;;) {
@@ -1115,128 +1209,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
1115 bio->bi_iter.bi_sector >= mddev->suspend_hi || 1209 bio->bi_iter.bi_sector >= mddev->suspend_hi ||
1116 (mddev_is_clustered(mddev) && 1210 (mddev_is_clustered(mddev) &&
1117 !md_cluster_ops->area_resyncing(mddev, WRITE, 1211 !md_cluster_ops->area_resyncing(mddev, WRITE,
1118 bio->bi_iter.bi_sector, bio_end_sector(bio)))) 1212 bio->bi_iter.bi_sector,
1213 bio_end_sector(bio))))
1119 break; 1214 break;
1120 schedule(); 1215 schedule();
1121 } 1216 }
1122 finish_wait(&conf->wait_barrier, &w); 1217 finish_wait(&conf->wait_barrier, &w);
1123 } 1218 }
1124
1125 start_next_window = wait_barrier(conf, bio); 1219 start_next_window = wait_barrier(conf, bio);
1126 1220
1127 bitmap = mddev->bitmap;
1128
1129 /*
1130 * make_request() can abort the operation when read-ahead is being
1131 * used and no empty request is available.
1132 *
1133 */
1134 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1135
1136 r1_bio->master_bio = bio;
1137 r1_bio->sectors = bio_sectors(bio);
1138 r1_bio->state = 0;
1139 r1_bio->mddev = mddev;
1140 r1_bio->sector = bio->bi_iter.bi_sector;
1141
1142 /* We might need to issue multiple reads to different
1143 * devices if there are bad blocks around, so we keep
1144 * track of the number of reads in bio->bi_phys_segments.
1145 * If this is 0, there is only one r1_bio and no locking
1146 * will be needed when requests complete. If it is
1147 * non-zero, then it is the number of not-completed requests.
1148 */
1149 bio->bi_phys_segments = 0;
1150 bio_clear_flag(bio, BIO_SEG_VALID);
1151
1152 if (rw == READ) {
1153 /*
1154 * read balancing logic:
1155 */
1156 int rdisk;
1157
1158read_again:
1159 rdisk = read_balance(conf, r1_bio, &max_sectors);
1160
1161 if (rdisk < 0) {
1162 /* couldn't find anywhere to read from */
1163 raid_end_bio_io(r1_bio);
1164 return;
1165 }
1166 mirror = conf->mirrors + rdisk;
1167
1168 if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1169 bitmap) {
1170 /* Reading from a write-mostly device must
1171 * take care not to over-take any writes
1172 * that are 'behind'
1173 */
1174 raid1_log(mddev, "wait behind writes");
1175 wait_event(bitmap->behind_wait,
1176 atomic_read(&bitmap->behind_writes) == 0);
1177 }
1178 r1_bio->read_disk = rdisk;
1179 r1_bio->start_next_window = 0;
1180
1181 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1182 bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
1183 max_sectors);
1184
1185 r1_bio->bios[rdisk] = read_bio;
1186
1187 read_bio->bi_iter.bi_sector = r1_bio->sector +
1188 mirror->rdev->data_offset;
1189 read_bio->bi_bdev = mirror->rdev->bdev;
1190 read_bio->bi_end_io = raid1_end_read_request;
1191 bio_set_op_attrs(read_bio, op, do_sync);
1192 if (test_bit(FailFast, &mirror->rdev->flags) &&
1193 test_bit(R1BIO_FailFast, &r1_bio->state))
1194 read_bio->bi_opf |= MD_FAILFAST;
1195 read_bio->bi_private = r1_bio;
1196
1197 if (mddev->gendisk)
1198 trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
1199 read_bio, disk_devt(mddev->gendisk),
1200 r1_bio->sector);
1201
1202 if (max_sectors < r1_bio->sectors) {
1203 /* could not read all from this device, so we will
1204 * need another r1_bio.
1205 */
1206
1207 sectors_handled = (r1_bio->sector + max_sectors
1208 - bio->bi_iter.bi_sector);
1209 r1_bio->sectors = max_sectors;
1210 spin_lock_irq(&conf->device_lock);
1211 if (bio->bi_phys_segments == 0)
1212 bio->bi_phys_segments = 2;
1213 else
1214 bio->bi_phys_segments++;
1215 spin_unlock_irq(&conf->device_lock);
1216 /* Cannot call generic_make_request directly
1217 * as that will be queued in __make_request
1218 * and subsequent mempool_alloc might block waiting
1219 * for it. So hand bio over to raid1d.
1220 */
1221 reschedule_retry(r1_bio);
1222
1223 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1224
1225 r1_bio->master_bio = bio;
1226 r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1227 r1_bio->state = 0;
1228 r1_bio->mddev = mddev;
1229 r1_bio->sector = bio->bi_iter.bi_sector +
1230 sectors_handled;
1231 goto read_again;
1232 } else
1233 generic_make_request(read_bio);
1234 return;
1235 }
1236
1237 /*
1238 * WRITE:
1239 */
1240 if (conf->pending_count >= max_queued_requests) { 1221 if (conf->pending_count >= max_queued_requests) {
1241 md_wakeup_thread(mddev->thread); 1222 md_wakeup_thread(mddev->thread);
1242 raid1_log(mddev, "wait queued"); 1223 raid1_log(mddev, "wait queued");
@@ -1280,8 +1261,7 @@ read_again:
1280 int bad_sectors; 1261 int bad_sectors;
1281 int is_bad; 1262 int is_bad;
1282 1263
1283 is_bad = is_badblock(rdev, r1_bio->sector, 1264 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1284 max_sectors,
1285 &first_bad, &bad_sectors); 1265 &first_bad, &bad_sectors);
1286 if (is_bad < 0) { 1266 if (is_bad < 0) {
1287 /* mustn't write here until the bad block is 1267 /* mustn't write here until the bad block is
@@ -1370,7 +1350,8 @@ read_again:
1370 continue; 1350 continue;
1371 1351
1372 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1352 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1373 bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); 1353 bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector,
1354 max_sectors);
1374 1355
1375 if (first_clone) { 1356 if (first_clone) {
1376 /* do behind I/O ? 1357 /* do behind I/O ?
@@ -1464,6 +1445,40 @@ read_again:
1464 wake_up(&conf->wait_barrier); 1445 wake_up(&conf->wait_barrier);
1465} 1446}
1466 1447
1448static void raid1_make_request(struct mddev *mddev, struct bio *bio)
1449{
1450 struct r1conf *conf = mddev->private;
1451 struct r1bio *r1_bio;
1452
1453 /*
1454 * make_request() can abort the operation when read-ahead is being
1455 * used and no empty request is available.
1456 *
1457 */
1458 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1459
1460 r1_bio->master_bio = bio;
1461 r1_bio->sectors = bio_sectors(bio);
1462 r1_bio->state = 0;
1463 r1_bio->mddev = mddev;
1464 r1_bio->sector = bio->bi_iter.bi_sector;
1465
1466 /*
1467 * We might need to issue multiple reads to different devices if there
1468 * are bad blocks around, so we keep track of the number of reads in
1469 * bio->bi_phys_segments. If this is 0, there is only one r1_bio and
1470 * no locking will be needed when requests complete. If it is
1471 * non-zero, then it is the number of not-completed requests.
1472 */
1473 bio->bi_phys_segments = 0;
1474 bio_clear_flag(bio, BIO_SEG_VALID);
1475
1476 if (bio_data_dir(bio) == READ)
1477 raid1_read_request(mddev, bio, r1_bio);
1478 else
1479 raid1_write_request(mddev, bio, r1_bio);
1480}
1481
1467static void raid1_status(struct seq_file *seq, struct mddev *mddev) 1482static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1468{ 1483{
1469 struct r1conf *conf = mddev->private; 1484 struct r1conf *conf = mddev->private;
@@ -3246,8 +3261,8 @@ static void *raid1_takeover(struct mddev *mddev)
3246 if (!IS_ERR(conf)) { 3261 if (!IS_ERR(conf)) {
3247 /* Array must appear to be quiesced */ 3262 /* Array must appear to be quiesced */
3248 conf->array_frozen = 1; 3263 conf->array_frozen = 1;
3249 clear_bit(MD_HAS_JOURNAL, &mddev->flags); 3264 mddev_clear_unsupported_flags(mddev,
3250 clear_bit(MD_JOURNAL_CLEAN, &mddev->flags); 3265 UNSUPPORTED_MDDEV_FLAGS);
3251 } 3266 }
3252 return conf; 3267 return conf;
3253 } 3268 }
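
The raid1 read/write split keeps the bi_phys_segments bookkeeping described in the new raid1_make_request(): zero means a single r1_bio with no locking needed, otherwise it counts outstanding sub-requests, which is why the first bad-block split jumps straight to 2 (the original plus the new r1_bio). A standalone sketch of that accounting (illustrative, locking elided):

    #include <stdio.h>

    static unsigned int segments;   /* stands in for bio->bi_phys_segments */

    static void note_split(void)
    {
            if (segments == 0)
                    segments = 2;   /* original + first extra r1_bio */
            else
                    segments++;
    }

    int main(void)
    {
            note_split();   /* first bad-block split */
            note_split();   /* second split */
            printf("outstanding sub-requests: %u\n", segments);  /* 3 */
            return 0;
    }
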
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ab5e86209322..1920756828df 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1087,23 +1087,122 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1087 kfree(plug); 1087 kfree(plug);
1088} 1088}
1089 1089
1090static void __make_request(struct mddev *mddev, struct bio *bio) 1090static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1091 struct r10bio *r10_bio)
1091{ 1092{
1092 struct r10conf *conf = mddev->private; 1093 struct r10conf *conf = mddev->private;
1093 struct r10bio *r10_bio;
1094 struct bio *read_bio; 1094 struct bio *read_bio;
1095 const int op = bio_op(bio);
1096 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1097 int sectors_handled;
1098 int max_sectors;
1099 sector_t sectors;
1100 struct md_rdev *rdev;
1101 int slot;
1102
1103 /*
1104 * Register the new request and wait if the reconstruction
1105 * thread has put up a bar for new requests.
1106 * Continue immediately if no resync is active currently.
1107 */
1108 wait_barrier(conf);
1109
1110 sectors = bio_sectors(bio);
1111 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1112 bio->bi_iter.bi_sector < conf->reshape_progress &&
1113 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1114 /*
1115 * IO spans the reshape position. Need to wait for reshape to
1116 * pass
1117 */
1118 raid10_log(conf->mddev, "wait reshape");
1119 allow_barrier(conf);
1120 wait_event(conf->wait_barrier,
1121 conf->reshape_progress <= bio->bi_iter.bi_sector ||
1122 conf->reshape_progress >= bio->bi_iter.bi_sector +
1123 sectors);
1124 wait_barrier(conf);
1125 }
1126
1127read_again:
1128 rdev = read_balance(conf, r10_bio, &max_sectors);
1129 if (!rdev) {
1130 raid_end_bio_io(r10_bio);
1131 return;
1132 }
1133 slot = r10_bio->read_slot;
1134
1135 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1136 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
1137 max_sectors);
1138
1139 r10_bio->devs[slot].bio = read_bio;
1140 r10_bio->devs[slot].rdev = rdev;
1141
1142 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1143 choose_data_offset(r10_bio, rdev);
1144 read_bio->bi_bdev = rdev->bdev;
1145 read_bio->bi_end_io = raid10_end_read_request;
1146 bio_set_op_attrs(read_bio, op, do_sync);
1147 if (test_bit(FailFast, &rdev->flags) &&
1148 test_bit(R10BIO_FailFast, &r10_bio->state))
1149 read_bio->bi_opf |= MD_FAILFAST;
1150 read_bio->bi_private = r10_bio;
1151
1152 if (mddev->gendisk)
1153 trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
1154 read_bio, disk_devt(mddev->gendisk),
1155 r10_bio->sector);
1156 if (max_sectors < r10_bio->sectors) {
1157 /*
1158 * Could not read all from this device, so we will need another
1159 * r10_bio.
1160 */
1161 sectors_handled = (r10_bio->sector + max_sectors
1162 - bio->bi_iter.bi_sector);
1163 r10_bio->sectors = max_sectors;
1164 spin_lock_irq(&conf->device_lock);
1165 if (bio->bi_phys_segments == 0)
1166 bio->bi_phys_segments = 2;
1167 else
1168 bio->bi_phys_segments++;
1169 spin_unlock_irq(&conf->device_lock);
1170 /*
1171 * Cannot call generic_make_request directly as that will be
1172 * queued in __generic_make_request and subsequent
 1173 * mempool_alloc might block waiting for it, so hand bio over
1174 * to raid10d.
1175 */
1176 reschedule_retry(r10_bio);
1177
1178 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1179
1180 r10_bio->master_bio = bio;
1181 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1182 r10_bio->state = 0;
1183 r10_bio->mddev = mddev;
1184 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1185 goto read_again;
1186 } else
1187 generic_make_request(read_bio);
1188 return;
1189}
1190
1191static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1192 struct r10bio *r10_bio)
1193{
1194 struct r10conf *conf = mddev->private;
1095 int i; 1195 int i;
1096 const int op = bio_op(bio); 1196 const int op = bio_op(bio);
1097 const int rw = bio_data_dir(bio);
1098 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC); 1197 const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1099 const unsigned long do_fua = (bio->bi_opf & REQ_FUA); 1198 const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
1100 unsigned long flags; 1199 unsigned long flags;
1101 struct md_rdev *blocked_rdev; 1200 struct md_rdev *blocked_rdev;
1102 struct blk_plug_cb *cb; 1201 struct blk_plug_cb *cb;
1103 struct raid10_plug_cb *plug = NULL; 1202 struct raid10_plug_cb *plug = NULL;
1203 sector_t sectors;
1104 int sectors_handled; 1204 int sectors_handled;
1105 int max_sectors; 1205 int max_sectors;
1106 int sectors;
1107 1206
1108 md_write_start(mddev, bio); 1207 md_write_start(mddev, bio);
1109 1208
@@ -1118,8 +1217,9 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
1118 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1217 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1119 bio->bi_iter.bi_sector < conf->reshape_progress && 1218 bio->bi_iter.bi_sector < conf->reshape_progress &&
1120 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { 1219 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1121 /* IO spans the reshape position. Need to wait for 1220 /*
1122 * reshape to pass 1221 * IO spans the reshape position. Need to wait for reshape to
1222 * pass
1123 */ 1223 */
1124 raid10_log(conf->mddev, "wait reshape"); 1224 raid10_log(conf->mddev, "wait reshape");
1125 allow_barrier(conf); 1225 allow_barrier(conf);
@@ -1129,8 +1229,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
1129 sectors); 1229 sectors);
1130 wait_barrier(conf); 1230 wait_barrier(conf);
1131 } 1231 }
1232
1132 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1233 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1133 bio_data_dir(bio) == WRITE &&
1134 (mddev->reshape_backwards 1234 (mddev->reshape_backwards
1135 ? (bio->bi_iter.bi_sector < conf->reshape_safe && 1235 ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1136 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) 1236 bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
@@ -1148,98 +1248,6 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
1148 conf->reshape_safe = mddev->reshape_position; 1248 conf->reshape_safe = mddev->reshape_position;
1149 } 1249 }
1150 1250
1151 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1152
1153 r10_bio->master_bio = bio;
1154 r10_bio->sectors = sectors;
1155
1156 r10_bio->mddev = mddev;
1157 r10_bio->sector = bio->bi_iter.bi_sector;
1158 r10_bio->state = 0;
1159
1160 /* We might need to issue multiple reads to different
1161 * devices if there are bad blocks around, so we keep
1162 * track of the number of reads in bio->bi_phys_segments.
1163 * If this is 0, there is only one r10_bio and no locking
1164 * will be needed when the request completes. If it is
1165 * non-zero, then it is the number of not-completed requests.
1166 */
1167 bio->bi_phys_segments = 0;
1168 bio_clear_flag(bio, BIO_SEG_VALID);
1169
1170 if (rw == READ) {
1171 /*
1172 * read balancing logic:
1173 */
1174 struct md_rdev *rdev;
1175 int slot;
1176
1177read_again:
1178 rdev = read_balance(conf, r10_bio, &max_sectors);
1179 if (!rdev) {
1180 raid_end_bio_io(r10_bio);
1181 return;
1182 }
1183 slot = r10_bio->read_slot;
1184
1185 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1186 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
1187 max_sectors);
1188
1189 r10_bio->devs[slot].bio = read_bio;
1190 r10_bio->devs[slot].rdev = rdev;
1191
1192 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1193 choose_data_offset(r10_bio, rdev);
1194 read_bio->bi_bdev = rdev->bdev;
1195 read_bio->bi_end_io = raid10_end_read_request;
1196 bio_set_op_attrs(read_bio, op, do_sync);
1197 if (test_bit(FailFast, &rdev->flags) &&
1198 test_bit(R10BIO_FailFast, &r10_bio->state))
1199 read_bio->bi_opf |= MD_FAILFAST;
1200 read_bio->bi_private = r10_bio;
1201
1202 if (mddev->gendisk)
1203 trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
1204 read_bio, disk_devt(mddev->gendisk),
1205 r10_bio->sector);
1206 if (max_sectors < r10_bio->sectors) {
1207 /* Could not read all from this device, so we will
1208 * need another r10_bio.
1209 */
1210 sectors_handled = (r10_bio->sector + max_sectors
1211 - bio->bi_iter.bi_sector);
1212 r10_bio->sectors = max_sectors;
1213 spin_lock_irq(&conf->device_lock);
1214 if (bio->bi_phys_segments == 0)
1215 bio->bi_phys_segments = 2;
1216 else
1217 bio->bi_phys_segments++;
1218 spin_unlock_irq(&conf->device_lock);
1219 /* Cannot call generic_make_request directly
1220 * as that will be queued in __generic_make_request
1221 * and subsequent mempool_alloc might block
1222 * waiting for it. so hand bio over to raid10d.
1223 */
1224 reschedule_retry(r10_bio);
1225
1226 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1227
1228 r10_bio->master_bio = bio;
1229 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1230 r10_bio->state = 0;
1231 r10_bio->mddev = mddev;
1232 r10_bio->sector = bio->bi_iter.bi_sector +
1233 sectors_handled;
1234 goto read_again;
1235 } else
1236 generic_make_request(read_bio);
1237 return;
1238 }
1239
1240 /*
1241 * WRITE:
1242 */
1243 if (conf->pending_count >= max_queued_requests) { 1251 if (conf->pending_count >= max_queued_requests) {
1244 md_wakeup_thread(mddev->thread); 1252 md_wakeup_thread(mddev->thread);
1245 raid10_log(mddev, "wait queued"); 1253 raid10_log(mddev, "wait queued");
@@ -1300,8 +1308,7 @@ retry_write:
1300 int bad_sectors; 1308 int bad_sectors;
1301 int is_bad; 1309 int is_bad;
1302 1310
1303 is_bad = is_badblock(rdev, dev_sector, 1311 is_bad = is_badblock(rdev, dev_sector, max_sectors,
1304 max_sectors,
1305 &first_bad, &bad_sectors); 1312 &first_bad, &bad_sectors);
1306 if (is_bad < 0) { 1313 if (is_bad < 0) {
1307 /* Mustn't write here until the bad block 1314 /* Mustn't write here until the bad block
@@ -1405,8 +1412,7 @@ retry_write:
1405 r10_bio->devs[i].bio = mbio; 1412 r10_bio->devs[i].bio = mbio;
1406 1413
1407 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ 1414 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
1408 choose_data_offset(r10_bio, 1415 choose_data_offset(r10_bio, rdev));
1409 rdev));
1410 mbio->bi_bdev = rdev->bdev; 1416 mbio->bi_bdev = rdev->bdev;
1411 mbio->bi_end_io = raid10_end_write_request; 1417 mbio->bi_end_io = raid10_end_write_request;
1412 bio_set_op_attrs(mbio, op, do_sync | do_fua); 1418 bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1457,8 +1463,7 @@ retry_write:
1457 r10_bio->devs[i].repl_bio = mbio; 1463 r10_bio->devs[i].repl_bio = mbio;
1458 1464
1459 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + 1465 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
1460 choose_data_offset( 1466 choose_data_offset(r10_bio, rdev));
1461 r10_bio, rdev));
1462 mbio->bi_bdev = rdev->bdev; 1467 mbio->bi_bdev = rdev->bdev;
1463 mbio->bi_end_io = raid10_end_write_request; 1468 mbio->bi_end_io = raid10_end_write_request;
1464 bio_set_op_attrs(mbio, op, do_sync | do_fua); 1469 bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1503,6 +1508,36 @@ retry_write:
1503 one_write_done(r10_bio); 1508 one_write_done(r10_bio);
1504} 1509}
1505 1510
1511static void __make_request(struct mddev *mddev, struct bio *bio)
1512{
1513 struct r10conf *conf = mddev->private;
1514 struct r10bio *r10_bio;
1515
1516 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1517
1518 r10_bio->master_bio = bio;
1519 r10_bio->sectors = bio_sectors(bio);
1520
1521 r10_bio->mddev = mddev;
1522 r10_bio->sector = bio->bi_iter.bi_sector;
1523 r10_bio->state = 0;
1524
1525 /*
1526 * We might need to issue multiple reads to different devices if there
1527 * are bad blocks around, so we keep track of the number of reads in
1528 * bio->bi_phys_segments. If this is 0, there is only one r10_bio and
1529 * no locking will be needed when the request completes. If it is
1530 * non-zero, then it is the number of not-completed requests.
1531 */
1532 bio->bi_phys_segments = 0;
1533 bio_clear_flag(bio, BIO_SEG_VALID);
1534
1535 if (bio_data_dir(bio) == READ)
1536 raid10_read_request(mddev, bio, r10_bio);
1537 else
1538 raid10_write_request(mddev, bio, r10_bio);
1539}
1540
1506static void raid10_make_request(struct mddev *mddev, struct bio *bio) 1541static void raid10_make_request(struct mddev *mddev, struct bio *bio)
1507{ 1542{
1508 struct r10conf *conf = mddev->private; 1543 struct r10conf *conf = mddev->private;
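The refactored __make_request() above is now just the common setup: allocate the r10_bio, seed bi_phys_segments, and dispatch on bio_data_dir(). The bi_phys_segments field counts not-yet-completed sub-requests when a bio has to be split around bad blocks. A minimal userspace sketch of that split accounting (split_count and its arguments are illustrative names, not raid10.c identifiers):

#include <stdio.h>

/*
 * Sketch: count how many sub-requests a request splits into when each
 * pass can cover at most max_sectors, mirroring the way raid10 bumps
 * bio->bi_phys_segments once per extra r10_bio it has to allocate.
 */
static int split_count(unsigned long sectors, unsigned long max_sectors)
{
	int segments = 1;		/* the initial r10_bio */
	unsigned long handled = max_sectors;

	while (handled < sectors) {	/* each retry handles another chunk */
		segments++;
		handled += max_sectors;
	}
	return segments;
}

int main(void)
{
	/* a 1000-sector read where a device can serve 256 sectors at a time */
	printf("sub-requests: %d\n", split_count(1000, 256));
	return 0;
}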
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index d7bfb6fc8aef..302dea3296ba 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -162,6 +162,8 @@ struct r5l_log {
162 162
163 /* to submit async io_units, to fulfill ordering of flush */ 163 /* to submit async io_units, to fulfill ordering of flush */
164 struct work_struct deferred_io_work; 164 struct work_struct deferred_io_work;
165 /* to disable writeback while in degraded mode */
166 struct work_struct disable_writeback_work;
165}; 167};
166 168
167/* 169/*
@@ -611,6 +613,21 @@ static void r5l_submit_io_async(struct work_struct *work)
611 r5l_do_submit_io(log, io); 613 r5l_do_submit_io(log, io);
612} 614}
613 615
616static void r5c_disable_writeback_async(struct work_struct *work)
617{
618 struct r5l_log *log = container_of(work, struct r5l_log,
619 disable_writeback_work);
620 struct mddev *mddev = log->rdev->mddev;
621
622 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
623 return;
624 pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
625 mdname(mddev));
626 mddev_suspend(mddev);
627 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
628 mddev_resume(mddev);
629}
630
614static void r5l_submit_current_io(struct r5l_log *log) 631static void r5l_submit_current_io(struct r5l_log *log)
615{ 632{
616 struct r5l_io_unit *io = log->current_io; 633 struct r5l_io_unit *io = log->current_io;
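r5c_disable_writeback_async() exists because the place that detects the failure (raid5_error(), further down in this patch) runs under a spinlock and cannot call the sleeping mddev_suspend()/mddev_resume() pair; it only schedules the work. A generic sketch of that defer-to-workqueue shape, with hypothetical names (my_dev, my_reconfig):

#include <linux/workqueue.h>

struct my_dev {
	struct work_struct reconfig_work;	/* INIT_WORK() at setup time */
	int write_back;
};

/* Runs in process context, where it is safe to sleep while quiescing. */
static void my_reconfig(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, reconfig_work);

	if (!dev->write_back)		/* already in the safe mode */
		return;
	/* quiesce the device (may sleep), flip the mode, then resume */
	dev->write_back = 0;
}

/* Called from atomic context: only schedule, never sleep here. */
static void my_error_path(struct my_dev *dev)
{
	schedule_work(&dev->reconfig_work);
}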
@@ -1393,8 +1410,6 @@ static void r5l_do_reclaim(struct r5l_log *log)
1393 next_checkpoint = r5c_calculate_new_cp(conf); 1410 next_checkpoint = r5c_calculate_new_cp(conf);
1394 spin_unlock_irq(&log->io_list_lock); 1411 spin_unlock_irq(&log->io_list_lock);
1395 1412
1396 BUG_ON(reclaimable < 0);
1397
1398 if (reclaimable == 0 || !write_super) 1413 if (reclaimable == 0 || !write_super)
1399 return; 1414 return;
1400 1415
@@ -1682,8 +1697,7 @@ out:
1682 1697
1683static struct stripe_head * 1698static struct stripe_head *
1684r5c_recovery_alloc_stripe(struct r5conf *conf, 1699r5c_recovery_alloc_stripe(struct r5conf *conf,
1685 sector_t stripe_sect, 1700 sector_t stripe_sect)
1686 sector_t log_start)
1687{ 1701{
1688 struct stripe_head *sh; 1702 struct stripe_head *sh;
1689 1703
@@ -1692,7 +1706,6 @@ r5c_recovery_alloc_stripe(struct r5conf *conf,
1692 return NULL; /* no more stripe available */ 1706 return NULL; /* no more stripe available */
1693 1707
1694 r5l_recovery_reset_stripe(sh); 1708 r5l_recovery_reset_stripe(sh);
1695 sh->log_start = log_start;
1696 1709
1697 return sh; 1710 return sh;
1698} 1711}
@@ -1862,7 +1875,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
1862 stripe_sect); 1875 stripe_sect);
1863 1876
1864 if (!sh) { 1877 if (!sh) {
1865 sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos); 1878 sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
1866 /* 1879 /*
1867 * cannot get stripe from raid5_get_active_stripe 1880 * cannot get stripe from raid5_get_active_stripe
1868 * try replay some stripes 1881 * try replay some stripes
@@ -1871,7 +1884,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
1871 r5c_recovery_replay_stripes( 1884 r5c_recovery_replay_stripes(
1872 cached_stripe_list, ctx); 1885 cached_stripe_list, ctx);
1873 sh = r5c_recovery_alloc_stripe( 1886 sh = r5c_recovery_alloc_stripe(
1874 conf, stripe_sect, ctx->pos); 1887 conf, stripe_sect);
1875 } 1888 }
1876 if (!sh) { 1889 if (!sh) {
1877 pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n", 1890
@@ -1879,8 +1892,8 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
1879 conf->min_nr_stripes * 2); 1892 conf->min_nr_stripes * 2);
1880 raid5_set_cache_size(mddev, 1893 raid5_set_cache_size(mddev,
1881 conf->min_nr_stripes * 2); 1894 conf->min_nr_stripes * 2);
1882 sh = r5c_recovery_alloc_stripe( 1895 sh = r5c_recovery_alloc_stripe(conf,
1883 conf, stripe_sect, ctx->pos); 1896 stripe_sect);
1884 } 1897 }
1885 if (!sh) { 1898 if (!sh) {
1886 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n", 1899 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
@@ -1894,7 +1907,6 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
1894 if (!test_bit(STRIPE_R5C_CACHING, &sh->state) && 1907 if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
1895 test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) { 1908 test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
1896 r5l_recovery_replay_one_stripe(conf, sh, ctx); 1909 r5l_recovery_replay_one_stripe(conf, sh, ctx);
1897 sh->log_start = ctx->pos;
1898 list_move_tail(&sh->lru, cached_stripe_list); 1910 list_move_tail(&sh->lru, cached_stripe_list);
1899 } 1911 }
1900 r5l_recovery_load_data(log, sh, ctx, payload, 1912 r5l_recovery_load_data(log, sh, ctx, payload,
@@ -1933,8 +1945,6 @@ static void r5c_recovery_load_one_stripe(struct r5l_log *log,
1933 set_bit(R5_UPTODATE, &dev->flags); 1945 set_bit(R5_UPTODATE, &dev->flags);
1934 } 1946 }
1935 } 1947 }
1936 list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
1937 atomic_inc(&log->stripe_in_journal_count);
1938} 1948}
1939 1949
1940/* 1950/*
@@ -2067,9 +2077,10 @@ static int
2067r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, 2077r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2068 struct r5l_recovery_ctx *ctx) 2078 struct r5l_recovery_ctx *ctx)
2069{ 2079{
2070 struct stripe_head *sh, *next; 2080 struct stripe_head *sh;
2071 struct mddev *mddev = log->rdev->mddev; 2081 struct mddev *mddev = log->rdev->mddev;
2072 struct page *page; 2082 struct page *page;
2083 sector_t next_checkpoint = MaxSector;
2073 2084
2074 page = alloc_page(GFP_KERNEL); 2085 page = alloc_page(GFP_KERNEL);
2075 if (!page) { 2086 if (!page) {
@@ -2078,7 +2089,9 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2078 return -ENOMEM; 2089 return -ENOMEM;
2079 } 2090 }
2080 2091
2081 list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { 2092 WARN_ON(list_empty(&ctx->cached_list));
2093
2094 list_for_each_entry(sh, &ctx->cached_list, lru) {
2082 struct r5l_meta_block *mb; 2095 struct r5l_meta_block *mb;
2083 int i; 2096 int i;
2084 int offset; 2097 int offset;
@@ -2123,14 +2136,42 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2123 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, 2136 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
2124 REQ_OP_WRITE, REQ_FUA, false); 2137 REQ_OP_WRITE, REQ_FUA, false);
2125 sh->log_start = ctx->pos; 2138 sh->log_start = ctx->pos;
2139 list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
2140 atomic_inc(&log->stripe_in_journal_count);
2126 ctx->pos = write_pos; 2141 ctx->pos = write_pos;
2127 ctx->seq += 1; 2142 ctx->seq += 1;
2143 next_checkpoint = sh->log_start;
2144 }
2145 log->next_checkpoint = next_checkpoint;
2146 __free_page(page);
2147 return 0;
2148}
2149
2150static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
2151 struct r5l_recovery_ctx *ctx)
2152{
2153 struct mddev *mddev = log->rdev->mddev;
2154 struct r5conf *conf = mddev->private;
2155 struct stripe_head *sh, *next;
2156
2157 if (ctx->data_only_stripes == 0)
2158 return;
2128 2159
2160 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
2161
2162 list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
2163 r5c_make_stripe_write_out(sh);
2164 set_bit(STRIPE_HANDLE, &sh->state);
2129 list_del_init(&sh->lru); 2165 list_del_init(&sh->lru);
2130 raid5_release_stripe(sh); 2166 raid5_release_stripe(sh);
2131 } 2167 }
2132 __free_page(page); 2168
2133 return 0; 2169 md_wakeup_thread(conf->mddev->thread);
2170 /* reuse conf->wait_for_quiescent in recovery */
2171 wait_event(conf->wait_for_quiescent,
2172 atomic_read(&conf->active_stripes) == 0);
2173
2174 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2134} 2175}
2135 2176
2136static int r5l_recovery_log(struct r5l_log *log) 2177static int r5l_recovery_log(struct r5l_log *log)
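The new r5c_recovery_flush_data_only_stripes() above hands the recovered stripes to raid5d and then sleeps on conf->wait_for_quiescent until active_stripes drains to zero before dropping back to write-through mode. A generic kernel-style sketch of that wake-then-wait handshake (drain_wq and pending are hypothetical names):

#include <linux/wait.h>
#include <linux/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(drain_wq);
static atomic_t pending = ATOMIC_INIT(0);

/* Worker side: complete one item, wake the flusher when all are done. */
static void complete_one(void)
{
	if (atomic_dec_and_test(&pending))
		wake_up(&drain_wq);
}

/* Flush side: after queueing work and waking the worker thread, block
 * until everything in flight has been written out. */
static void wait_for_drain(void)
{
	wait_event(drain_wq, atomic_read(&pending) == 0);
}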
@@ -2139,7 +2180,6 @@ static int r5l_recovery_log(struct r5l_log *log)
2139 struct r5l_recovery_ctx ctx; 2180 struct r5l_recovery_ctx ctx;
2140 int ret; 2181 int ret;
2141 sector_t pos; 2182 sector_t pos;
2142 struct stripe_head *sh;
2143 2183
2144 ctx.pos = log->last_checkpoint; 2184 ctx.pos = log->last_checkpoint;
2145 ctx.seq = log->last_cp_seq; 2185 ctx.seq = log->last_cp_seq;
@@ -2160,35 +2200,31 @@ static int r5l_recovery_log(struct r5l_log *log)
2160 pos = ctx.pos; 2200 pos = ctx.pos;
2161 ctx.seq += 10000; 2201 ctx.seq += 10000;
2162 2202
2163 if (ctx.data_only_stripes == 0) {
2164 log->next_checkpoint = ctx.pos;
2165 r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
2166 ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
2167 } else {
2168 sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru);
2169 log->next_checkpoint = sh->log_start;
2170 }
2171 2203
2172 if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0)) 2204 if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
2173 pr_debug("md/raid:%s: starting from clean shutdown\n", 2205 pr_debug("md/raid:%s: starting from clean shutdown\n",
2174 mdname(mddev)); 2206 mdname(mddev));
2175 else { 2207 else
2176 pr_debug("md/raid:%s: recoverying %d data-only stripes and %d data-parity stripes\n", 2208 pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
2177 mdname(mddev), ctx.data_only_stripes, 2209 mdname(mddev), ctx.data_only_stripes,
2178 ctx.data_parity_stripes); 2210 ctx.data_parity_stripes);
2179 2211
2180 if (ctx.data_only_stripes > 0) 2212 if (ctx.data_only_stripes == 0) {
2181 if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) { 2213 log->next_checkpoint = ctx.pos;
2182 pr_err("md/raid:%s: failed to rewrite stripes to journal\n", 2214 r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
2183 mdname(mddev)); 2215 ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
2184 return -EIO; 2216 } else if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
2185 } 2217 pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
2218 mdname(mddev));
2219 return -EIO;
2186 } 2220 }
2187 2221
2188 log->log_start = ctx.pos; 2222 log->log_start = ctx.pos;
2189 log->seq = ctx.seq; 2223 log->seq = ctx.seq;
2190 log->last_checkpoint = pos; 2224 log->last_checkpoint = pos;
2191 r5l_write_super(log, pos); 2225 r5l_write_super(log, pos);
2226
2227 r5c_recovery_flush_data_only_stripes(log, &ctx);
2192 return 0; 2228 return 0;
2193} 2229}
2194 2230
@@ -2250,6 +2286,10 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
2250 val > R5C_JOURNAL_MODE_WRITE_BACK) 2286 val > R5C_JOURNAL_MODE_WRITE_BACK)
2251 return -EINVAL; 2287 return -EINVAL;
2252 2288
2289 if (raid5_calc_degraded(conf) > 0 &&
2290 val == R5C_JOURNAL_MODE_WRITE_BACK)
2291 return -EINVAL;
2292
2253 mddev_suspend(mddev); 2293 mddev_suspend(mddev);
2254 conf->log->r5c_journal_mode = val; 2294 conf->log->r5c_journal_mode = val;
2255 mddev_resume(mddev); 2295 mddev_resume(mddev);
@@ -2304,6 +2344,16 @@ int r5c_try_caching_write(struct r5conf *conf,
2304 set_bit(STRIPE_R5C_CACHING, &sh->state); 2344 set_bit(STRIPE_R5C_CACHING, &sh->state);
2305 } 2345 }
2306 2346
2347 /*
2348 * When run in degraded mode, the array is set to write-through mode.
2349 * This check helps drain pending writes safely in the transition to
2350 * write-through mode.
2351 */
2352 if (s->failed) {
2353 r5c_make_stripe_write_out(sh);
2354 return -EAGAIN;
2355 }
2356
2307 for (i = disks; i--; ) { 2357 for (i = disks; i--; ) {
2308 dev = &sh->dev[i]; 2358 dev = &sh->dev[i];
2309 /* if non-overwrite, use writing-out phase */ 2359 /* if non-overwrite, use writing-out phase */
@@ -2354,6 +2404,8 @@ void r5c_release_extra_page(struct stripe_head *sh)
2354 struct page *p = sh->dev[i].orig_page; 2404 struct page *p = sh->dev[i].orig_page;
2355 2405
2356 sh->dev[i].orig_page = sh->dev[i].page; 2406 sh->dev[i].orig_page = sh->dev[i].page;
2407 clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
2408
2357 if (!using_disk_info_extra_page) 2409 if (!using_disk_info_extra_page)
2358 put_page(p); 2410 put_page(p);
2359 } 2411 }
@@ -2418,9 +2470,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
2418 if (do_wakeup) 2470 if (do_wakeup)
2419 wake_up(&conf->wait_for_overlap); 2471 wake_up(&conf->wait_for_overlap);
2420 2472
2421 if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
2422 return;
2423
2424 spin_lock_irq(&conf->log->stripe_in_journal_lock); 2473 spin_lock_irq(&conf->log->stripe_in_journal_lock);
2425 list_del_init(&sh->r5c); 2474 list_del_init(&sh->r5c);
2426 spin_unlock_irq(&conf->log->stripe_in_journal_lock); 2475 spin_unlock_irq(&conf->log->stripe_in_journal_lock);
@@ -2561,6 +2610,19 @@ ioerr:
2561 return ret; 2610 return ret;
2562} 2611}
2563 2612
2613void r5c_update_on_rdev_error(struct mddev *mddev)
2614{
2615 struct r5conf *conf = mddev->private;
2616 struct r5l_log *log = conf->log;
2617
2618 if (!log)
2619 return;
2620
2621 if (raid5_calc_degraded(conf) > 0 &&
2622 conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
2623 schedule_work(&log->disable_writeback_work);
2624}
2625
2564int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) 2626int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
2565{ 2627{
2566 struct request_queue *q = bdev_get_queue(rdev->bdev); 2628 struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -2633,20 +2695,23 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
2633 spin_lock_init(&log->no_space_stripes_lock); 2695 spin_lock_init(&log->no_space_stripes_lock);
2634 2696
2635 INIT_WORK(&log->deferred_io_work, r5l_submit_io_async); 2697 INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
2698 INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
2636 2699
2637 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; 2700 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2638 INIT_LIST_HEAD(&log->stripe_in_journal_list); 2701 INIT_LIST_HEAD(&log->stripe_in_journal_list);
2639 spin_lock_init(&log->stripe_in_journal_lock); 2702 spin_lock_init(&log->stripe_in_journal_lock);
2640 atomic_set(&log->stripe_in_journal_count, 0); 2703 atomic_set(&log->stripe_in_journal_count, 0);
2641 2704
2705 rcu_assign_pointer(conf->log, log);
2706
2642 if (r5l_load_log(log)) 2707 if (r5l_load_log(log))
2643 goto error; 2708 goto error;
2644 2709
2645 rcu_assign_pointer(conf->log, log);
2646 set_bit(MD_HAS_JOURNAL, &conf->mddev->flags); 2710 set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
2647 return 0; 2711 return 0;
2648 2712
2649error: 2713error:
2714 rcu_assign_pointer(conf->log, NULL);
2650 md_unregister_thread(&log->reclaim_thread); 2715 md_unregister_thread(&log->reclaim_thread);
2651reclaim_thread: 2716reclaim_thread:
2652 mempool_destroy(log->meta_pool); 2717 mempool_destroy(log->meta_pool);
@@ -2663,6 +2728,7 @@ io_kc:
2663 2728
2664void r5l_exit_log(struct r5l_log *log) 2729void r5l_exit_log(struct r5l_log *log)
2665{ 2730{
2731 flush_work(&log->disable_writeback_work);
2666 md_unregister_thread(&log->reclaim_thread); 2732 md_unregister_thread(&log->reclaim_thread);
2667 mempool_destroy(log->meta_pool); 2733 mempool_destroy(log->meta_pool);
2668 bioset_free(log->bs); 2734 bioset_free(log->bs);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 06d7279bdd04..3c7e106c12a2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -62,6 +62,8 @@
62#include "raid0.h" 62#include "raid0.h"
63#include "bitmap.h" 63#include "bitmap.h"
64 64
65#define UNSUPPORTED_MDDEV_FLAGS (1L << MD_FAILFAST_SUPPORTED)
66
65#define cpu_to_group(cpu) cpu_to_node(cpu) 67#define cpu_to_group(cpu) cpu_to_node(cpu)
66#define ANY_GROUP NUMA_NO_NODE 68#define ANY_GROUP NUMA_NO_NODE
67 69
@@ -554,7 +556,7 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
554 * of the two sections, and some non-in_sync devices may 556 * of the two sections, and some non-in_sync devices may
555 * be insync in the section most affected by failed devices. 557 * be insync in the section most affected by failed devices.
556 */ 558 */
557static int calc_degraded(struct r5conf *conf) 559int raid5_calc_degraded(struct r5conf *conf)
558{ 560{
559 int degraded, degraded2; 561 int degraded, degraded2;
560 int i; 562 int i;
@@ -617,7 +619,7 @@ static int has_failed(struct r5conf *conf)
617 if (conf->mddev->reshape_position == MaxSector) 619 if (conf->mddev->reshape_position == MaxSector)
618 return conf->mddev->degraded > conf->max_degraded; 620 return conf->mddev->degraded > conf->max_degraded;
619 621
620 degraded = calc_degraded(conf); 622 degraded = raid5_calc_degraded(conf);
621 if (degraded > conf->max_degraded) 623 if (degraded > conf->max_degraded)
622 return 1; 624 return 1;
623 return 0; 625 return 0;
@@ -1013,7 +1015,17 @@ again:
1013 1015
1014 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 1016 if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
1015 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 1017 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
1016 sh->dev[i].vec.bv_page = sh->dev[i].page; 1018
1019 if (!op_is_write(op) &&
1020 test_bit(R5_InJournal, &sh->dev[i].flags))
1021 /*
1022 * issuing read for a page in journal, this
1023 * must be preparing for prexor in rmw; read
1024 * the data into orig_page
1025 */
1026 sh->dev[i].vec.bv_page = sh->dev[i].orig_page;
1027 else
1028 sh->dev[i].vec.bv_page = sh->dev[i].page;
1017 bi->bi_vcnt = 1; 1029 bi->bi_vcnt = 1;
1018 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 1030 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
1019 bi->bi_io_vec[0].bv_offset = 0; 1031 bi->bi_io_vec[0].bv_offset = 0;
@@ -2378,6 +2390,13 @@ static void raid5_end_read_request(struct bio * bi)
2378 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2390 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2379 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2391 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2380 2392
2393 if (test_bit(R5_InJournal, &sh->dev[i].flags))
2394 /*
2395 * end read for a page in journal, this
2396 * must be preparing for prexor in rmw
2397 */
2398 set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
2399
2381 if (atomic_read(&rdev->read_errors)) 2400 if (atomic_read(&rdev->read_errors))
2382 atomic_set(&rdev->read_errors, 0); 2401 atomic_set(&rdev->read_errors, 0);
2383 } else { 2402 } else {
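Both hunks above exist so that a read issued for an in-journal block lands in dev->orig_page (flagged R5_OrigPageUPTDODATE on completion) while dev->page keeps the new data; parity can then be updated by prexor followed by xor. The arithmetic is plain xor, as this small standalone program shows:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned char old_data = 0x5a;	/* on-disk data, read into orig_page */
	unsigned char new_data = 0xc3;	/* cached write, held in page */
	unsigned char other = 0x77;	/* data on the remaining disk */
	unsigned char parity = old_data ^ other;

	parity ^= old_data;	/* prexor: cancel the old contribution */
	parity ^= new_data;	/* xor: add the new contribution */

	assert(parity == (new_data ^ other));
	printf("rmw parity: 0x%02x\n", parity);
	return 0;
}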
@@ -2536,7 +2555,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2536 2555
2537 spin_lock_irqsave(&conf->device_lock, flags); 2556 spin_lock_irqsave(&conf->device_lock, flags);
2538 clear_bit(In_sync, &rdev->flags); 2557 clear_bit(In_sync, &rdev->flags);
2539 mddev->degraded = calc_degraded(conf); 2558 mddev->degraded = raid5_calc_degraded(conf);
2540 spin_unlock_irqrestore(&conf->device_lock, flags); 2559 spin_unlock_irqrestore(&conf->device_lock, flags);
2541 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2560 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2542 2561
@@ -2550,6 +2569,7 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2550 bdevname(rdev->bdev, b), 2569 bdevname(rdev->bdev, b),
2551 mdname(mddev), 2570 mdname(mddev),
2552 conf->raid_disks - mddev->degraded); 2571 conf->raid_disks - mddev->degraded);
2572 r5c_update_on_rdev_error(mddev);
2553} 2573}
2554 2574
2555/* 2575/*
@@ -2878,6 +2898,30 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
2878 return r_sector; 2898 return r_sector;
2879} 2899}
2880 2900
2901/*
2902 * There are cases where we want handle_stripe_dirtying() and
2903 * schedule_reconstruction() to delay towrite to some dev of a stripe.
2904 *
2905 * This function checks whether we want to delay the towrite. Specifically,
2906 * we delay the towrite when:
2907 *
2908 * 1. a degraded stripe has a non-overwrite write to the missing dev, AND this
2909 * stripe has data in journal (for other devices).
2910 *
2911 * In this case, when reading data for the non-overwrite dev, it is
2912 * necessary to handle complex rmw of the write back cache (prexor with
2913 * orig_page, and xor with page). To keep the read path simple, we would
2914 * like to flush data in the journal to the RAID disks first, so complex rmw
2915 * is handled in the write path (handle_stripe_dirtying).
2916 *
2917 */
2918static inline bool delay_towrite(struct r5dev *dev,
2919 struct stripe_head_state *s)
2920{
2921 return !test_bit(R5_OVERWRITE, &dev->flags) &&
2922 !test_bit(R5_Insync, &dev->flags) && s->injournal;
2923}
2924
2881static void 2925static void
2882schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 2926schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2883 int rcw, int expand) 2927 int rcw, int expand)
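delay_towrite() above is a pure predicate; the hunks that follow use it to keep such writes out of both schedule_reconstruction() and the rmw/rcw accounting. Its truth table is easy to check in isolation (plain booleans stand in for the R5_* flag tests):

#include <stdbool.h>
#include <stdio.h>

/* Delay a write when it is a partial (non-overwrite) write to a device
 * that is not in sync, while the stripe still has data in the journal. */
static bool delay_towrite(bool overwrite, bool insync, bool injournal)
{
	return !overwrite && !insync && injournal;
}

int main(void)
{
	printf("%d\n", delay_towrite(false, false, true));  /* 1: delayed */
	printf("%d\n", delay_towrite(true, false, true));   /* 0: full write */
	printf("%d\n", delay_towrite(false, false, false)); /* 0: no journal */
	return 0;
}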
@@ -2898,7 +2942,7 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2898 for (i = disks; i--; ) { 2942 for (i = disks; i--; ) {
2899 struct r5dev *dev = &sh->dev[i]; 2943 struct r5dev *dev = &sh->dev[i];
2900 2944
2901 if (dev->towrite) { 2945 if (dev->towrite && !delay_towrite(dev, s)) {
2902 set_bit(R5_LOCKED, &dev->flags); 2946 set_bit(R5_LOCKED, &dev->flags);
2903 set_bit(R5_Wantdrain, &dev->flags); 2947 set_bit(R5_Wantdrain, &dev->flags);
2904 if (!expand) 2948 if (!expand)
@@ -3293,13 +3337,6 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
3293 return rv; 3337 return rv;
3294} 3338}
3295 3339
3296/* fetch_block - checks the given member device to see if its data needs
3297 * to be read or computed to satisfy a request.
3298 *
3299 * Returns 1 when no more member devices need to be checked, otherwise returns
3300 * 0 to tell the loop in handle_stripe_fill to continue
3301 */
3302
3303static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, 3340static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3304 int disk_idx, int disks) 3341 int disk_idx, int disks)
3305{ 3342{
@@ -3390,6 +3427,12 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
3390 return 0; 3427 return 0;
3391} 3428}
3392 3429
3430/* fetch_block - checks the given member device to see if its data needs
3431 * to be read or computed to satisfy a request.
3432 *
3433 * Returns 1 when no more member devices need to be checked, otherwise returns
3434 * 0 to tell the loop in handle_stripe_fill to continue
3435 */
3393static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, 3436static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
3394 int disk_idx, int disks) 3437 int disk_idx, int disks)
3395{ 3438{
@@ -3476,10 +3519,26 @@ static void handle_stripe_fill(struct stripe_head *sh,
3476 * midst of changing due to a write 3519 * midst of changing due to a write
3477 */ 3520 */
3478 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 3521 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
3479 !sh->reconstruct_state) 3522 !sh->reconstruct_state) {
3523
3524 /*
3525 * For a degraded stripe with data in the journal, do not handle
3526 * read requests yet; instead, flush the stripe to the raid
3527 * disks first. This avoids handling complex rmw of the write
3528 * back cache (prexor with orig_page, and then xor with
3529 * page) in the read path.
3530 */
3531 if (s->injournal && s->failed) {
3532 if (test_bit(STRIPE_R5C_CACHING, &sh->state))
3533 r5c_make_stripe_write_out(sh);
3534 goto out;
3535 }
3536
3480 for (i = disks; i--; ) 3537 for (i = disks; i--; )
3481 if (fetch_block(sh, s, i, disks)) 3538 if (fetch_block(sh, s, i, disks))
3482 break; 3539 break;
3540 }
3541out:
3483 set_bit(STRIPE_HANDLE, &sh->state); 3542 set_bit(STRIPE_HANDLE, &sh->state);
3484} 3543}
3485 3544
@@ -3592,6 +3651,21 @@ unhash:
3592 break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); 3651 break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
3593} 3652}
3594 3653
3654/*
3655 * For RMW in write back cache, we need an extra page in prexor to store the
3656 * old data. This page is stored in dev->orig_page.
3657 *
3658 * This function checks whether we have data for prexor. The exact logic
3659 * is:
3660 * R5_UPTODATE && (!R5_InJournal || R5_OrigPageUPTDODATE)
3661 */
3662static inline bool uptodate_for_rmw(struct r5dev *dev)
3663{
3664 return (test_bit(R5_UPTODATE, &dev->flags)) &&
3665 (!test_bit(R5_InJournal, &dev->flags) ||
3666 test_bit(R5_OrigPageUPTDODATE, &dev->flags));
3667}
3668
3595static int handle_stripe_dirtying(struct r5conf *conf, 3669static int handle_stripe_dirtying(struct r5conf *conf,
3596 struct stripe_head *sh, 3670 struct stripe_head *sh,
3597 struct stripe_head_state *s, 3671 struct stripe_head_state *s,
@@ -3620,12 +3694,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
3620 } else for (i = disks; i--; ) { 3694 } else for (i = disks; i--; ) {
3621 /* would I have to read this buffer for read_modify_write */ 3695 /* would I have to read this buffer for read_modify_write */
3622 struct r5dev *dev = &sh->dev[i]; 3696 struct r5dev *dev = &sh->dev[i];
3623 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx || 3697 if (((dev->towrite && !delay_towrite(dev, s)) ||
3698 i == sh->pd_idx || i == sh->qd_idx ||
3624 test_bit(R5_InJournal, &dev->flags)) && 3699 test_bit(R5_InJournal, &dev->flags)) &&
3625 !test_bit(R5_LOCKED, &dev->flags) && 3700 !test_bit(R5_LOCKED, &dev->flags) &&
3626 !((test_bit(R5_UPTODATE, &dev->flags) && 3701 !(uptodate_for_rmw(dev) ||
3627 (!test_bit(R5_InJournal, &dev->flags) ||
3628 dev->page != dev->orig_page)) ||
3629 test_bit(R5_Wantcompute, &dev->flags))) { 3702 test_bit(R5_Wantcompute, &dev->flags))) {
3630 if (test_bit(R5_Insync, &dev->flags)) 3703 if (test_bit(R5_Insync, &dev->flags))
3631 rmw++; 3704 rmw++;
@@ -3637,7 +3710,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
3637 i != sh->pd_idx && i != sh->qd_idx && 3710 i != sh->pd_idx && i != sh->qd_idx &&
3638 !test_bit(R5_LOCKED, &dev->flags) && 3711 !test_bit(R5_LOCKED, &dev->flags) &&
3639 !(test_bit(R5_UPTODATE, &dev->flags) || 3712 !(test_bit(R5_UPTODATE, &dev->flags) ||
3640 test_bit(R5_InJournal, &dev->flags) ||
3641 test_bit(R5_Wantcompute, &dev->flags))) { 3713 test_bit(R5_Wantcompute, &dev->flags))) {
3642 if (test_bit(R5_Insync, &dev->flags)) 3714 if (test_bit(R5_Insync, &dev->flags))
3643 rcw++; 3715 rcw++;
@@ -3687,13 +3759,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
3687 3759
3688 for (i = disks; i--; ) { 3760 for (i = disks; i--; ) {
3689 struct r5dev *dev = &sh->dev[i]; 3761 struct r5dev *dev = &sh->dev[i];
3690 if ((dev->towrite || 3762 if (((dev->towrite && !delay_towrite(dev, s)) ||
3691 i == sh->pd_idx || i == sh->qd_idx || 3763 i == sh->pd_idx || i == sh->qd_idx ||
3692 test_bit(R5_InJournal, &dev->flags)) && 3764 test_bit(R5_InJournal, &dev->flags)) &&
3693 !test_bit(R5_LOCKED, &dev->flags) && 3765 !test_bit(R5_LOCKED, &dev->flags) &&
3694 !((test_bit(R5_UPTODATE, &dev->flags) && 3766 !(uptodate_for_rmw(dev) ||
3695 (!test_bit(R5_InJournal, &dev->flags) ||
3696 dev->page != dev->orig_page)) ||
3697 test_bit(R5_Wantcompute, &dev->flags)) && 3767 test_bit(R5_Wantcompute, &dev->flags)) &&
3698 test_bit(R5_Insync, &dev->flags)) { 3768 test_bit(R5_Insync, &dev->flags)) {
3699 if (test_bit(STRIPE_PREREAD_ACTIVE, 3769 if (test_bit(STRIPE_PREREAD_ACTIVE,
@@ -3720,7 +3790,6 @@ static int handle_stripe_dirtying(struct r5conf *conf,
3720 i != sh->pd_idx && i != sh->qd_idx && 3790 i != sh->pd_idx && i != sh->qd_idx &&
3721 !test_bit(R5_LOCKED, &dev->flags) && 3791 !test_bit(R5_LOCKED, &dev->flags) &&
3722 !(test_bit(R5_UPTODATE, &dev->flags) || 3792 !(test_bit(R5_UPTODATE, &dev->flags) ||
3723 test_bit(R5_InJournal, &dev->flags) ||
3724 test_bit(R5_Wantcompute, &dev->flags))) { 3793 test_bit(R5_Wantcompute, &dev->flags))) {
3725 rcw++; 3794 rcw++;
3726 if (test_bit(R5_Insync, &dev->flags) && 3795 if (test_bit(R5_Insync, &dev->flags) &&
@@ -7023,7 +7092,7 @@ static int raid5_run(struct mddev *mddev)
7023 /* 7092 /*
7024 * 0 for a fully functional array, 1 or 2 for a degraded array. 7093 * 0 for a fully functional array, 1 or 2 for a degraded array.
7025 */ 7094 */
7026 mddev->degraded = calc_degraded(conf); 7095 mddev->degraded = raid5_calc_degraded(conf);
7027 7096
7028 if (has_failed(conf)) { 7097 if (has_failed(conf)) {
7029 pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n", 7098 pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
@@ -7270,7 +7339,7 @@ static int raid5_spare_active(struct mddev *mddev)
7270 } 7339 }
7271 } 7340 }
7272 spin_lock_irqsave(&conf->device_lock, flags); 7341 spin_lock_irqsave(&conf->device_lock, flags);
7273 mddev->degraded = calc_degraded(conf); 7342 mddev->degraded = raid5_calc_degraded(conf);
7274 spin_unlock_irqrestore(&conf->device_lock, flags); 7343 spin_unlock_irqrestore(&conf->device_lock, flags);
7275 print_raid5_conf(conf); 7344 print_raid5_conf(conf);
7276 return count; 7345 return count;
@@ -7630,7 +7699,7 @@ static int raid5_start_reshape(struct mddev *mddev)
7630 * pre and post number of devices. 7699 * pre and post number of devices.
7631 */ 7700 */
7632 spin_lock_irqsave(&conf->device_lock, flags); 7701 spin_lock_irqsave(&conf->device_lock, flags);
7633 mddev->degraded = calc_degraded(conf); 7702 mddev->degraded = raid5_calc_degraded(conf);
7634 spin_unlock_irqrestore(&conf->device_lock, flags); 7703 spin_unlock_irqrestore(&conf->device_lock, flags);
7635 } 7704 }
7636 mddev->raid_disks = conf->raid_disks; 7705 mddev->raid_disks = conf->raid_disks;
@@ -7718,7 +7787,7 @@ static void raid5_finish_reshape(struct mddev *mddev)
7718 } else { 7787 } else {
7719 int d; 7788 int d;
7720 spin_lock_irq(&conf->device_lock); 7789 spin_lock_irq(&conf->device_lock);
7721 mddev->degraded = calc_degraded(conf); 7790 mddev->degraded = raid5_calc_degraded(conf);
7722 spin_unlock_irq(&conf->device_lock); 7791 spin_unlock_irq(&conf->device_lock);
7723 for (d = conf->raid_disks ; 7792 for (d = conf->raid_disks ;
7724 d < conf->raid_disks - mddev->delta_disks; 7793 d < conf->raid_disks - mddev->delta_disks;
@@ -7829,8 +7898,9 @@ static void *raid5_takeover_raid1(struct mddev *mddev)
7829 mddev->new_chunk_sectors = chunksect; 7898 mddev->new_chunk_sectors = chunksect;
7830 7899
7831 ret = setup_conf(mddev); 7900 ret = setup_conf(mddev);
7832 if (!IS_ERR_VALUE(ret)) 7901 if (!IS_ERR(ret))
7833 clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); 7902 mddev_clear_unsupported_flags(mddev,
7903 UNSUPPORTED_MDDEV_FLAGS);
7834 return ret; 7904 return ret;
7835} 7905}
7836 7906
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index ed8e1362ab36..1440fa26e296 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -322,6 +322,11 @@ enum r5dev_flags {
322 * data and parity being written are in the journal 322 * data and parity being written are in the journal
323 * device 323 * device
324 */ 324 */
325 R5_OrigPageUPTDODATE, /* with write back cache, we read old data into
326 * dev->orig_page for prexor. When this flag is
327 * set, orig_page contains latest data in the
328 * raid disk.
329 */
325}; 330};
326 331
327/* 332/*
@@ -753,6 +758,7 @@ extern sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
753extern struct stripe_head * 758extern struct stripe_head *
754raid5_get_active_stripe(struct r5conf *conf, sector_t sector, 759raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
755 int previous, int noblock, int noquiesce); 760 int previous, int noblock, int noquiesce);
761extern int raid5_calc_degraded(struct r5conf *conf);
756extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev); 762extern int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev);
757extern void r5l_exit_log(struct r5l_log *log); 763extern void r5l_exit_log(struct r5l_log *log);
758extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh); 764extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
@@ -781,4 +787,5 @@ extern void r5c_flush_cache(struct r5conf *conf, int num);
781extern void r5c_check_stripe_cache_usage(struct r5conf *conf); 787extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
782extern void r5c_check_cached_full_stripe(struct r5conf *conf); 788extern void r5c_check_cached_full_stripe(struct r5conf *conf);
783extern struct md_sysfs_entry r5c_journal_mode; 789extern struct md_sysfs_entry r5c_journal_mode;
790extern void r5c_update_on_rdev_error(struct mddev *mddev);
784#endif 791#endif
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 0ea4efb3de66..ccda41c2c9e4 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -30,8 +30,9 @@
30 30
31#include "cec-priv.h" 31#include "cec-priv.h"
32 32
33static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx); 33static void cec_fill_msg_report_features(struct cec_adapter *adap,
34static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx); 34 struct cec_msg *msg,
35 unsigned int la_idx);
35 36
36/* 37/*
37 * 400 ms is the time it takes for one 16 byte message to be 38 * 400 ms is the time it takes for one 16 byte message to be
@@ -288,10 +289,10 @@ static void cec_data_cancel(struct cec_data *data)
288 289
289 /* Mark it as an error */ 290 /* Mark it as an error */
290 data->msg.tx_ts = ktime_get_ns(); 291 data->msg.tx_ts = ktime_get_ns();
291 data->msg.tx_status = CEC_TX_STATUS_ERROR | 292 data->msg.tx_status |= CEC_TX_STATUS_ERROR |
292 CEC_TX_STATUS_MAX_RETRIES; 293 CEC_TX_STATUS_MAX_RETRIES;
294 data->msg.tx_error_cnt++;
293 data->attempts = 0; 295 data->attempts = 0;
294 data->msg.tx_error_cnt = 1;
295 /* Queue transmitted message for monitoring purposes */ 296 /* Queue transmitted message for monitoring purposes */
296 cec_queue_msg_monitor(data->adap, &data->msg, 1); 297 cec_queue_msg_monitor(data->adap, &data->msg, 1);
297 298
@@ -611,8 +612,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
611 } 612 }
612 memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len); 613 memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
613 if (msg->len == 1) { 614 if (msg->len == 1) {
614 if (cec_msg_initiator(msg) != 0xf || 615 if (cec_msg_destination(msg) == 0xf) {
615 cec_msg_destination(msg) == 0xf) {
616 dprintk(1, "cec_transmit_msg: invalid poll message\n"); 616 dprintk(1, "cec_transmit_msg: invalid poll message\n");
617 return -EINVAL; 617 return -EINVAL;
618 } 618 }
@@ -637,7 +637,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
637 dprintk(1, "cec_transmit_msg: destination is the adapter itself\n"); 637 dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
638 return -EINVAL; 638 return -EINVAL;
639 } 639 }
640 if (cec_msg_initiator(msg) != 0xf && 640 if (msg->len > 1 && adap->is_configured &&
641 !cec_has_log_addr(adap, cec_msg_initiator(msg))) { 641 !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
642 dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n", 642 dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
643 cec_msg_initiator(msg)); 643 cec_msg_initiator(msg));
@@ -851,7 +851,7 @@ static const u8 cec_msg_size[256] = {
851 [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED, 851 [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
852 [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED, 852 [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
853 [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST, 853 [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
854 [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST, 854 [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
855 [CEC_MSG_CDC_MESSAGE] = 2 | BCAST, 855 [CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
856}; 856};
857 857
@@ -1071,7 +1071,7 @@ static int cec_config_log_addr(struct cec_adapter *adap,
1071 1071
1072 /* Send poll message */ 1072 /* Send poll message */
1073 msg.len = 1; 1073 msg.len = 1;
1074 msg.msg[0] = 0xf0 | log_addr; 1074 msg.msg[0] = (log_addr << 4) | log_addr;
1075 err = cec_transmit_msg_fh(adap, &msg, NULL, true); 1075 err = cec_transmit_msg_fh(adap, &msg, NULL, true);
1076 1076
1077 /* 1077 /*
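The corrected poll message above puts the same logical address in both nibbles of the header byte; a CEC poll is defined as a message whose initiator equals its destination. The header-byte math in isolation (cec_poll_header is an illustrative name, not a kernel helper):

#include <stdint.h>
#include <stdio.h>

/* CEC header byte: initiator in the high nibble, destination in the low
 * nibble. Polling a logical address means sending from it to itself. */
static uint8_t cec_poll_header(uint8_t log_addr)
{
	return (uint8_t)((log_addr << 4) | (log_addr & 0x0f));
}

int main(void)
{
	printf("poll la 4: 0x%02x\n", cec_poll_header(4));	/* 0x44 */
	printf("poll la 0: 0x%02x\n", cec_poll_header(0));	/* 0x00 */
	return 0;
}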
@@ -1205,7 +1205,7 @@ static int cec_config_thread_func(void *arg)
1205 las->log_addr[i] = CEC_LOG_ADDR_INVALID; 1205 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1206 if (last_la == CEC_LOG_ADDR_INVALID || 1206 if (last_la == CEC_LOG_ADDR_INVALID ||
1207 last_la == CEC_LOG_ADDR_UNREGISTERED || 1207 last_la == CEC_LOG_ADDR_UNREGISTERED ||
1208 !(last_la & type2mask[type])) 1208 !((1 << last_la) & type2mask[type]))
1209 last_la = la_list[0]; 1209 last_la = la_list[0];
1210 1210
1211 err = cec_config_log_addr(adap, i, last_la); 1211 err = cec_config_log_addr(adap, i, last_la);
@@ -1250,30 +1250,49 @@ configured:
1250 for (i = 1; i < las->num_log_addrs; i++) 1250 for (i = 1; i < las->num_log_addrs; i++)
1251 las->log_addr[i] = CEC_LOG_ADDR_INVALID; 1251 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1252 } 1252 }
1253 for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
1254 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1253 adap->is_configured = true; 1255 adap->is_configured = true;
1254 adap->is_configuring = false; 1256 adap->is_configuring = false;
1255 cec_post_state_event(adap); 1257 cec_post_state_event(adap);
1256 mutex_unlock(&adap->lock);
1257 1258
1259 /*
1260 * Now post the Report Features and Report Physical Address broadcast
1261 * messages. Note that these are non-blocking transmits, meaning that
1262 * they are just queued up, and once adap->lock is unlocked the main
1263 * thread will kick in and start transmitting these.
1264 *
1265 * If after this function is done (but before one or more of these
1266 * messages are actually transmitted) the CEC adapter is unconfigured,
1267 * then any remaining messages will be dropped by the main thread.
1268 */
1258 for (i = 0; i < las->num_log_addrs; i++) { 1269 for (i = 0; i < las->num_log_addrs; i++) {
1270 struct cec_msg msg = {};
1271
1259 if (las->log_addr[i] == CEC_LOG_ADDR_INVALID || 1272 if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
1260 (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY)) 1273 (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
1261 continue; 1274 continue;
1262 1275
1263 /* 1276 msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;
1264 * Report Features must come first according 1277
1265 * to CEC 2.0 1278 /* Report Features must come first according to CEC 2.0 */
1266 */ 1279 if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
1267 if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED) 1280 adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
1268 cec_report_features(adap, i); 1281 cec_fill_msg_report_features(adap, &msg, i);
1269 cec_report_phys_addr(adap, i); 1282 cec_transmit_msg_fh(adap, &msg, NULL, false);
1283 }
1284
1285 /* Report Physical Address */
1286 cec_msg_report_physical_addr(&msg, adap->phys_addr,
1287 las->primary_device_type[i]);
1288 dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
1289 las->log_addr[i],
1290 cec_phys_addr_exp(adap->phys_addr));
1291 cec_transmit_msg_fh(adap, &msg, NULL, false);
1270 } 1292 }
1271 for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
1272 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1273 mutex_lock(&adap->lock);
1274 adap->kthread_config = NULL; 1293 adap->kthread_config = NULL;
1275 mutex_unlock(&adap->lock);
1276 complete(&adap->config_completion); 1294 complete(&adap->config_completion);
1295 mutex_unlock(&adap->lock);
1277 return 0; 1296 return 0;
1278 1297
1279unconfigure: 1298unconfigure:
@@ -1526,52 +1545,32 @@ EXPORT_SYMBOL_GPL(cec_s_log_addrs);
1526 1545
1527/* High-level core CEC message handling */ 1546/* High-level core CEC message handling */
1528 1547
1529/* Transmit the Report Features message */ 1548/* Fill in the Report Features message */
1530static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx) 1549static void cec_fill_msg_report_features(struct cec_adapter *adap,
1550 struct cec_msg *msg,
1551 unsigned int la_idx)
1531{ 1552{
1532 struct cec_msg msg = { };
1533 const struct cec_log_addrs *las = &adap->log_addrs; 1553 const struct cec_log_addrs *las = &adap->log_addrs;
1534 const u8 *features = las->features[la_idx]; 1554 const u8 *features = las->features[la_idx];
1535 bool op_is_dev_features = false; 1555 bool op_is_dev_features = false;
1536 unsigned int idx; 1556 unsigned int idx;
1537 1557
1538 /* This is 2.0 and up only */
1539 if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
1540 return 0;
1541
1542 /* Report Features */ 1558 /* Report Features */
1543 msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f; 1559 msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
1544 msg.len = 4; 1560 msg->len = 4;
1545 msg.msg[1] = CEC_MSG_REPORT_FEATURES; 1561 msg->msg[1] = CEC_MSG_REPORT_FEATURES;
1546 msg.msg[2] = adap->log_addrs.cec_version; 1562 msg->msg[2] = adap->log_addrs.cec_version;
1547 msg.msg[3] = las->all_device_types[la_idx]; 1563 msg->msg[3] = las->all_device_types[la_idx];
1548 1564
1549 /* Write RC Profiles first, then Device Features */ 1565 /* Write RC Profiles first, then Device Features */
1550 for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) { 1566 for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
1551 msg.msg[msg.len++] = features[idx]; 1567 msg->msg[msg->len++] = features[idx];
1552 if ((features[idx] & CEC_OP_FEAT_EXT) == 0) { 1568 if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
1553 if (op_is_dev_features) 1569 if (op_is_dev_features)
1554 break; 1570 break;
1555 op_is_dev_features = true; 1571 op_is_dev_features = true;
1556 } 1572 }
1557 } 1573 }
1558 return cec_transmit_msg(adap, &msg, false);
1559}
1560
1561/* Transmit the Report Physical Address message */
1562static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
1563{
1564 const struct cec_log_addrs *las = &adap->log_addrs;
1565 struct cec_msg msg = { };
1566
1567 /* Report Physical Address */
1568 msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
1569 cec_msg_report_physical_addr(&msg, adap->phys_addr,
1570 las->primary_device_type[la_idx]);
1571 dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
1572 las->log_addr[la_idx],
1573 cec_phys_addr_exp(adap->phys_addr));
1574 return cec_transmit_msg(adap, &msg, false);
1575} 1574}
1576 1575
1577/* Transmit the Feature Abort message */ 1576/* Transmit the Feature Abort message */
@@ -1777,9 +1776,10 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1777 } 1776 }
1778 1777
1779 case CEC_MSG_GIVE_FEATURES: 1778 case CEC_MSG_GIVE_FEATURES:
1780 if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) 1779 if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
1781 return cec_report_features(adap, la_idx); 1780 return cec_feature_abort(adap, msg);
1782 return 0; 1781 cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
1782 return cec_transmit_msg(adap, &tx_cec_msg, false);
1783 1783
1784 default: 1784 default:
1785 /* 1785 /*
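cec_fill_msg_report_features() walks the features array exactly as CEC 2.0 lays the operands out: RC profiles first, then device features, each list continuing while bit 7 (the extension bit) is set. A standalone model of that walk; the opcode and operand values follow the spec, but the feature bytes here are made up:

#include <stdint.h>
#include <stdio.h>

#define FEAT_EXT 0x80	/* bit 7: another byte of this operand follows */

int main(void)
{
	/* two RC-profile bytes (first has EXT), one device-features byte */
	const uint8_t features[] = { 0x80, 0x00, 0x02, 0x00 };
	uint8_t msg[16];
	unsigned int len = 0, i;
	int in_dev_features = 0;

	msg[len++] = (4 << 4) | 0x0f;	/* from la 4, broadcast */
	msg[len++] = 0xa6;		/* CEC_MSG_REPORT_FEATURES */
	msg[len++] = 6;			/* CEC version 2.0 */
	msg[len++] = 0x10;		/* all-device-types: playback */

	for (i = 0; i < sizeof(features); i++) {
		msg[len++] = features[i];
		if (!(features[i] & FEAT_EXT)) {
			if (in_dev_features)
				break;	/* both lists terminated */
			in_dev_features = 1;
		}
	}
	printf("Report Features: %u bytes\n", len);	/* 7 */
	return 0;
}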
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index bc5e8cfe7ca2..8f11d7e45993 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -719,6 +719,9 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
719 skb_copy_from_linear_data(h->priv->ule_skb, dest_addr, 719 skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
720 ETH_ALEN); 720 ETH_ALEN);
721 skb_pull(h->priv->ule_skb, ETH_ALEN); 721 skb_pull(h->priv->ule_skb, ETH_ALEN);
722 } else {
723 /* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
724 eth_zero_addr(dest_addr);
722 } 725 }
723 726
724 /* Handle ULE Extension Headers. */ 727 /* Handle ULE Extension Headers. */
@@ -750,16 +753,8 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
750 if (!h->priv->ule_bridged) { 753 if (!h->priv->ule_bridged) {
751 skb_push(h->priv->ule_skb, ETH_HLEN); 754 skb_push(h->priv->ule_skb, ETH_HLEN);
752 h->ethh = (struct ethhdr *)h->priv->ule_skb->data; 755 h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
753 if (!h->priv->ule_dbit) { 756 memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
754 /* 757 eth_zero_addr(h->ethh->h_source);
755 * dest_addr buffer is only valid if
756 * h->priv->ule_dbit == 0
757 */
758 memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
759 eth_zero_addr(h->ethh->h_source);
760 } else /* zeroize source and dest */
761 memset(h->ethh, 0, ETH_ALEN * 2);
762
763 h->ethh->h_proto = htons(h->priv->ule_sndu_type); 758 h->ethh->h_proto = htons(h->priv->ule_sndu_type);
764 } 759 }
765 /* else: skb is in correct state; nothing to do. */ 760 /* else: skb is in correct state; nothing to do. */
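The dvb_net change works because dest_addr is now zeroed whenever the D-bit says no destination was carried in the SNDU, so the later memcpy into the rebuilt Ethernet header is unconditionally safe. The shape of that fix, reduced to standalone C:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
	unsigned char dest_addr[ETH_ALEN];
	unsigned char header[ETH_ALEN];
	int d_bit = 1;	/* 1: SNDU carried no destination address */

	if (!d_bit)
		memcpy(dest_addr, "\x00\x11\x22\x33\x44\x55", ETH_ALEN);
	else
		memset(dest_addr, 0, ETH_ALEN);	/* eth_zero_addr() analog */

	/* later code no longer needs to re-check d_bit */
	memcpy(header, dest_addr, ETH_ALEN);
	printf("%02x:...:%02x\n", header[0], header[ETH_ALEN - 1]);
	return 0;
}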
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index b31fa6fae009..b979ea148251 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -655,6 +655,7 @@ config VIDEO_S5K6A3
655config VIDEO_S5K4ECGX 655config VIDEO_S5K4ECGX
656 tristate "Samsung S5K4ECGX sensor support" 656 tristate "Samsung S5K4ECGX sensor support"
657 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API 657 depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
658 select CRC32
658 ---help--- 659 ---help---
659 This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M 660 This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
660 camera sensor with an embedded SoC image signal processor. 661 camera sensor with an embedded SoC image signal processor.
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 59872b31f832..f4e92bdfe192 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2741,9 +2741,7 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
2741 * I2C Driver 2741 * I2C Driver
2742 */ 2742 */
2743 2743
2744#ifdef CONFIG_PM 2744static int __maybe_unused smiapp_suspend(struct device *dev)
2745
2746static int smiapp_suspend(struct device *dev)
2747{ 2745{
2748 struct i2c_client *client = to_i2c_client(dev); 2746 struct i2c_client *client = to_i2c_client(dev);
2749 struct v4l2_subdev *subdev = i2c_get_clientdata(client); 2747 struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2768,7 +2766,7 @@ static int smiapp_suspend(struct device *dev)
2768 return 0; 2766 return 0;
2769} 2767}
2770 2768
2771static int smiapp_resume(struct device *dev) 2769static int __maybe_unused smiapp_resume(struct device *dev)
2772{ 2770{
2773 struct i2c_client *client = to_i2c_client(dev); 2771 struct i2c_client *client = to_i2c_client(dev);
2774 struct v4l2_subdev *subdev = i2c_get_clientdata(client); 2772 struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2783,13 +2781,6 @@ static int smiapp_resume(struct device *dev)
2783 return rval; 2781 return rval;
2784} 2782}
2785 2783
2786#else
2787
2788#define smiapp_suspend NULL
2789#define smiapp_resume NULL
2790
2791#endif /* CONFIG_PM */
2792
2793static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev) 2784static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
2794{ 2785{
2795 struct smiapp_hwconfig *hwcfg; 2786 struct smiapp_hwconfig *hwcfg;
@@ -2913,13 +2904,9 @@ static int smiapp_probe(struct i2c_client *client,
2913 if (IS_ERR(sensor->xshutdown)) 2904 if (IS_ERR(sensor->xshutdown))
2914 return PTR_ERR(sensor->xshutdown); 2905 return PTR_ERR(sensor->xshutdown);
2915 2906
2916 pm_runtime_enable(&client->dev); 2907 rval = smiapp_power_on(&client->dev);
2917 2908 if (rval < 0)
2918 rval = pm_runtime_get_sync(&client->dev); 2909 return rval;
2919 if (rval < 0) {
2920 rval = -ENODEV;
2921 goto out_power_off;
2922 }
2923 2910
2924 rval = smiapp_identify_module(sensor); 2911 rval = smiapp_identify_module(sensor);
2925 if (rval) { 2912 if (rval) {
@@ -3100,6 +3087,9 @@ static int smiapp_probe(struct i2c_client *client,
3100 if (rval < 0) 3087 if (rval < 0)
3101 goto out_media_entity_cleanup; 3088 goto out_media_entity_cleanup;
3102 3089
3090 pm_runtime_set_active(&client->dev);
3091 pm_runtime_get_noresume(&client->dev);
3092 pm_runtime_enable(&client->dev);
3103 pm_runtime_set_autosuspend_delay(&client->dev, 1000); 3093 pm_runtime_set_autosuspend_delay(&client->dev, 1000);
3104 pm_runtime_use_autosuspend(&client->dev); 3094 pm_runtime_use_autosuspend(&client->dev);
3105 pm_runtime_put_autosuspend(&client->dev); 3095 pm_runtime_put_autosuspend(&client->dev);
@@ -3113,8 +3103,7 @@ out_cleanup:
3113 smiapp_cleanup(sensor); 3103 smiapp_cleanup(sensor);
3114 3104
3115out_power_off: 3105out_power_off:
3116 pm_runtime_put(&client->dev); 3106 smiapp_power_off(&client->dev);
3117 pm_runtime_disable(&client->dev);
3118 3107
3119 return rval; 3108 return rval;
3120} 3109}
@@ -3127,8 +3116,10 @@ static int smiapp_remove(struct i2c_client *client)
3127 3116
3128 v4l2_async_unregister_subdev(subdev); 3117 v4l2_async_unregister_subdev(subdev);
3129 3118
3130 pm_runtime_suspend(&client->dev);
3131 pm_runtime_disable(&client->dev); 3119 pm_runtime_disable(&client->dev);
3120 if (!pm_runtime_status_suspended(&client->dev))
3121 smiapp_power_off(&client->dev);
3122 pm_runtime_set_suspended(&client->dev);
3132 3123
3133 for (i = 0; i < sensor->ssds_used; i++) { 3124 for (i = 0; i < sensor->ssds_used; i++) {
3134 v4l2_device_unregister_subdev(&sensor->ssds[i].sd); 3125 v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
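The smiapp rework powers the sensor up by hand during probe and only tells runtime PM about the already-active device at the end; remove() mirrors it by powering off only when the device is not already runtime-suspended. A condensed sketch of that handoff (my_power_off is a hypothetical board-specific helper):

#include <linux/pm_runtime.h>

static void my_power_off(struct device *dev)
{
	/* hypothetical placeholder for the device-specific power-down */
}

static void my_probe_tail(struct device *dev)
{
	/* Device is already powered on; record that before enabling so
	 * runtime PM does not try to resume it a second time. */
	pm_runtime_set_active(dev);
	pm_runtime_get_noresume(dev);
	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put_autosuspend(dev);	/* drop probe's reference */
}

static void my_remove(struct device *dev)
{
	pm_runtime_disable(dev);
	if (!pm_runtime_status_suspended(dev))
		my_power_off(dev);
	pm_runtime_set_suspended(dev);
}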
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 3a0fe8cc64e9..48646a7f3fb0 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -291,8 +291,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
291 tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode); 291 tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode);
292 tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input); 292 tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input);
293 293
294 /* Svideo should enable YCrCb output and disable GPCL output 294 /*
295 * For Composite and TV, it should be the reverse 295 * Set up the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For
296 * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK
297 * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the
298 * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set
299 * INTREQ/GPCL/VBLK to logic 1.
296 */ 300 */
297 val = tvp5150_read(sd, TVP5150_MISC_CTL); 301 val = tvp5150_read(sd, TVP5150_MISC_CTL);
298 if (val < 0) { 302 if (val < 0) {
@@ -301,9 +305,9 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
301 } 305 }
302 306
303 if (decoder->input == TVP5150_SVIDEO) 307 if (decoder->input == TVP5150_SVIDEO)
304 val = (val & ~0x40) | 0x10; 308 val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK;
305 else 309 else
306 val = (val & ~0x10) | 0x40; 310 val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL;
307 tvp5150_write(sd, TVP5150_MISC_CTL, val); 311 tvp5150_write(sd, TVP5150_MISC_CTL, val);
308}; 312};
309 313
@@ -455,7 +459,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = {
455 },{ /* Automatic offset and AGC enabled */ 459 },{ /* Automatic offset and AGC enabled */
456 TVP5150_ANAL_CHL_CTL, 0x15 460 TVP5150_ANAL_CHL_CTL, 0x15
457 },{ /* Activate YCrCb output 0x9 or 0xd ? */ 461 },{ /* Activate YCrCb output 0x9 or 0xd ? */
458 TVP5150_MISC_CTL, 0x6f 462 TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL |
463 TVP5150_MISC_CTL_INTREQ_OE |
464 TVP5150_MISC_CTL_YCBCR_OE |
465 TVP5150_MISC_CTL_SYNC_OE |
466 TVP5150_MISC_CTL_VBLANK |
467 TVP5150_MISC_CTL_CLOCK_OE,
459 },{ /* Activates video std autodetection for all standards */ 468 },{ /* Activates video std autodetection for all standards */
460 TVP5150_AUTOSW_MSK, 0x0 469 TVP5150_AUTOSW_MSK, 0x0
461 },{ /* Default format: 0x47. For 4:2:2: 0x40 */ 470 },{ /* Default format: 0x47. For 4:2:2: 0x40 */
@@ -861,8 +870,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
861 870
862 f = &format->format; 871 f = &format->format;
863 872
864 tvp5150_reset(sd, 0);
865
866 f->width = decoder->rect.width; 873 f->width = decoder->rect.width;
867 f->height = decoder->rect.height / 2; 874 f->height = decoder->rect.height / 2;
868 875
@@ -1051,21 +1058,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = {
1051static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable) 1058static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
1052{ 1059{
1053 struct tvp5150 *decoder = to_tvp5150(sd); 1060 struct tvp5150 *decoder = to_tvp5150(sd);
1054 /* Output format: 8-bit ITU-R BT.656 with embedded syncs */ 1061 int val;
1055 int val = 0x09;
1056
1057 /* Output format: 8-bit 4:2:2 YUV with discrete sync */
1058 if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
1059 val = 0x0d;
1060 1062
1061 /* Initializes TVP5150 to its default values */ 1063 /* Enable or disable the video output signals. */
1062 /* # set PCLK (27MHz) */ 1064 val = tvp5150_read(sd, TVP5150_MISC_CTL);
1063 tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00); 1065 if (val < 0)
1066 return val;
1067
1068 val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
1069 TVP5150_MISC_CTL_CLOCK_OE);
1070
1071 if (enable) {
1072 /*
1073 * Enable the YCbCr and clock outputs. In discrete sync mode
1074 * (non-BT.656) additionally enable the sync outputs.
1075 */
1076 val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE;
1077 if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
1078 val |= TVP5150_MISC_CTL_SYNC_OE;
1079 }
1064 1080
1065 if (enable) 1081 tvp5150_write(sd, TVP5150_MISC_CTL, val);
1066 tvp5150_write(sd, TVP5150_MISC_CTL, val);
1067 else
1068 tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
1069 1082
1070 return 0; 1083 return 0;
1071} 1084}
@@ -1524,7 +1537,6 @@ static int tvp5150_probe(struct i2c_client *c,
1524 res = core->hdl.error; 1537 res = core->hdl.error;
1525 goto err; 1538 goto err;
1526 } 1539 }
1527 v4l2_ctrl_handler_setup(&core->hdl);
1528 1540
1529 /* Default is no cropping */ 1541 /* Default is no cropping */
1530 core->rect.top = 0; 1542 core->rect.top = 0;
@@ -1535,6 +1547,8 @@ static int tvp5150_probe(struct i2c_client *c,
1535 core->rect.left = 0; 1547 core->rect.left = 0;
1536 core->rect.width = TVP5150_H_MAX; 1548 core->rect.width = TVP5150_H_MAX;
1537 1549
1550 tvp5150_reset(sd, 0); /* Calls v4l2_ctrl_handler_setup() */
1551
1538 res = v4l2_async_register_subdev(sd); 1552 res = v4l2_async_register_subdev(sd);
1539 if (res < 0) 1553 if (res < 0)
1540 goto err; 1554 goto err;
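The reworked tvp5150_s_stream() above is a textbook read-modify-write of a shared control register: read MISC_CTL, fail early on a bus error, clear only the output-enable bits, then raise back exactly what the current mode needs. A minimal sketch of the pattern, with hypothetical reg_read()/reg_write() accessors standing in for the driver's I2C helpers:

    #include <linux/bits.h>
    #include <linux/types.h>

    #define MISC_CTL_REG       0x03    /* register address, per the diff */
    #define MISC_CTL_YCBCR_OE  BIT(3)
    #define MISC_CTL_SYNC_OE   BIT(2)
    #define MISC_CTL_CLOCK_OE  BIT(0)

    /* reg_read()/reg_write() are assumed bus accessors, not a real API. */
    static int set_outputs(bool enable, bool discrete_sync)
    {
            int val = reg_read(MISC_CTL_REG);

            if (val < 0)
                    return val;     /* propagate the bus error */

            /* Drop every output-enable bit first... */
            val &= ~(MISC_CTL_YCBCR_OE | MISC_CTL_SYNC_OE |
                     MISC_CTL_CLOCK_OE);

            /* ...then raise only what this mode needs. */
            if (enable) {
                    val |= MISC_CTL_YCBCR_OE | MISC_CTL_CLOCK_OE;
                    if (discrete_sync)      /* non-BT.656 drives syncs */
                            val |= MISC_CTL_SYNC_OE;
            }

            return reg_write(MISC_CTL_REG, val);
    }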
diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h
index 25a994944918..30a48c28d05a 100644
--- a/drivers/media/i2c/tvp5150_reg.h
+++ b/drivers/media/i2c/tvp5150_reg.h
@@ -9,6 +9,15 @@
9#define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */ 9#define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */
10#define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */ 10#define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */
11#define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */ 11#define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */
12#define TVP5150_MISC_CTL_VBLK_GPCL BIT(7)
13#define TVP5150_MISC_CTL_GPCL BIT(6)
14#define TVP5150_MISC_CTL_INTREQ_OE BIT(5)
15#define TVP5150_MISC_CTL_HVLK BIT(4)
16#define TVP5150_MISC_CTL_YCBCR_OE BIT(3)
17#define TVP5150_MISC_CTL_SYNC_OE BIT(2)
18#define TVP5150_MISC_CTL_VBLANK BIT(1)
19#define TVP5150_MISC_CTL_CLOCK_OE BIT(0)
20
12#define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */ 21#define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */
13 22
14/* Reserved 05h */ 23/* Reserved 05h */
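For reference, BIT(n) from <linux/bits.h> expands to (1UL << (n)), so each new name above maps one-to-one onto the magic constant it replaces: TVP5150_MISC_CTL_GPCL is BIT(6) == 0x40 and TVP5150_MISC_CTL_HVLK is BIT(4) == 0x10, matching the literals removed from tvp5150_selmux(). A standalone illustration:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))     /* same expansion as <linux/bits.h> */

    int main(void)
    {
            /* BIT(6) and BIT(4) are the 0x40 and 0x10 literals in the diff */
            printf("GPCL=0x%02lx HVLK=0x%02lx\n", BIT(6), BIT(4));
            return 0;
    }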
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 979634000597..d5c911c09e2b 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev)
308static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev) 308static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev)
309{ 309{
310 free_irq(pci_dev->irq, (void *)cobalt); 310 free_irq(pci_dev->irq, (void *)cobalt);
311 311 pci_free_irq_vectors(pci_dev);
312 if (cobalt->msi_enabled)
313 pci_disable_msi(pci_dev);
314} 312}
315 313
316static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev, 314static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
@@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
387 from being generated. */ 385 from being generated. */
388 cobalt_set_interrupt(cobalt, false); 386 cobalt_set_interrupt(cobalt, false);
389 387
390 if (pci_enable_msi_range(pci_dev, 1, 1) < 1) { 388 if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) {
391 cobalt_err("Could not enable MSI\n"); 389 cobalt_err("Could not enable MSI\n");
392 cobalt->msi_enabled = false;
393 ret = -EIO; 390 ret = -EIO;
394 goto err_release; 391 goto err_release;
395 } 392 }
396 msi_config_show(cobalt, pci_dev); 393 msi_config_show(cobalt, pci_dev);
397 cobalt->msi_enabled = true;
398 394
399 /* Register IRQ */ 395 /* Register IRQ */
400 if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED, 396 if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED,
diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
index ed00dc9d9399..00f773ec359a 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.h
+++ b/drivers/media/pci/cobalt/cobalt-driver.h
@@ -287,8 +287,6 @@ struct cobalt {
287 u32 irq_none; 287 u32 irq_none;
288 u32 irq_full_fifo; 288 u32 irq_full_fifo;
289 289
290 bool msi_enabled;
291
292 /* omnitek dma */ 290 /* omnitek dma */
293 int dma_channels; 291 int dma_channels;
294 int first_fifo_channel; 292 int first_fifo_channel;
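The cobalt change swaps the legacy pci_enable_msi_range()/pci_disable_msi() pair for pci_alloc_irq_vectors()/pci_free_irq_vectors(). Because the PCI core now records what was allocated, the driver-side msi_enabled flag becomes dead weight and is dropped from struct cobalt. A sketch of the new-style setup and teardown (the demo_* names are placeholders, not the driver's):

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    static int demo_setup_irq(struct pci_dev *pdev, irq_handler_t handler,
                              void *ctx)
    {
            int ret;

            /* Ask for exactly one MSI vector (min == max == 1). */
            ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
            if (ret < 0)
                    return ret;

            ret = request_irq(pci_irq_vector(pdev, 0), handler, 0,
                              "demo", ctx);
            if (ret)
                    pci_free_irq_vectors(pdev);
            return ret;
    }

    static void demo_free_irq(struct pci_dev *pdev, void *ctx)
    {
            free_irq(pci_irq_vector(pdev, 0), ctx);
            pci_free_irq_vectors(pdev);     /* no msi_enabled bookkeeping */
    }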
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 07fa08be9e99..d54ebe7e0215 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -97,14 +97,13 @@ struct pctv452e_state {
97 u8 c; /* transaction counter, wraps around... */ 97 u8 c; /* transaction counter, wraps around... */
98 u8 initialized; /* set to 1 if 0x15 has been sent */ 98 u8 initialized; /* set to 1 if 0x15 has been sent */
99 u16 last_rc_key; 99 u16 last_rc_key;
100
101 unsigned char data[80];
102}; 100};
103 101
104static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data, 102static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
105 unsigned int write_len, unsigned int read_len) 103 unsigned int write_len, unsigned int read_len)
106{ 104{
107 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 105 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
106 u8 *buf;
108 u8 id; 107 u8 id;
109 unsigned int rlen; 108 unsigned int rlen;
110 int ret; 109 int ret;
@@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
114 return -EIO; 113 return -EIO;
115 } 114 }
116 115
117 mutex_lock(&state->ca_mutex); 116 buf = kmalloc(64, GFP_KERNEL);
117 if (!buf)
118 return -ENOMEM;
119
118 id = state->c++; 120 id = state->c++;
119 121
120 state->data[0] = SYNC_BYTE_OUT; 122 buf[0] = SYNC_BYTE_OUT;
121 state->data[1] = id; 123 buf[1] = id;
122 state->data[2] = cmd; 124 buf[2] = cmd;
123 state->data[3] = write_len; 125 buf[3] = write_len;
124 126
125 memcpy(state->data + 4, data, write_len); 127 memcpy(buf + 4, data, write_len);
126 128
127 rlen = (read_len > 0) ? 64 : 0; 129 rlen = (read_len > 0) ? 64 : 0;
128 ret = dvb_usb_generic_rw(d, state->data, 4 + write_len, 130 ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
129 state->data, rlen, /* delay_ms */ 0); 131 buf, rlen, /* delay_ms */ 0);
130 if (0 != ret) 132 if (0 != ret)
131 goto failed; 133 goto failed;
132 134
133 ret = -EIO; 135 ret = -EIO;
134 if (SYNC_BYTE_IN != state->data[0] || id != state->data[1]) 136 if (SYNC_BYTE_IN != buf[0] || id != buf[1])
135 goto failed; 137 goto failed;
136 138
137 memcpy(data, state->data + 4, read_len); 139 memcpy(data, buf + 4, read_len);
138 140
139 mutex_unlock(&state->ca_mutex); 141 kfree(buf);
140 return 0; 142 return 0;
141 143
142failed: 144failed:
143 err("CI error %d; %02X %02X %02X -> %*ph.", 145 err("CI error %d; %02X %02X %02X -> %*ph.",
144 ret, SYNC_BYTE_OUT, id, cmd, 3, state->data); 146 ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
145 147
146 mutex_unlock(&state->ca_mutex); 148 kfree(buf);
147 return ret; 149 return ret;
148} 150}
149 151
@@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
410 u8 *rcv_buf, u8 rcv_len) 412 u8 *rcv_buf, u8 rcv_len)
411{ 413{
412 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 414 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
415 u8 *buf;
413 u8 id; 416 u8 id;
414 int ret; 417 int ret;
415 418
416 mutex_lock(&state->ca_mutex); 419 buf = kmalloc(64, GFP_KERNEL);
420 if (!buf)
421 return -ENOMEM;
422
417 id = state->c++; 423 id = state->c++;
418 424
419 ret = -EINVAL; 425 ret = -EINVAL;
420 if (snd_len > 64 - 7 || rcv_len > 64 - 7) 426 if (snd_len > 64 - 7 || rcv_len > 64 - 7)
421 goto failed; 427 goto failed;
422 428
423 state->data[0] = SYNC_BYTE_OUT; 429 buf[0] = SYNC_BYTE_OUT;
424 state->data[1] = id; 430 buf[1] = id;
425 state->data[2] = PCTV_CMD_I2C; 431 buf[2] = PCTV_CMD_I2C;
426 state->data[3] = snd_len + 3; 432 buf[3] = snd_len + 3;
427 state->data[4] = addr << 1; 433 buf[4] = addr << 1;
428 state->data[5] = snd_len; 434 buf[5] = snd_len;
429 state->data[6] = rcv_len; 435 buf[6] = rcv_len;
430 436
431 memcpy(state->data + 7, snd_buf, snd_len); 437 memcpy(buf + 7, snd_buf, snd_len);
432 438
433 ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len, 439 ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
434 state->data, /* rcv_len */ 64, 440 buf, /* rcv_len */ 64,
435 /* delay_ms */ 0); 441 /* delay_ms */ 0);
436 if (ret < 0) 442 if (ret < 0)
437 goto failed; 443 goto failed;
438 444
439 /* TT USB protocol error. */ 445 /* TT USB protocol error. */
440 ret = -EIO; 446 ret = -EIO;
441 if (SYNC_BYTE_IN != state->data[0] || id != state->data[1]) 447 if (SYNC_BYTE_IN != buf[0] || id != buf[1])
442 goto failed; 448 goto failed;
443 449
444 /* I2C device didn't respond as expected. */ 450 /* I2C device didn't respond as expected. */
445 ret = -EREMOTEIO; 451 ret = -EREMOTEIO;
446 if (state->data[5] < snd_len || state->data[6] < rcv_len) 452 if (buf[5] < snd_len || buf[6] < rcv_len)
447 goto failed; 453 goto failed;
448 454
449 memcpy(rcv_buf, state->data + 7, rcv_len); 455 memcpy(rcv_buf, buf + 7, rcv_len);
450 mutex_unlock(&state->ca_mutex);
451 456
457 kfree(buf);
452 return rcv_len; 458 return rcv_len;
453 459
454failed: 460failed:
455 err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph", 461 err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
456 ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len, 462 ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
457 7, state->data); 463 7, buf);
458 464
459 mutex_unlock(&state->ca_mutex); 465 kfree(buf);
460 return ret; 466 return ret;
461} 467}
462 468
@@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
505static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) 511static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
506{ 512{
507 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 513 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
508 u8 *rx; 514 u8 *b0, *rx;
509 int ret; 515 int ret;
510 516
511 info("%s: %d\n", __func__, i); 517 info("%s: %d\n", __func__, i);
@@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
516 if (state->initialized) 522 if (state->initialized)
517 return 0; 523 return 0;
518 524
519 rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL); 525 b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL);
520 if (!rx) 526 if (!b0)
521 return -ENOMEM; 527 return -ENOMEM;
522 528
523 mutex_lock(&state->ca_mutex); 529 rx = b0 + 5;
530
524 /* hmm, where should this go? */ 531 /* hmm, where should this go? */
525 ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE); 532 ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
526 if (ret != 0) 533 if (ret != 0)
@@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
528 __func__, ret); 535 __func__, ret);
529 536
530 /* this is a one-time initialization, don't know where to put it */ 537 /* this is a one-time initialization, don't know where to put it */
531 state->data[0] = 0xaa; 538 b0[0] = 0xaa;
532 state->data[1] = state->c++; 539 b0[1] = state->c++;
533 state->data[2] = PCTV_CMD_RESET; 540 b0[2] = PCTV_CMD_RESET;
534 state->data[3] = 1; 541 b0[3] = 1;
535 state->data[4] = 0; 542 b0[4] = 0;
536 /* reset board */ 543 /* reset board */
537 ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0); 544 ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
538 if (ret) 545 if (ret)
539 goto ret; 546 goto ret;
540 547
541 state->data[1] = state->c++; 548 b0[1] = state->c++;
542 state->data[4] = 1; 549 b0[4] = 1;
543 /* reset board (again?) */ 550 /* reset board (again?) */
544 ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0); 551 ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
545 if (ret) 552 if (ret)
546 goto ret; 553 goto ret;
547 554
548 state->initialized = 1; 555 state->initialized = 1;
549 556
550ret: 557ret:
551 mutex_unlock(&state->ca_mutex); 558 kfree(b0);
552 kfree(rx);
553 return ret; 559 return ret;
554} 560}
555 561
556static int pctv452e_rc_query(struct dvb_usb_device *d) 562static int pctv452e_rc_query(struct dvb_usb_device *d)
557{ 563{
558 struct pctv452e_state *state = (struct pctv452e_state *)d->priv; 564 struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
565 u8 *b, *rx;
559 int ret, i; 566 int ret, i;
560 u8 id; 567 u8 id;
561 568
562 mutex_lock(&state->ca_mutex); 569 b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL);
570 if (!b)
571 return -ENOMEM;
572
573 rx = b + CMD_BUFFER_SIZE;
574
563 id = state->c++; 575 id = state->c++;
564 576
565 /* prepare command header */ 577 /* prepare command header */
566 state->data[0] = SYNC_BYTE_OUT; 578 b[0] = SYNC_BYTE_OUT;
567 state->data[1] = id; 579 b[1] = id;
568 state->data[2] = PCTV_CMD_IR; 580 b[2] = PCTV_CMD_IR;
569 state->data[3] = 0; 581 b[3] = 0;
570 582
571 /* send ir request */ 583 /* send ir request */
572 ret = dvb_usb_generic_rw(d, state->data, 4, 584 ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
573 state->data, PCTV_ANSWER_LEN, 0);
574 if (ret != 0) 585 if (ret != 0)
575 goto ret; 586 goto ret;
576 587
577 if (debug > 3) { 588 if (debug > 3) {
578 info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data); 589 info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
579 for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++) 590 for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
580 info(" %02x", state->data[i + 3]); 591 info(" %02x", rx[i+3]);
581 592
582 info("\n"); 593 info("\n");
583 } 594 }
584 595
585 if ((state->data[3] == 9) && (state->data[12] & 0x01)) { 596 if ((rx[3] == 9) && (rx[12] & 0x01)) {
586 /* got a "press" event */ 597 /* got a "press" event */
587 state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]); 598 state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
588 if (debug > 2) 599 if (debug > 2)
589 info("%s: cmd=0x%02x sys=0x%02x\n", 600 info("%s: cmd=0x%02x sys=0x%02x\n",
590 __func__, state->data[6], state->data[7]); 601 __func__, rx[6], rx[7]);
591 602
592 rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0); 603 rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
593 } else if (state->last_rc_key) { 604 } else if (state->last_rc_key) {
@@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d)
595 state->last_rc_key = 0; 606 state->last_rc_key = 0;
596 } 607 }
597ret: 608ret:
598 mutex_unlock(&state->ca_mutex); 609 kfree(b);
599 return ret; 610 return ret;
600} 611}
601 612
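The pctv452e conversion replaces one long-lived data[80] buffer in the device state, which forced every transaction to hold ca_mutex end to end, with a short-lived kmalloc() buffer per call. Heap memory is also safe to hand to the USB core for DMA, unlike on-stack or struct-embedded buffers on some architectures. Each converted function follows the same shape, sketched here with a hypothetical do_transfer() in place of dvb_usb_generic_rw():

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static int demo_msg_xfer(const u8 *snd, int snd_len, u8 *rcv, int rcv_len)
    {
            u8 *buf;
            int ret;

            buf = kmalloc(64, GFP_KERNEL);  /* per-call, DMA-safe buffer */
            if (!buf)
                    return -ENOMEM;

            memcpy(buf, snd, snd_len);
            ret = do_transfer(buf, snd_len, rcv_len);  /* stand-in helper */
            if (ret == 0)
                    memcpy(rcv, buf, rcv_len);

            kfree(buf);     /* freed on every path, no mutex required */
            return ret;
    }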
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index a4dcaec31d02..8c1f926567ec 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -218,22 +218,30 @@ static int smsusb_start_streaming(struct smsusb_device_t *dev)
218static int smsusb_sendrequest(void *context, void *buffer, size_t size) 218static int smsusb_sendrequest(void *context, void *buffer, size_t size)
219{ 219{
220 struct smsusb_device_t *dev = (struct smsusb_device_t *) context; 220 struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
221 struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer; 221 struct sms_msg_hdr *phdr;
222 int dummy; 222 int dummy, ret;
223 223
224 if (dev->state != SMSUSB_ACTIVE) { 224 if (dev->state != SMSUSB_ACTIVE) {
225 pr_debug("Device not active yet\n"); 225 pr_debug("Device not active yet\n");
226 return -ENOENT; 226 return -ENOENT;
227 } 227 }
228 228
229 phdr = kmalloc(size, GFP_KERNEL);
230 if (!phdr)
231 return -ENOMEM;
232 memcpy(phdr, buffer, size);
233
229 pr_debug("sending %s(%d) size: %d\n", 234 pr_debug("sending %s(%d) size: %d\n",
230 smscore_translate_msg(phdr->msg_type), phdr->msg_type, 235 smscore_translate_msg(phdr->msg_type), phdr->msg_type,
231 phdr->msg_length); 236 phdr->msg_length);
232 237
233 smsendian_handle_tx_message((struct sms_msg_data *) phdr); 238 smsendian_handle_tx_message((struct sms_msg_data *) phdr);
234 smsendian_handle_message_header((struct sms_msg_hdr *)buffer); 239 smsendian_handle_message_header((struct sms_msg_hdr *)phdr);
235 return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2), 240 ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
236 buffer, size, &dummy, 1000); 241 phdr, size, &dummy, 1000);
242
243 kfree(phdr);
244 return ret;
237} 245}
238 246
239static char *smsusb1_fw_lkup[] = { 247static char *smsusb1_fw_lkup[] = {
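The smsusb fix is the same bounce-buffer idea: usb_bulk_msg() needs DMA-able memory, so the caller's buffer is duplicated into a fresh heap allocation before submission. kmemdup() expresses the kmalloc()+memcpy() pair from the hunk in one call; a minimal sketch:

    #include <linux/slab.h>
    #include <linux/usb.h>

    static int demo_send_bulk(struct usb_device *udev, void *buffer, int size)
    {
            int actual, ret;
            void *bounce = kmemdup(buffer, size, GFP_KERNEL);

            if (!bounce)
                    return -ENOMEM;

            /* Endpoint 2 with a 1 s timeout, mirroring the driver's call. */
            ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 2),
                               bounce, size, &actual, 1000);

            kfree(bounce);
            return ret;
    }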
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index a0547dbf9806..76382c858c35 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
330 struct ms_id_register id_reg; 330 struct ms_id_register id_reg;
331 331
332 if (!(*mrq)) { 332 if (!(*mrq)) {
333 memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL, 333 memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
334 sizeof(struct ms_id_register)); 334 sizeof(struct ms_id_register));
335 *mrq = &card->current_mrq; 335 *mrq = &card->current_mrq;
336 return 0; 336 return 0;
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 18e05ca7584f..3600c9993a98 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -152,6 +152,9 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
152{ 152{
153 int ret; 153 int ret;
154 154
155 if (!cldev->bus->hbm_f_os_supported)
156 return;
157
155 ret = mei_cldev_enable(cldev); 158 ret = mei_cldev_enable(cldev);
156 if (ret) 159 if (ret)
157 return; 160 return;
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 0037153c80a6..2d9c5dd06e42 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -450,7 +450,7 @@ bool mei_cldev_enabled(struct mei_cl_device *cldev)
450EXPORT_SYMBOL_GPL(mei_cldev_enabled); 450EXPORT_SYMBOL_GPL(mei_cldev_enabled);
451 451
452/** 452/**
453 * mei_cldev_enable_device - enable me client device 453 * mei_cldev_enable - enable me client device
454 * create connection with me client 454 * create connection with me client
455 * 455 *
456 * @cldev: me client device 456 * @cldev: me client device
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 391936c1aa04..b0395601c6ae 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1541,7 +1541,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1541 1541
1542 rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1; 1542 rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
1543 if (rets < 0) 1543 if (rets < 0)
1544 return rets; 1544 goto err;
1545 1545
1546 if (rets == 0) { 1546 if (rets == 0) {
1547 cl_dbg(dev, cl, "No flow control credentials: not sending.\n"); 1547 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
@@ -1575,11 +1575,8 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1575 cb->buf.size, cb->buf_idx); 1575 cb->buf.size, cb->buf_idx);
1576 1576
1577 rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx); 1577 rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
1578 if (rets) { 1578 if (rets)
1579 cl->status = rets; 1579 goto err;
1580 list_move_tail(&cb->list, &cmpl_list->list);
1581 return rets;
1582 }
1583 1580
1584 cl->status = 0; 1581 cl->status = 0;
1585 cl->writing_state = MEI_WRITING; 1582 cl->writing_state = MEI_WRITING;
@@ -1587,14 +1584,21 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1587 cb->completed = mei_hdr.msg_complete == 1; 1584 cb->completed = mei_hdr.msg_complete == 1;
1588 1585
1589 if (first_chunk) { 1586 if (first_chunk) {
1590 if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) 1587 if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
1591 return -EIO; 1588 rets = -EIO;
1589 goto err;
1590 }
1592 } 1591 }
1593 1592
1594 if (mei_hdr.msg_complete) 1593 if (mei_hdr.msg_complete)
1595 list_move_tail(&cb->list, &dev->write_waiting_list.list); 1594 list_move_tail(&cb->list, &dev->write_waiting_list.list);
1596 1595
1597 return 0; 1596 return 0;
1597
1598err:
1599 cl->status = rets;
1600 list_move_tail(&cb->list, &cmpl_list->list);
1601 return rets;
1598} 1602}
1599 1603
1600/** 1604/**
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index c6c051b52f55..c6217a4993ad 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -180,6 +180,8 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
180 dev->hbm_f_ev_supported); 180 dev->hbm_f_ev_supported);
181 pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n", 181 pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n",
182 dev->hbm_f_fa_supported); 182 dev->hbm_f_fa_supported);
183 pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n",
184 dev->hbm_f_os_supported);
183 } 185 }
184 186
185 pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n", 187 pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index dd7f15a65eed..25b4a1ba522d 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -989,6 +989,10 @@ static void mei_hbm_config_features(struct mei_device *dev)
989 /* Fixed Address Client Support */ 989 /* Fixed Address Client Support */
990 if (dev->version.major_version >= HBM_MAJOR_VERSION_FA) 990 if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
991 dev->hbm_f_fa_supported = 1; 991 dev->hbm_f_fa_supported = 1;
992
993 /* OS ver message Support */
994 if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
995 dev->hbm_f_os_supported = 1;
992} 996}
993 997
994/** 998/**
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 9daf3f9aed25..e1e4d47d4d7d 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -76,6 +76,12 @@
76#define HBM_MINOR_VERSION_FA 0 76#define HBM_MINOR_VERSION_FA 0
77#define HBM_MAJOR_VERSION_FA 2 77#define HBM_MAJOR_VERSION_FA 2
78 78
79/*
80 * MEI version with OS ver message support
81 */
82#define HBM_MINOR_VERSION_OS 0
83#define HBM_MAJOR_VERSION_OS 2
84
79/* Host bus message command opcode */ 85/* Host bus message command opcode */
80#define MEI_HBM_CMD_OP_MSK 0x7f 86#define MEI_HBM_CMD_OP_MSK 0x7f
81/* Host bus message command RESPONSE */ 87/* Host bus message command RESPONSE */
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 699693cd8c59..8dadb98662a9 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -406,6 +406,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
406 * @hbm_f_ev_supported : hbm feature event notification 406 * @hbm_f_ev_supported : hbm feature event notification
407 * @hbm_f_fa_supported : hbm feature fixed address client 407 * @hbm_f_fa_supported : hbm feature fixed address client
408 * @hbm_f_ie_supported : hbm feature immediate reply to enum request 408 * @hbm_f_ie_supported : hbm feature immediate reply to enum request
409 * @hbm_f_os_supported : hbm feature support OS ver message
409 * 410 *
410 * @me_clients_rwsem: rw lock over me_clients list 411 * @me_clients_rwsem: rw lock over me_clients list
411 * @me_clients : list of FW clients 412 * @me_clients : list of FW clients
@@ -487,6 +488,7 @@ struct mei_device {
487 unsigned int hbm_f_ev_supported:1; 488 unsigned int hbm_f_ev_supported:1;
488 unsigned int hbm_f_fa_supported:1; 489 unsigned int hbm_f_fa_supported:1;
489 unsigned int hbm_f_ie_supported:1; 490 unsigned int hbm_f_ie_supported:1;
491 unsigned int hbm_f_os_supported:1;
490 492
491 struct rw_semaphore me_clients_rwsem; 493 struct rw_semaphore me_clients_rwsem;
492 struct list_head me_clients; 494 struct list_head me_clients;
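All of the MEI hunks wire up one version-gating pattern: a single-bit feature flag (hbm_f_os_supported) is derived once from the negotiated HBM version, exported through debugfs, and checked before the optional OS-version message is ever sent, which is why mei_mkhi_fix() can now bail out early. A condensed, self-contained view of that pattern (the demo_* names are illustrative):

    /* Illustrative reduction of the mei_device feature wiring. */
    struct demo_caps {
            unsigned int major_version;
            unsigned int os_supported:1;    /* 1-bit flag, as in the diff */
    };

    #define DEMO_MAJOR_VERSION_OS 2

    static void demo_config_features(struct demo_caps *caps)
    {
            if (caps->major_version >= DEMO_MAJOR_VERSION_OS)
                    caps->os_supported = 1; /* firmware takes OS ver msg */
    }

    static void demo_maybe_send_os_ver(struct demo_caps *caps)
    {
            if (!caps->os_supported)
                    return;         /* silently skip on older firmware */
            /* ...build and send the optional message here... */
    }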
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index b61b52f9da3d..0fccca075e29 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1706,10 +1706,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1706 err = mmc_select_hs400(card); 1706 err = mmc_select_hs400(card);
1707 if (err) 1707 if (err)
1708 goto free_card; 1708 goto free_card;
1709 } else if (mmc_card_hs(card)) { 1709 } else {
1710 /* Select the desired bus width optionally */ 1710 /* Select the desired bus width optionally */
1711 err = mmc_select_bus_width(card); 1711 err = mmc_select_bus_width(card);
1712 if (err > 0) { 1712 if (err > 0 && mmc_card_hs(card)) {
1713 err = mmc_select_hs_ddr(card); 1713 err = mmc_select_hs_ddr(card);
1714 if (err) 1714 if (err)
1715 goto free_card; 1715 goto free_card;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index b11c3455b040..e6ea8503f40c 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -506,9 +506,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
506 } 506 }
507 } while (busy); 507 } while (busy);
508 508
509 if (host->ops->card_busy && send_status)
510 return mmc_switch_status(card);
511
512 return 0; 509 return 0;
513} 510}
514 511
@@ -577,24 +574,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
577 if (!use_busy_signal) 574 if (!use_busy_signal)
578 goto out; 575 goto out;
579 576
580 /* Switch to new timing before poll and check switch status. */
581 if (timing)
582 mmc_set_timing(host, timing);
583
584 /* If SPI or HW busy detection was used above, we don't need to poll. */ 577 /* If SPI or HW busy detection was used above, we don't need to poll. */
585 if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) || 578 if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
586 mmc_host_is_spi(host)) { 579 mmc_host_is_spi(host))
587 if (send_status)
588 err = mmc_switch_status(card);
589 goto out_tim; 580 goto out_tim;
590 }
591 581
592 /* Let's try to poll to find out when the command is completed. */ 582 /* Let's try to poll to find out when the command is completed. */
593 err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err); 583 err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
584 if (err)
585 goto out;
594 586
595out_tim: 587out_tim:
596 if (err && timing) 588 /* Switch to new timing before check switch status. */
597 mmc_set_timing(host, old_timing); 589 if (timing)
590 mmc_set_timing(host, timing);
591
592 if (send_status) {
593 err = mmc_switch_status(card);
594 if (err && timing)
595 mmc_set_timing(host, old_timing);
596 }
598out: 597out:
599 mmc_retune_release(host); 598 mmc_retune_release(host);
600 599
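The __mmc_switch() rework inverts the old ordering: instead of switching the host to the new timing before polling, it now polls the card out of its busy state first, then applies the new timing, and only then reads the switch status, falling back to the old timing if that final check fails. A control-flow sketch (mmc_set_timing() and mmc_switch_status() are the core helpers from the hunk; the busy poll is a stand-in):

    static int demo_switch_and_verify(struct mmc_card *card,
                                      unsigned int timing,
                                      unsigned int old_timing,
                                      bool send_status)
    {
            int err;

            /* 1. Wait for the card to release its busy signal... */
            err = demo_poll_for_busy(card);         /* stand-in helper */
            if (err)
                    return err;

            /* 2. ...then move the host to the new timing... */
            if (timing)
                    mmc_set_timing(card->host, timing);

            /* 3. ...and only now verify, reverting timing on failure. */
            if (send_status) {
                    err = mmc_switch_status(card);
                    if (err && timing)
                            mmc_set_timing(card->host, old_timing);
            }
            return err;
    }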
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index b44306b886cb..73db08558e4d 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -3354,10 +3354,11 @@ int dw_mci_runtime_resume(struct device *dev)
3354 3354
3355 if (!slot) 3355 if (!slot)
3356 continue; 3356 continue;
3357 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) { 3357 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
3358 dw_mci_set_ios(slot->mmc, &slot->mmc->ios); 3358 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3359 dw_mci_setup_bus(slot, true); 3359
3360 } 3360 /* Force setup bus to guarantee available clock output */
3361 dw_mci_setup_bus(slot, true);
3361 } 3362 }
3362 3363
3363 /* Now that slots are all setup, we can enable card detect */ 3364 /* Now that slots are all setup, we can enable card detect */
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index b352760c041e..09739352834c 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -578,13 +578,15 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
578{ 578{
579 struct meson_host *host = dev_id; 579 struct meson_host *host = dev_id;
580 struct mmc_request *mrq; 580 struct mmc_request *mrq;
581 struct mmc_command *cmd = host->cmd; 581 struct mmc_command *cmd;
582 u32 irq_en, status, raw_status; 582 u32 irq_en, status, raw_status;
583 irqreturn_t ret = IRQ_HANDLED; 583 irqreturn_t ret = IRQ_HANDLED;
584 584
585 if (WARN_ON(!host)) 585 if (WARN_ON(!host))
586 return IRQ_NONE; 586 return IRQ_NONE;
587 587
588 cmd = host->cmd;
589
588 mrq = host->mrq; 590 mrq = host->mrq;
589 591
590 if (WARN_ON(!mrq)) 592 if (WARN_ON(!mrq))
@@ -670,10 +672,10 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
670 int ret = IRQ_HANDLED; 672 int ret = IRQ_HANDLED;
671 673
672 if (WARN_ON(!mrq)) 674 if (WARN_ON(!mrq))
673 ret = IRQ_NONE; 675 return IRQ_NONE;
674 676
675 if (WARN_ON(!cmd)) 677 if (WARN_ON(!cmd))
676 ret = IRQ_NONE; 678 return IRQ_NONE;
677 679
678 data = cmd->data; 680 data = cmd->data;
679 if (data) { 681 if (data) {
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 01a804792f30..b5972440c1bf 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1023,7 +1023,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
1023 if (!host->busy_status && busy_resp && 1023 if (!host->busy_status && busy_resp &&
1024 !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) && 1024 !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
1025 (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) { 1025 (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
1026 /* Unmask the busy IRQ */ 1026
1027 /* Clear the busy start IRQ */
1028 writel(host->variant->busy_detect_mask,
1029 host->base + MMCICLEAR);
1030
1031 /* Unmask the busy end IRQ */
1027 writel(readl(base + MMCIMASK0) | 1032 writel(readl(base + MMCIMASK0) |
1028 host->variant->busy_detect_mask, 1033 host->variant->busy_detect_mask,
1029 base + MMCIMASK0); 1034 base + MMCIMASK0);
@@ -1038,10 +1043,14 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
1038 1043
1039 /* 1044 /*
1040 * At this point we are not busy with a command, we have 1045 * At this point we are not busy with a command, we have
1041 * not received a new busy request, mask the busy IRQ and 1046 * not received a new busy request, clear and mask the busy
1042 * fall through to process the IRQ. 1047 * end IRQ and fall through to process the IRQ.
1043 */ 1048 */
1044 if (host->busy_status) { 1049 if (host->busy_status) {
1050
1051 writel(host->variant->busy_detect_mask,
1052 host->base + MMCICLEAR);
1053
1045 writel(readl(base + MMCIMASK0) & 1054 writel(readl(base + MMCIMASK0) &
1046 ~host->variant->busy_detect_mask, 1055 ~host->variant->busy_detect_mask,
1047 base + MMCIMASK0); 1056 base + MMCIMASK0);
@@ -1283,12 +1292,21 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
1283 } 1292 }
1284 1293
1285 /* 1294 /*
1286 * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's 1295 * We intentionally clear the MCI_ST_CARDBUSY IRQ (if it's
1287 * enabled) since the HW seems to be triggering the IRQ on both 1296 * enabled) in mmci_cmd_irq() function where ST Micro busy
1288 * edges while monitoring DAT0 for busy completion. 1297 * detection variant is handled. Considering the HW seems to be
1298 * triggering the IRQ on both edges while monitoring DAT0 for
1299 * busy completion and that same status bit is used to monitor
1300 * start and end of busy detection, special care must be taken
1301 * to make sure that both start and end interrupts are always
1302 * cleared one after the other.
1289 */ 1303 */
1290 status &= readl(host->base + MMCIMASK0); 1304 status &= readl(host->base + MMCIMASK0);
1291 writel(status, host->base + MMCICLEAR); 1305 if (host->variant->busy_detect)
1306 writel(status & ~host->variant->busy_detect_mask,
1307 host->base + MMCICLEAR);
1308 else
1309 writel(status, host->base + MMCICLEAR);
1292 1310
1293 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); 1311 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
1294 1312
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 44ecebd1ea8c..c8b8ac66ff7e 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
309 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); 309 cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
310 cmd1 = cmd->arg; 310 cmd1 = cmd->arg;
311 311
312 if (cmd->opcode == MMC_STOP_TRANSMISSION)
313 cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
314
312 if (host->sdio_irq_en) { 315 if (host->sdio_irq_en) {
313 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; 316 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
314 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; 317 cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
@@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
417 ssp->base + HW_SSP_BLOCK_SIZE); 420 ssp->base + HW_SSP_BLOCK_SIZE);
418 } 421 }
419 422
420 if ((cmd->opcode == MMC_STOP_TRANSMISSION) || 423 if (cmd->opcode == SD_IO_RW_EXTENDED)
421 (cmd->opcode == SD_IO_RW_EXTENDED))
422 cmd0 |= BM_SSP_CMD0_APPEND_8CYC; 424 cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
423 425
424 cmd1 = cmd->arg; 426 cmd1 = cmd->arg;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 160f695cc09c..278a5a435ab7 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -395,7 +395,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
395 /* Power on the SDHCI controller and its children */ 395 /* Power on the SDHCI controller and its children */
396 acpi_device_fix_up_power(device); 396 acpi_device_fix_up_power(device);
397 list_for_each_entry(child, &device->children, node) 397 list_for_each_entry(child, &device->children, node)
398 acpi_device_fix_up_power(child); 398 if (child->status.present && child->status.enabled)
399 acpi_device_fix_up_power(child);
399 400
400 if (acpi_bus_get_status(device) || !device->status.present) 401 if (acpi_bus_get_status(device) || !device->status.present)
401 return -ENODEV; 402 return -ENODEV;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 23909804ffb8..0def99590d16 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2733,7 +2733,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
2733 if (intmask & SDHCI_INT_RETUNE) 2733 if (intmask & SDHCI_INT_RETUNE)
2734 mmc_retune_needed(host->mmc); 2734 mmc_retune_needed(host->mmc);
2735 2735
2736 if (intmask & SDHCI_INT_CARD_INT) { 2736 if ((intmask & SDHCI_INT_CARD_INT) &&
2737 (host->ier & SDHCI_INT_CARD_INT)) {
2737 sdhci_enable_sdio_irq_nolock(host, false); 2738 sdhci_enable_sdio_irq_nolock(host, false);
2738 host->thread_isr |= SDHCI_INT_CARD_INT; 2739 host->thread_isr |= SDHCI_INT_CARD_INT;
2739 result = IRQ_WAKE_THREAD; 2740 result = IRQ_WAKE_THREAD;
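The sdhci fix is the classic masked-interrupt guard: hardware can latch a status bit even while the driver has that source disabled in its interrupt-enable register, so the handler must intersect the pending status with the enable mask (host->ier here) before reacting, or it wakes the threaded handler for an interrupt nobody asked for. The generic shape of the test:

    #include <stdint.h>
    #include <stdbool.h>

    #define INT_CARD_INT (1u << 8)  /* illustrative bit position */

    /* Act only when the source is both pending and currently enabled. */
    static bool card_irq_pending(uint32_t status, uint32_t enable_mask)
    {
            return (status & INT_CARD_INT) && (enable_mask & INT_CARD_INT);
    }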
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 353a9ddf6b97..9ce5dcb4abd0 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -426,6 +426,7 @@ config MTD_NAND_ORION
426 426
427config MTD_NAND_OXNAS 427config MTD_NAND_OXNAS
428 tristate "NAND Flash support for Oxford Semiconductor SoC" 428 tristate "NAND Flash support for Oxford Semiconductor SoC"
429 depends on HAS_IOMEM
429 help 430 help
430 This enables the NAND flash controller on Oxford Semiconductor SoCs. 431 This enables the NAND flash controller on Oxford Semiconductor SoCs.
431 432
@@ -540,7 +541,7 @@ config MTD_NAND_FSMC
540 Flexible Static Memory Controller (FSMC) 541 Flexible Static Memory Controller (FSMC)
541 542
542config MTD_NAND_XWAY 543config MTD_NAND_XWAY
543 tristate "Support for NAND on Lantiq XWAY SoC" 544 bool "Support for NAND on Lantiq XWAY SoC"
544 depends on LANTIQ && SOC_TYPE_XWAY 545 depends on LANTIQ && SOC_TYPE_XWAY
545 help 546 help
546 Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached 547 Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 5553a5d9efd1..846a66c1b133 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -775,7 +775,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
775 init_completion(&host->comp_controller); 775 init_completion(&host->comp_controller);
776 776
777 host->irq = platform_get_irq(pdev, 0); 777 host->irq = platform_get_irq(pdev, 0);
778 if ((host->irq < 0) || (host->irq >= NR_IRQS)) { 778 if (host->irq < 0) {
779 dev_err(&pdev->dev, "failed to get platform irq\n"); 779 dev_err(&pdev->dev, "failed to get platform irq\n");
780 res = -EINVAL; 780 res = -EINVAL;
781 goto err_exit3; 781 goto err_exit3;
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 28c7f474be77..4a5e948c62df 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -632,11 +632,13 @@ static int tango_nand_probe(struct platform_device *pdev)
632 if (IS_ERR(nfc->pbus_base)) 632 if (IS_ERR(nfc->pbus_base))
633 return PTR_ERR(nfc->pbus_base); 633 return PTR_ERR(nfc->pbus_base);
634 634
635 writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
636
635 clk = clk_get(&pdev->dev, NULL); 637 clk = clk_get(&pdev->dev, NULL);
636 if (IS_ERR(clk)) 638 if (IS_ERR(clk))
637 return PTR_ERR(clk); 639 return PTR_ERR(clk);
638 640
639 nfc->chan = dma_request_chan(&pdev->dev, "nfc_sbox"); 641 nfc->chan = dma_request_chan(&pdev->dev, "rxtx");
640 if (IS_ERR(nfc->chan)) 642 if (IS_ERR(nfc->chan))
641 return PTR_ERR(nfc->chan); 643 return PTR_ERR(nfc->chan);
642 644
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
index 1f2948c0c458..895101a5e686 100644
--- a/drivers/mtd/nand/xway_nand.c
+++ b/drivers/mtd/nand/xway_nand.c
@@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = {
232 { .compatible = "lantiq,nand-xway" }, 232 { .compatible = "lantiq,nand-xway" },
233 {}, 233 {},
234}; 234};
235MODULE_DEVICE_TABLE(of, xway_nand_match);
236 235
237static struct platform_driver xway_nand_driver = { 236static struct platform_driver xway_nand_driver = {
238 .probe = xway_nand_probe, 237 .probe = xway_nand_probe,
@@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = {
243 }, 242 },
244}; 243};
245 244
246module_platform_driver(xway_nand_driver); 245builtin_platform_driver(xway_nand_driver);
247
248MODULE_LICENSE("GPL");
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index b8c293373ecc..a306de4318d7 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -190,7 +190,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
190 */ 190 */
191static int ipddp_create(struct ipddp_route *new_rt) 191static int ipddp_create(struct ipddp_route *new_rt)
192{ 192{
193 struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL); 193 struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
194 194
195 if (rt == NULL) 195 if (rt == NULL)
196 return -ENOMEM; 196 return -ENOMEM;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 7be393c96b1a..cf7c18947189 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
161 161
162 dev->irq = pdev->irq; 162 dev->irq = pdev->irq;
163 priv->base = addr; 163 priv->base = addr;
164 priv->device = &pdev->dev;
164 165
165 if (!c_can_pci_data->freq) { 166 if (!c_can_pci_data->freq) {
166 dev_err(&pdev->dev, "no clock frequency defined\n"); 167 dev_err(&pdev->dev, "no clock frequency defined\n");
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 680d1ff07a55..6749b1829469 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
948 netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll, 948 netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
949 HECC_DEF_NAPI_WEIGHT); 949 HECC_DEF_NAPI_WEIGHT);
950 950
951 clk_enable(priv->clk); 951 err = clk_prepare_enable(priv->clk);
952 if (err) {
953 dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
954 goto probe_exit_clk;
955 }
956
952 err = register_candev(ndev); 957 err = register_candev(ndev);
953 if (err) { 958 if (err) {
954 dev_err(&pdev->dev, "register_candev() failed\n"); 959 dev_err(&pdev->dev, "register_candev() failed\n");
@@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
981 struct ti_hecc_priv *priv = netdev_priv(ndev); 986 struct ti_hecc_priv *priv = netdev_priv(ndev);
982 987
983 unregister_candev(ndev); 988 unregister_candev(ndev);
984 clk_disable(priv->clk); 989 clk_disable_unprepare(priv->clk);
985 clk_put(priv->clk); 990 clk_put(priv->clk);
986 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 991 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
987 iounmap(priv->base); 992 iounmap(priv->base);
@@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
1006 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR); 1011 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
1007 priv->can.state = CAN_STATE_SLEEPING; 1012 priv->can.state = CAN_STATE_SLEEPING;
1008 1013
1009 clk_disable(priv->clk); 1014 clk_disable_unprepare(priv->clk);
1010 1015
1011 return 0; 1016 return 0;
1012} 1017}
@@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
1015{ 1020{
1016 struct net_device *dev = platform_get_drvdata(pdev); 1021 struct net_device *dev = platform_get_drvdata(pdev);
1017 struct ti_hecc_priv *priv = netdev_priv(dev); 1022 struct ti_hecc_priv *priv = netdev_priv(dev);
1023 int err;
1018 1024
1019 clk_enable(priv->clk); 1025 err = clk_prepare_enable(priv->clk);
1026 if (err)
1027 return err;
1020 1028
1021 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR); 1029 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
1022 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1030 priv->can.state = CAN_STATE_ERROR_ACTIVE;
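ti_hecc moves from bare clk_enable()/clk_disable() to the clk_prepare_enable()/clk_disable_unprepare() pair, which is required when the clock's prepare step may sleep, and it now checks the return value on every power-up path (probe and resume). The canonical pairing, wrapped in illustrative helpers:

    #include <linux/clk.h>
    #include <linux/device.h>

    static int demo_power_up(struct device *dev, struct clk *clk)
    {
            /* prepare may sleep; enable must not, hence the combined call */
            int err = clk_prepare_enable(clk);

            if (err)
                    dev_err(dev, "clk_prepare_enable() failed: %d\n", err);
            return err;
    }

    static void demo_power_down(struct clk *clk)
    {
            clk_disable_unprepare(clk);     /* undoes both steps in order */
    }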
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 9ec33b51a0ed..2ce7ae97ac91 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -393,7 +393,7 @@ static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
393 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr)) 393 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
394 return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0); 394 return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
395 else 395 else
396 return mdiobus_read(priv->master_mii_bus, addr, regnum); 396 return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
397} 397}
398 398
399static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum, 399static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
@@ -407,7 +407,7 @@ static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
407 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr)) 407 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
408 bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val); 408 bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
409 else 409 else
410 mdiobus_write(priv->master_mii_bus, addr, regnum, val); 410 mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);
411 411
412 return 0; 412 return 0;
413} 413}
@@ -982,6 +982,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
982 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; 982 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
983 struct device_node *dn = pdev->dev.of_node; 983 struct device_node *dn = pdev->dev.of_node;
984 struct b53_platform_data *pdata; 984 struct b53_platform_data *pdata;
985 struct dsa_switch_ops *ops;
985 struct bcm_sf2_priv *priv; 986 struct bcm_sf2_priv *priv;
986 struct b53_device *dev; 987 struct b53_device *dev;
987 struct dsa_switch *ds; 988 struct dsa_switch *ds;
@@ -995,6 +996,10 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
995 if (!priv) 996 if (!priv)
996 return -ENOMEM; 997 return -ENOMEM;
997 998
999 ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
1000 if (!ops)
1001 return -ENOMEM;
1002
998 dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv); 1003 dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
999 if (!dev) 1004 if (!dev)
1000 return -ENOMEM; 1005 return -ENOMEM;
@@ -1014,6 +1019,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
1014 ds = dev->ds; 1019 ds = dev->ds;
1015 1020
1016 /* Override the parts that are non-standard wrt. normal b53 devices */ 1021 /* Override the parts that are non-standard wrt. normal b53 devices */
1022 memcpy(ops, ds->ops, sizeof(*ops));
1023 ds->ops = ops;
1017 ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol; 1024 ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol;
1018 ds->ops->setup = bcm_sf2_sw_setup; 1025 ds->ops->setup = bcm_sf2_sw_setup;
1019 ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags; 1026 ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags;
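The bcm_sf2 probe fix stops patching the b53 core's dsa_switch_ops in place, since that table is shared by every b53 instance; instead it devm-allocates a private copy, memcpy()s the defaults in, and overrides only the non-standard callbacks on the copy. The copy-then-override idiom, reduced to its essentials (struct demo_ops is illustrative):

    #include <linux/device.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct demo_ops {
            int (*setup)(void *priv);
            int (*get_phy_flags)(void *priv, int port);
    };

    static struct demo_ops *demo_clone_ops(struct device *dev,
                                           const struct demo_ops *shared)
    {
            struct demo_ops *ops;

            ops = devm_kzalloc(dev, sizeof(*ops), GFP_KERNEL);
            if (!ops)
                    return NULL;

            memcpy(ops, shared, sizeof(*ops));  /* start from the defaults */
            /* ...then override individual callbacks on the private copy. */
            return ops;
    }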
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index c12d2618eebf..3872ab96b80a 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
1152 if (skb == NULL) 1152 if (skb == NULL)
1153 break; 1153 break;
1154 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1154 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1155 if (pci_dma_mapping_error(np->pci_dev,
1156 np->rx_info[i].mapping)) {
1157 dev_kfree_skb(skb);
1158 np->rx_info[i].skb = NULL;
1159 break;
1160 }
1155 /* Grrr, we cannot offset to correctly align the IP header. */ 1161 /* Grrr, we cannot offset to correctly align the IP header. */
1156 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); 1162 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1157 } 1163 }
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1182{ 1188{
1183 struct netdev_private *np = netdev_priv(dev); 1189 struct netdev_private *np = netdev_priv(dev);
1184 unsigned int entry; 1190 unsigned int entry;
1191 unsigned int prev_tx;
1185 u32 status; 1192 u32 status;
1186 int i; 1193 int i, j;
1187 1194
1188 /* 1195 /*
1189 * be cautious here, wrapping the queue has weird semantics 1196 * be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1201 } 1208 }
1202#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ 1209#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1203 1210
1211 prev_tx = np->cur_tx;
1204 entry = np->cur_tx % TX_RING_SIZE; 1212 entry = np->cur_tx % TX_RING_SIZE;
1205 for (i = 0; i < skb_num_frags(skb); i++) { 1213 for (i = 0; i < skb_num_frags(skb); i++) {
1206 int wrap_ring = 0; 1214 int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1234 skb_frag_size(this_frag), 1242 skb_frag_size(this_frag),
1235 PCI_DMA_TODEVICE); 1243 PCI_DMA_TODEVICE);
1236 } 1244 }
1245 if (pci_dma_mapping_error(np->pci_dev,
1246 np->tx_info[entry].mapping)) {
1247 dev->stats.tx_dropped++;
1248 goto err_out;
1249 }
1237 1250
1238 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); 1251 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1239 np->tx_ring[entry].status = cpu_to_le32(status); 1252 np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1268 netif_stop_queue(dev); 1281 netif_stop_queue(dev);
1269 1282
1270 return NETDEV_TX_OK; 1283 return NETDEV_TX_OK;
1271}
1272 1284
1285err_out:
1286 entry = prev_tx % TX_RING_SIZE;
1287 np->tx_info[entry].skb = NULL;
1288 if (i > 0) {
1289 pci_unmap_single(np->pci_dev,
1290 np->tx_info[entry].mapping,
1291 skb_first_frag_len(skb),
1292 PCI_DMA_TODEVICE);
1293 np->tx_info[entry].mapping = 0;
1294 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1295 for (j = 1; j < i; j++) {
1296 pci_unmap_single(np->pci_dev,
1297 np->tx_info[entry].mapping,
1298 skb_frag_size(
1299 &skb_shinfo(skb)->frags[j-1]),
1300 PCI_DMA_TODEVICE);
1301 entry++;
1302 }
1303 }
1304 dev_kfree_skb_any(skb);
1305 np->cur_tx = prev_tx;
1306 return NETDEV_TX_OK;
1307}
1273 1308
1274/* The interrupt handler does all of the Rx thread work and cleans up 1309/* The interrupt handler does all of the Rx thread work and cleans up
1275 after the Tx thread. */ 1310 after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
1569 break; /* Better luck next round. */ 1604 break; /* Better luck next round. */
1570 np->rx_info[entry].mapping = 1605 np->rx_info[entry].mapping =
1571 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1606 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1607 if (pci_dma_mapping_error(np->pci_dev,
1608 np->rx_info[entry].mapping)) {
1609 dev_kfree_skb(skb);
1610 np->rx_info[entry].skb = NULL;
1611 break;
1612 }
1572 np->rx_ring[entry].rxaddr = 1613 np->rx_ring[entry].rxaddr =
1573 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); 1614 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1574 } 1615 }
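Every starfire hunk adds the same missing safety check: a streaming DMA mapping can fail, so the returned cookie must be run through the mapping-error helper before it reaches the hardware, and anything built up so far (here, the freshly allocated skb) must be unwound. With the legacy PCI DMA API the driver uses, the per-buffer check looks roughly like this (the wrapper itself is illustrative):

    #include <linux/pci.h>
    #include <linux/skbuff.h>

    static int demo_map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
                               unsigned int len, dma_addr_t *mapping)
    {
            *mapping = pci_map_single(pdev, skb->data, len,
                                      PCI_DMA_FROMDEVICE);

            /* Never hand an unchecked cookie to the NIC. */
            if (pci_dma_mapping_error(pdev, *mapping)) {
                    dev_kfree_skb(skb);
                    return -ENOMEM;
            }
            return 0;
    }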
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 5b7ba25e0065..8a280e7d66bd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -891,6 +891,8 @@
891#define PCS_V1_WINDOW_SELECT 0x03fc 891#define PCS_V1_WINDOW_SELECT 0x03fc
892#define PCS_V2_WINDOW_DEF 0x9060 892#define PCS_V2_WINDOW_DEF 0x9060
893#define PCS_V2_WINDOW_SELECT 0x9064 893#define PCS_V2_WINDOW_SELECT 0x9064
894#define PCS_V2_RV_WINDOW_DEF 0x1060
895#define PCS_V2_RV_WINDOW_SELECT 0x1064
894 896
895/* PCS register entry bit positions and sizes */ 897/* PCS register entry bit positions and sizes */
896#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6 898#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index aaf0350076a9..a7d16db5c4b2 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
1151 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); 1151 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
1152 1152
1153 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1153 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1154 XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index); 1154 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
1155 mmd_data = XPCS16_IOREAD(pdata, offset); 1155 mmd_data = XPCS16_IOREAD(pdata, offset);
1156 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1156 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1157 1157
@@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
1183 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); 1183 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
1184 1184
1185 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1185 spin_lock_irqsave(&pdata->xpcs_lock, flags);
1186 XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index); 1186 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
1187 XPCS16_IOWRITE(pdata, offset, mmd_data); 1187 XPCS16_IOWRITE(pdata, offset, mmd_data);
1188 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1188 spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
1189} 1189}
@@ -3407,8 +3407,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
3407 3407
3408 /* Flush Tx queues */ 3408 /* Flush Tx queues */
3409 ret = xgbe_flush_tx_queues(pdata); 3409 ret = xgbe_flush_tx_queues(pdata);
3410 if (ret) 3410 if (ret) {
3411 netdev_err(pdata->netdev, "error flushing TX queues\n");
3411 return ret; 3412 return ret;
3413 }
3412 3414
3413 /* 3415 /*
3414 * Initialize DMA related features 3416 * Initialize DMA related features
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 155190db682d..1c87cc204075 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -539,6 +539,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
539 } 539 }
540 } 540 }
541 541
542isr_done:
542 /* If there is not a separate AN irq, handle it here */ 543 /* If there is not a separate AN irq, handle it here */
543 if (pdata->dev_irq == pdata->an_irq) 544 if (pdata->dev_irq == pdata->an_irq)
544 pdata->phy_if.an_isr(irq, pdata); 545 pdata->phy_if.an_isr(irq, pdata);
@@ -551,7 +552,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
551 if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq)) 552 if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
552 pdata->i2c_if.i2c_isr(irq, pdata); 553 pdata->i2c_if.i2c_isr(irq, pdata);
553 554
554isr_done:
555 return IRQ_HANDLED; 555 return IRQ_HANDLED;
556} 556}
557 557
@@ -1070,7 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
1070 1070
1071 DBGPR("-->xgbe_start\n"); 1071 DBGPR("-->xgbe_start\n");
1072 1072
1073 hw_if->init(pdata); 1073 ret = hw_if->init(pdata);
1074 if (ret)
1075 return ret;
1074 1076
1075 xgbe_napi_enable(pdata, 1); 1077 xgbe_napi_enable(pdata, 1);
1076 1078
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index e76b7f65b805..c2730f15bd8b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -265,6 +265,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
265 struct xgbe_prv_data *pdata; 265 struct xgbe_prv_data *pdata;
266 struct device *dev = &pdev->dev; 266 struct device *dev = &pdev->dev;
267 void __iomem * const *iomap_table; 267 void __iomem * const *iomap_table;
268 struct pci_dev *rdev;
268 unsigned int ma_lo, ma_hi; 269 unsigned int ma_lo, ma_hi;
269 unsigned int reg; 270 unsigned int reg;
270 int bar_mask; 271 int bar_mask;
@@ -326,8 +327,20 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
326 if (netif_msg_probe(pdata)) 327 if (netif_msg_probe(pdata))
327 dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs); 328 dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
328 329
330 /* Set the PCS indirect addressing definition registers */
331 rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
332 if (rdev &&
333 (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
334 pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
335 pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
336 } else {
337 pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
338 pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
339 }
340 pci_dev_put(rdev);
341
329 /* Configure the PCS indirect addressing support */ 342 /* Configure the PCS indirect addressing support */
330 reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF); 343 reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
331 pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET); 344 pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
332 pdata->xpcs_window <<= 6; 345 pdata->xpcs_window <<= 6;
333 pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE); 346 pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f52a9bd05bac..00108815b55e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -955,6 +955,8 @@ struct xgbe_prv_data {
955 955
956 /* XPCS indirect addressing lock */ 956 /* XPCS indirect addressing lock */
957 spinlock_t xpcs_lock; 957 spinlock_t xpcs_lock;
958 unsigned int xpcs_window_def_reg;
959 unsigned int xpcs_window_sel_reg;
958 unsigned int xpcs_window; 960 unsigned int xpcs_window;
959 unsigned int xpcs_window_size; 961 unsigned int xpcs_window_size;
960 unsigned int xpcs_window_mask; 962 unsigned int xpcs_window_mask;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c8f525574d68..7dcc907a449d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -685,8 +685,6 @@ static int alx_alloc_rings(struct alx_priv *alx)
685 return -ENOMEM; 685 return -ENOMEM;
686 } 686 }
687 687
688 alx_reinit_rings(alx);
689
690 return 0; 688 return 0;
691} 689}
692 690
@@ -703,7 +701,7 @@ static void alx_free_rings(struct alx_priv *alx)
703 if (alx->qnapi[0] && alx->qnapi[0]->rxq) 701 if (alx->qnapi[0] && alx->qnapi[0]->rxq)
704 kfree(alx->qnapi[0]->rxq->bufs); 702 kfree(alx->qnapi[0]->rxq->bufs);
705 703
706 if (!alx->descmem.virt) 704 if (alx->descmem.virt)
707 dma_free_coherent(&alx->hw.pdev->dev, 705 dma_free_coherent(&alx->hw.pdev->dev,
708 alx->descmem.size, 706 alx->descmem.size,
709 alx->descmem.virt, 707 alx->descmem.virt,
@@ -984,6 +982,7 @@ static int alx_realloc_resources(struct alx_priv *alx)
984 alx_free_rings(alx); 982 alx_free_rings(alx);
985 alx_free_napis(alx); 983 alx_free_napis(alx);
986 alx_disable_advanced_intr(alx); 984 alx_disable_advanced_intr(alx);
985 alx_init_intr(alx, false);
987 986
988 err = alx_alloc_napis(alx); 987 err = alx_alloc_napis(alx);
989 if (err) 988 if (err)
@@ -1241,6 +1240,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
1241 if (err) 1240 if (err)
1242 goto out_free_rings; 1241 goto out_free_rings;
1243 1242
1243 /* must be called after alx_request_irq because the chip stops working
1244 * if we copy the DMA addresses in alx_init_ring_ptrs twice after a
1245 * failed MSI-X interrupt request
1246 */
1247 alx_reinit_rings(alx);
1248
1244 netif_set_real_num_tx_queues(alx->dev, alx->num_txq); 1249 netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
1245 netif_set_real_num_rx_queues(alx->dev, alx->num_rxq); 1250 netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
1246 1251
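[Note on the alx hunks above] The fix is purely an ordering change: alx_reinit_rings() programs the descriptor DMA addresses into the chip, and calling it from alx_alloc_rings() meant it could run twice when the MSI-X request failed and IRQ setup fell back, which wedges the hardware. The safe shape of the open path, as a hedged fragment using the driver's own names:

	err = alx_alloc_rings(alx);	/* allocate descriptor memory only */
	if (err)
		goto out;
	err = alx_request_irq(alx);	/* may fall back from MSI-X to MSI/legacy */
	if (err)
		goto out_free_rings;
	alx_reinit_rings(alx);		/* program DMA addresses exactly once */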
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3b14d5144228..c483618b57bd 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev)
913 priv->old_link = 0; 913 priv->old_link = 0;
914 priv->old_duplex = -1; 914 priv->old_duplex = -1;
915 priv->old_pause = -1; 915 priv->old_pause = -1;
916 } else {
917 phydev = NULL;
916 } 918 }
917 919
918 /* mask all interrupts and request them */ 920 /* mask all interrupts and request them */
@@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev)
1083 enet_dmac_writel(priv, priv->dma_chan_int_mask, 1085 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1084 ENETDMAC_IRMASK, priv->tx_chan); 1086 ENETDMAC_IRMASK, priv->tx_chan);
1085 1087
1086 if (priv->has_phy) 1088 if (phydev)
1087 phy_start(phydev); 1089 phy_start(phydev);
1088 else 1090 else
1089 bcm_enet_adjust_link(dev); 1091 bcm_enet_adjust_link(dev);
@@ -1126,7 +1128,7 @@ out_freeirq:
1126 free_irq(dev->irq, dev); 1128 free_irq(dev->irq, dev);
1127 1129
1128out_phy_disconnect: 1130out_phy_disconnect:
1129 if (priv->has_phy) 1131 if (phydev)
1130 phy_disconnect(phydev); 1132 phy_disconnect(phydev);
1131 1133
1132 return ret; 1134 return ret;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 25d1eb4933d0..744ed6ddaf37 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
710 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; 710 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
711 unsigned int pkts_compl = 0, bytes_compl = 0; 711 unsigned int pkts_compl = 0, bytes_compl = 0;
712 struct bcm_sysport_cb *cb; 712 struct bcm_sysport_cb *cb;
713 struct netdev_queue *txq;
714 u32 hw_ind; 713 u32 hw_ind;
715 714
716 txq = netdev_get_tx_queue(ndev, ring->index);
717
718 /* Compute how many descriptors have been processed since last call */ 715 /* Compute how many descriptors have been processed since last call */
719 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); 716 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
720 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; 717 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
745 742
746 ring->c_index = c_index; 743 ring->c_index = c_index;
747 744
748 if (netif_tx_queue_stopped(txq) && pkts_compl)
749 netif_tx_wake_queue(txq);
750
751 netif_dbg(priv, tx_done, ndev, 745 netif_dbg(priv, tx_done, ndev,
752 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n", 746 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
753 ring->index, ring->c_index, pkts_compl, bytes_compl); 747 ring->index, ring->c_index, pkts_compl, bytes_compl);
@@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
759static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, 753static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
760 struct bcm_sysport_tx_ring *ring) 754 struct bcm_sysport_tx_ring *ring)
761{ 755{
756 struct netdev_queue *txq;
762 unsigned int released; 757 unsigned int released;
763 unsigned long flags; 758 unsigned long flags;
764 759
760 txq = netdev_get_tx_queue(priv->netdev, ring->index);
761
765 spin_lock_irqsave(&ring->lock, flags); 762 spin_lock_irqsave(&ring->lock, flags);
766 released = __bcm_sysport_tx_reclaim(priv, ring); 763 released = __bcm_sysport_tx_reclaim(priv, ring);
764 if (released)
765 netif_tx_wake_queue(txq);
766
767 spin_unlock_irqrestore(&ring->lock, flags); 767 spin_unlock_irqrestore(&ring->lock, flags);
768 768
769 return released; 769 return released;
770} 770}
771 771
772/* Locked version of the per-ring TX reclaim, but does not wake the queue */
773static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
774 struct bcm_sysport_tx_ring *ring)
775{
776 unsigned long flags;
777
778 spin_lock_irqsave(&ring->lock, flags);
779 __bcm_sysport_tx_reclaim(priv, ring);
780 spin_unlock_irqrestore(&ring->lock, flags);
781}
782
772static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget) 783static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
773{ 784{
774 struct bcm_sysport_tx_ring *ring = 785 struct bcm_sysport_tx_ring *ring =
@@ -1012,15 +1023,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
1012 goto out; 1023 goto out;
1013 } 1024 }
1014 1025
1015 /* Insert TSB and checksum infos */
1016 if (priv->tsb_en) {
1017 skb = bcm_sysport_insert_tsb(skb, dev);
1018 if (!skb) {
1019 ret = NETDEV_TX_OK;
1020 goto out;
1021 }
1022 }
1023
1024 /* The Ethernet switch we are interfaced with needs packets to be at 1026 /* The Ethernet switch we are interfaced with needs packets to be at
1025 * least 64 bytes (including FCS) otherwise they will be discarded when 1027 * least 64 bytes (including FCS) otherwise they will be discarded when
1026 * they enter the switch port logic. When Broadcom tags are enabled, we 1028 * they enter the switch port logic. When Broadcom tags are enabled, we
@@ -1028,13 +1030,21 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
1028 * (including FCS and tag) because the length verification is done after 1030 * (including FCS and tag) because the length verification is done after
1029 * the Broadcom tag is stripped off the ingress packet. 1031 * the Broadcom tag is stripped off the ingress packet.
1030 */ 1032 */
1031 if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) { 1033 if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
1032 ret = NETDEV_TX_OK; 1034 ret = NETDEV_TX_OK;
1033 goto out; 1035 goto out;
1034 } 1036 }
1035 1037
1036 skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ? 1038 /* Insert TSB and checksum infos */
1037 ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len; 1039 if (priv->tsb_en) {
1040 skb = bcm_sysport_insert_tsb(skb, dev);
1041 if (!skb) {
1042 ret = NETDEV_TX_OK;
1043 goto out;
1044 }
1045 }
1046
1047 skb_len = skb->len;
1038 1048
1039 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); 1049 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
1040 if (dma_mapping_error(kdev, mapping)) { 1050 if (dma_mapping_error(kdev, mapping)) {
@@ -1253,7 +1263,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1253 napi_disable(&ring->napi); 1263 napi_disable(&ring->napi);
1254 netif_napi_del(&ring->napi); 1264 netif_napi_del(&ring->napi);
1255 1265
1256 bcm_sysport_tx_reclaim(priv, ring); 1266 bcm_sysport_tx_clean(priv, ring);
1257 1267
1258 kfree(ring->cbs); 1268 kfree(ring->cbs);
1259 ring->cbs = NULL; 1269 ring->cbs = NULL;
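[Note on the bcm_sysport hunks above] Reclaim is split into two locked wrappers: bcm_sysport_tx_reclaim() wakes the netdev queue when descriptors were actually freed, while bcm_sysport_tx_clean() reclaims without waking, which is what ring teardown wants since the queue is going away. The generic shape of the completion-path wrapper, as a kernel-style fragment:

	spin_lock_irqsave(&ring->lock, flags);
	released = __reclaim(priv, ring);	/* __reclaim stands for the unlocked worker */
	if (released)
		netif_tx_wake_queue(txq);	/* only on the normal completion path */
	spin_unlock_irqrestore(&ring->lock, flags);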
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9608cb49a11c..4fcc6a84a087 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1099,7 +1099,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1099{ 1099{
1100#ifdef CONFIG_INET 1100#ifdef CONFIG_INET
1101 struct tcphdr *th; 1101 struct tcphdr *th;
1102 int len, nw_off, tcp_opt_len; 1102 int len, nw_off, tcp_opt_len = 0;
1103 1103
1104 if (tcp_ts) 1104 if (tcp_ts)
1105 tcp_opt_len = 12; 1105 tcp_opt_len = 12;
@@ -5314,17 +5314,12 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
5314 if ((link_info->support_auto_speeds | diff) != 5314 if ((link_info->support_auto_speeds | diff) !=
5315 link_info->support_auto_speeds) { 5315 link_info->support_auto_speeds) {
5316 /* An advertised speed is no longer supported, so we need to 5316 /* An advertised speed is no longer supported, so we need to
5317 * update the advertisement settings. See bnxt_reset() for 5317 * update the advertisement settings. Caller holds RTNL
5318 * comments about the rtnl_lock() sequence below. 5318 * so we can modify link settings.
5319 */ 5319 */
5320 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5321 rtnl_lock();
5322 link_info->advertising = link_info->support_auto_speeds; 5320 link_info->advertising = link_info->support_auto_speeds;
5323 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 5321 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5324 (link_info->autoneg & BNXT_AUTONEG_SPEED))
5325 bnxt_hwrm_set_link_setting(bp, true, false); 5322 bnxt_hwrm_set_link_setting(bp, true, false);
5326 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5327 rtnl_unlock();
5328 } 5323 }
5329 return 0; 5324 return 0;
5330} 5325}
@@ -6200,29 +6195,37 @@ bnxt_restart_timer:
6200 mod_timer(&bp->timer, jiffies + bp->current_interval); 6195 mod_timer(&bp->timer, jiffies + bp->current_interval);
6201} 6196}
6202 6197
6203/* Only called from bnxt_sp_task() */ 6198static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6204static void bnxt_reset(struct bnxt *bp, bool silent)
6205{ 6199{
6206 /* bnxt_reset_task() calls bnxt_close_nic() which waits 6200 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
6207 * for BNXT_STATE_IN_SP_TASK to clear. 6201 * set. If the device is being closed, bnxt_close() may be holding
6208 * If there is a parallel dev_close(), bnxt_close() may be holding
6209 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 6202 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
6210 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 6203 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
6211 */ 6204 */
6212 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6205 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6213 rtnl_lock(); 6206 rtnl_lock();
6214 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 6207}
6215 bnxt_reset_task(bp, silent); 6208
6209static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
6210{
6216 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6211 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6217 rtnl_unlock(); 6212 rtnl_unlock();
6218} 6213}
6219 6214
6215/* Only called from bnxt_sp_task() */
6216static void bnxt_reset(struct bnxt *bp, bool silent)
6217{
6218 bnxt_rtnl_lock_sp(bp);
6219 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6220 bnxt_reset_task(bp, silent);
6221 bnxt_rtnl_unlock_sp(bp);
6222}
6223
6220static void bnxt_cfg_ntp_filters(struct bnxt *); 6224static void bnxt_cfg_ntp_filters(struct bnxt *);
6221 6225
6222static void bnxt_sp_task(struct work_struct *work) 6226static void bnxt_sp_task(struct work_struct *work)
6223{ 6227{
6224 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 6228 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
6225 int rc;
6226 6229
6227 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6230 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6228 smp_mb__after_atomic(); 6231 smp_mb__after_atomic();
@@ -6236,16 +6239,6 @@ static void bnxt_sp_task(struct work_struct *work)
6236 6239
6237 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 6240 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
6238 bnxt_cfg_ntp_filters(bp); 6241 bnxt_cfg_ntp_filters(bp);
6239 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
6240 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
6241 &bp->sp_event))
6242 bnxt_hwrm_phy_qcaps(bp);
6243
6244 rc = bnxt_update_link(bp, true);
6245 if (rc)
6246 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
6247 rc);
6248 }
6249 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 6242 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
6250 bnxt_hwrm_exec_fwd_req(bp); 6243 bnxt_hwrm_exec_fwd_req(bp);
6251 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { 6244 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6266,18 +6259,39 @@ static void bnxt_sp_task(struct work_struct *work)
6266 bnxt_hwrm_tunnel_dst_port_free( 6259 bnxt_hwrm_tunnel_dst_port_free(
6267 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 6260 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6268 } 6261 }
6262 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
6263 bnxt_hwrm_port_qstats(bp);
6264
6265 /* The functions below will clear BNXT_STATE_IN_SP_TASK. They
6266 * must be the last functions to be called before exiting.
6267 */
6268 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
6269 int rc = 0;
6270
6271 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
6272 &bp->sp_event))
6273 bnxt_hwrm_phy_qcaps(bp);
6274
6275 bnxt_rtnl_lock_sp(bp);
6276 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6277 rc = bnxt_update_link(bp, true);
6278 bnxt_rtnl_unlock_sp(bp);
6279 if (rc)
6280 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
6281 rc);
6282 }
6283 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
6284 bnxt_rtnl_lock_sp(bp);
6285 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6286 bnxt_get_port_module_status(bp);
6287 bnxt_rtnl_unlock_sp(bp);
6288 }
6269 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 6289 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
6270 bnxt_reset(bp, false); 6290 bnxt_reset(bp, false);
6271 6291
6272 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 6292 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
6273 bnxt_reset(bp, true); 6293 bnxt_reset(bp, true);
6274 6294
6275 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
6276 bnxt_get_port_module_status(bp);
6277
6278 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
6279 bnxt_hwrm_port_qstats(bp);
6280
6281 smp_mb__before_atomic(); 6295 smp_mb__before_atomic();
6282 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 6296 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6283} 6297}
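[Note on the bnxt hunks above] The refactor factors the lock dance into bnxt_rtnl_lock_sp()/bnxt_rtnl_unlock_sp(): bnxt_close() takes RTNL and then waits for BNXT_STATE_IN_SP_TASK to clear, so the service task must drop that flag before taking RTNL or the two paths deadlock. It must then re-check BNXT_STATE_OPEN under RTNL, since the device may have been closed in the window. The pattern in miniature (kernel-style fragment; do_link_work() is a hypothetical placeholder):

	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);	/* let bnxt_close() proceed */
	rtnl_lock();
	if (test_bit(BNXT_STATE_OPEN, &bp->state))	/* state may have changed */
		do_link_work(bp);
	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
	rtnl_unlock();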
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 185e9e047aa9..ae42de4fdddf 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8720,11 +8720,14 @@ static void tg3_free_consistent(struct tg3 *tp)
8720 tg3_mem_rx_release(tp); 8720 tg3_mem_rx_release(tp);
8721 tg3_mem_tx_release(tp); 8721 tg3_mem_tx_release(tp);
8722 8722
8723 /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
8724 tg3_full_lock(tp, 0);
8723 if (tp->hw_stats) { 8725 if (tp->hw_stats) {
8724 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), 8726 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8725 tp->hw_stats, tp->stats_mapping); 8727 tp->hw_stats, tp->stats_mapping);
8726 tp->hw_stats = NULL; 8728 tp->hw_stats = NULL;
8727 } 8729 }
8730 tg3_full_unlock(tp);
8728} 8731}
8729 8732
8730/* 8733/*
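[Note on the tg3 hunk above] It closes a use-after-free window: tg3_get_stats64() reads tp->hw_stats, so the free and the NULL-ing must happen under the same lock the reader takes. A self-contained userspace analogue of the pattern, with a pthread mutex standing in for tg3_full_lock():

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int *hw_stats;		/* stands in for tp->hw_stats */

	static void free_stats(void)
	{
		pthread_mutex_lock(&lock);
		free(hw_stats);
		hw_stats = NULL;	/* readers re-check under the same lock */
		pthread_mutex_unlock(&lock);
	}

	static int read_stats(void)
	{
		int v = 0;

		pthread_mutex_lock(&lock);
		if (hw_stats)		/* cannot race with free_stats() now */
			v = *hw_stats;
		pthread_mutex_unlock(&lock);
		return v;
	}

	int main(void)
	{
		hw_stats = malloc(sizeof(*hw_stats));
		if (hw_stats)
			*hw_stats = 42;
		read_stats();
		free_stats();
		return 0;
	}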
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c0fb80acc2da..baba2db9d9c2 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -43,13 +43,13 @@
43#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */ 43#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
44#define MIN_RX_RING_SIZE 64 44#define MIN_RX_RING_SIZE 64
45#define MAX_RX_RING_SIZE 8192 45#define MAX_RX_RING_SIZE 8192
46#define RX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \ 46#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
47 * (bp)->rx_ring_size) 47 * (bp)->rx_ring_size)
48 48
49#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */ 49#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
50#define MIN_TX_RING_SIZE 64 50#define MIN_TX_RING_SIZE 64
51#define MAX_TX_RING_SIZE 4096 51#define MAX_TX_RING_SIZE 4096
52#define TX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \ 52#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
53 * (bp)->tx_ring_size) 53 * (bp)->tx_ring_size)
54 54
55/* level of occupied TX descriptors under which we wake up TX process */ 55/* level of occupied TX descriptors under which we wake up TX process */
@@ -78,6 +78,37 @@
78 */ 78 */
79#define MACB_HALT_TIMEOUT 1230 79#define MACB_HALT_TIMEOUT 1230
80 80
81/* DMA buffer descriptor might be a different size,
82 * depending on the hardware configuration.
83 */
84static unsigned int macb_dma_desc_get_size(struct macb *bp)
85{
86#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
87 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
88 return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
89#endif
90 return sizeof(struct macb_dma_desc);
91}
92
93static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
94{
95#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
96 /* The DMA buffer descriptor is 4 words long (instead of 2 words)
97 * for 64-bit GEM.
98 */
99 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
100 idx <<= 1;
101#endif
102 return idx;
103}
104
105#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
106static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
107{
108 return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
109}
110#endif
111
81/* Ring buffer accessors */ 112/* Ring buffer accessors */
82static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) 113static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
83{ 114{
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
87static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, 118static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
88 unsigned int index) 119 unsigned int index)
89{ 120{
90 return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)]; 121 index = macb_tx_ring_wrap(queue->bp, index);
122 index = macb_adj_dma_desc_idx(queue->bp, index);
123 return &queue->tx_ring[index];
91} 124}
92 125
93static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, 126static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
101 dma_addr_t offset; 134 dma_addr_t offset;
102 135
103 offset = macb_tx_ring_wrap(queue->bp, index) * 136 offset = macb_tx_ring_wrap(queue->bp, index) *
104 sizeof(struct macb_dma_desc); 137 macb_dma_desc_get_size(queue->bp);
105 138
106 return queue->tx_ring_dma + offset; 139 return queue->tx_ring_dma + offset;
107} 140}
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
113 146
114static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) 147static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
115{ 148{
116 return &bp->rx_ring[macb_rx_ring_wrap(bp, index)]; 149 index = macb_rx_ring_wrap(bp, index);
150 index = macb_adj_dma_desc_idx(bp, index);
151 return &bp->rx_ring[index];
117} 152}
118 153
119static void *macb_rx_buffer(struct macb *bp, unsigned int index) 154static void *macb_rx_buffer(struct macb *bp, unsigned int index)
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
560 } 595 }
561} 596}
562 597
563static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr) 598static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
564{ 599{
565 desc->addr = (u32)addr;
566#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 600#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
567 desc->addrh = (u32)(addr >> 32); 601 struct macb_dma_desc_64 *desc_64;
602
603 if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
604 desc_64 = macb_64b_desc(bp, desc);
605 desc_64->addrh = upper_32_bits(addr);
606 }
568#endif 607#endif
608 desc->addr = lower_32_bits(addr);
609}
610
611static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
612{
613 dma_addr_t addr = 0;
614#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
615 struct macb_dma_desc_64 *desc_64;
616
617 if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
618 desc_64 = macb_64b_desc(bp, desc);
619 addr = ((u64)(desc_64->addrh) << 32);
620 }
621#endif
622 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
623 return addr;
569} 624}
570 625
571static void macb_tx_error_task(struct work_struct *work) 626static void macb_tx_error_task(struct work_struct *work)
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
649 704
650 /* Set end of TX queue */ 705 /* Set end of TX queue */
651 desc = macb_tx_desc(queue, 0); 706 desc = macb_tx_desc(queue, 0);
652 macb_set_addr(desc, 0); 707 macb_set_addr(bp, desc, 0);
653 desc->ctrl = MACB_BIT(TX_USED); 708 desc->ctrl = MACB_BIT(TX_USED);
654 709
655 /* Make descriptor updates visible to hardware */ 710 /* Make descriptor updates visible to hardware */
656 wmb(); 711 wmb();
657 712
658 /* Reinitialize the TX desc queue */ 713 /* Reinitialize the TX desc queue */
659 queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); 714 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
660#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 715#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
661 queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); 716 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
717 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
662#endif 718#endif
663 /* Make TX ring reflect state of hardware */ 719 /* Make TX ring reflect state of hardware */
664 queue->tx_head = 0; 720 queue->tx_head = 0;
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
750 unsigned int entry; 806 unsigned int entry;
751 struct sk_buff *skb; 807 struct sk_buff *skb;
752 dma_addr_t paddr; 808 dma_addr_t paddr;
809 struct macb_dma_desc *desc;
753 810
754 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, 811 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
755 bp->rx_ring_size) > 0) { 812 bp->rx_ring_size) > 0) {
@@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp)
759 rmb(); 816 rmb();
760 817
761 bp->rx_prepared_head++; 818 bp->rx_prepared_head++;
819 desc = macb_rx_desc(bp, entry);
762 820
763 if (!bp->rx_skbuff[entry]) { 821 if (!bp->rx_skbuff[entry]) {
764 /* allocate sk_buff for this free entry in ring */ 822 /* allocate sk_buff for this free entry in ring */
@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp)
782 840
783 if (entry == bp->rx_ring_size - 1) 841 if (entry == bp->rx_ring_size - 1)
784 paddr |= MACB_BIT(RX_WRAP); 842 paddr |= MACB_BIT(RX_WRAP);
785 macb_set_addr(&(bp->rx_ring[entry]), paddr); 843 macb_set_addr(bp, desc, paddr);
786 bp->rx_ring[entry].ctrl = 0; 844 desc->ctrl = 0;
787 845
788 /* properly align Ethernet header */ 846 /* properly align Ethernet header */
789 skb_reserve(skb, NET_IP_ALIGN); 847 skb_reserve(skb, NET_IP_ALIGN);
790 } else { 848 } else {
791 bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); 849 desc->addr &= ~MACB_BIT(RX_USED);
792 bp->rx_ring[entry].ctrl = 0; 850 desc->ctrl = 0;
793 } 851 }
794 } 852 }
795 853
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
835 bool rxused; 893 bool rxused;
836 894
837 entry = macb_rx_ring_wrap(bp, bp->rx_tail); 895 entry = macb_rx_ring_wrap(bp, bp->rx_tail);
838 desc = &bp->rx_ring[entry]; 896 desc = macb_rx_desc(bp, entry);
839 897
840 /* Make hw descriptor updates visible to CPU */ 898 /* Make hw descriptor updates visible to CPU */
841 rmb(); 899 rmb();
842 900
843 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; 901 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
844 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 902 addr = macb_get_addr(bp, desc);
845#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
846 addr |= ((u64)(desc->addrh) << 32);
847#endif
848 ctrl = desc->ctrl; 903 ctrl = desc->ctrl;
849 904
850 if (!rxused) 905 if (!rxused)
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
987static inline void macb_init_rx_ring(struct macb *bp) 1042static inline void macb_init_rx_ring(struct macb *bp)
988{ 1043{
989 dma_addr_t addr; 1044 dma_addr_t addr;
1045 struct macb_dma_desc *desc = NULL;
990 int i; 1046 int i;
991 1047
992 addr = bp->rx_buffers_dma; 1048 addr = bp->rx_buffers_dma;
993 for (i = 0; i < bp->rx_ring_size; i++) { 1049 for (i = 0; i < bp->rx_ring_size; i++) {
994 bp->rx_ring[i].addr = addr; 1050 desc = macb_rx_desc(bp, i);
995 bp->rx_ring[i].ctrl = 0; 1051 macb_set_addr(bp, desc, addr);
1052 desc->ctrl = 0;
996 addr += bp->rx_buffer_size; 1053 addr += bp->rx_buffer_size;
997 } 1054 }
998 bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP); 1055 desc->addr |= MACB_BIT(RX_WRAP);
999 bp->rx_tail = 0; 1056 bp->rx_tail = 0;
1000} 1057}
1001 1058
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
1008 1065
1009 for (tail = bp->rx_tail; budget > 0; tail++) { 1066 for (tail = bp->rx_tail; budget > 0; tail++) {
1010 struct macb_dma_desc *desc = macb_rx_desc(bp, tail); 1067 struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
1011 u32 addr, ctrl; 1068 u32 ctrl;
1012 1069
1013 /* Make hw descriptor updates visible to CPU */ 1070 /* Make hw descriptor updates visible to CPU */
1014 rmb(); 1071 rmb();
1015 1072
1016 addr = desc->addr;
1017 ctrl = desc->ctrl; 1073 ctrl = desc->ctrl;
1018 1074
1019 if (!(addr & MACB_BIT(RX_USED))) 1075 if (!(desc->addr & MACB_BIT(RX_USED)))
1020 break; 1076 break;
1021 1077
1022 if (ctrl & MACB_BIT(RX_SOF)) { 1078 if (ctrl & MACB_BIT(RX_SOF)) {
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
1336 i = tx_head; 1392 i = tx_head;
1337 entry = macb_tx_ring_wrap(bp, i); 1393 entry = macb_tx_ring_wrap(bp, i);
1338 ctrl = MACB_BIT(TX_USED); 1394 ctrl = MACB_BIT(TX_USED);
1339 desc = &queue->tx_ring[entry]; 1395 desc = macb_tx_desc(queue, entry);
1340 desc->ctrl = ctrl; 1396 desc->ctrl = ctrl;
1341 1397
1342 if (lso_ctrl) { 1398 if (lso_ctrl) {
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
1358 i--; 1414 i--;
1359 entry = macb_tx_ring_wrap(bp, i); 1415 entry = macb_tx_ring_wrap(bp, i);
1360 tx_skb = &queue->tx_skb[entry]; 1416 tx_skb = &queue->tx_skb[entry];
1361 desc = &queue->tx_ring[entry]; 1417 desc = macb_tx_desc(queue, entry);
1362 1418
1363 ctrl = (u32)tx_skb->size; 1419 ctrl = (u32)tx_skb->size;
1364 if (eof) { 1420 if (eof) {
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
1379 ctrl |= MACB_BF(MSS_MFS, mss_mfs); 1435 ctrl |= MACB_BF(MSS_MFS, mss_mfs);
1380 1436
1381 /* Set TX buffer descriptor */ 1437 /* Set TX buffer descriptor */
1382 macb_set_addr(desc, tx_skb->mapping); 1438 macb_set_addr(bp, desc, tx_skb->mapping);
1383 /* desc->addr must be visible to hardware before clearing 1439 /* desc->addr must be visible to hardware before clearing
1384 * 'TX_USED' bit in desc->ctrl. 1440 * 'TX_USED' bit in desc->ctrl.
1385 */ 1441 */
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
1586 if (!skb) 1642 if (!skb)
1587 continue; 1643 continue;
1588 1644
1589 desc = &bp->rx_ring[i]; 1645 desc = macb_rx_desc(bp, i);
1590 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 1646 addr = macb_get_addr(bp, desc);
1591#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1647
1592 addr |= ((u64)(desc->addrh) << 32);
1593#endif
1594 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, 1648 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1595 DMA_FROM_DEVICE); 1649 DMA_FROM_DEVICE);
1596 dev_kfree_skb_any(skb); 1650 dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ out_err:
1711static void gem_init_rings(struct macb *bp) 1765static void gem_init_rings(struct macb *bp)
1712{ 1766{
1713 struct macb_queue *queue; 1767 struct macb_queue *queue;
1768 struct macb_dma_desc *desc = NULL;
1714 unsigned int q; 1769 unsigned int q;
1715 int i; 1770 int i;
1716 1771
1717 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1772 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1718 for (i = 0; i < bp->tx_ring_size; i++) { 1773 for (i = 0; i < bp->tx_ring_size; i++) {
1719 queue->tx_ring[i].addr = 0; 1774 desc = macb_tx_desc(queue, i);
1720 queue->tx_ring[i].ctrl = MACB_BIT(TX_USED); 1775 macb_set_addr(bp, desc, 0);
1776 desc->ctrl = MACB_BIT(TX_USED);
1721 } 1777 }
1722 queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP); 1778 desc->ctrl |= MACB_BIT(TX_WRAP);
1723 queue->tx_head = 0; 1779 queue->tx_head = 0;
1724 queue->tx_tail = 0; 1780 queue->tx_tail = 0;
1725 } 1781 }
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
1733static void macb_init_rings(struct macb *bp) 1789static void macb_init_rings(struct macb *bp)
1734{ 1790{
1735 int i; 1791 int i;
1792 struct macb_dma_desc *desc = NULL;
1736 1793
1737 macb_init_rx_ring(bp); 1794 macb_init_rx_ring(bp);
1738 1795
1739 for (i = 0; i < bp->tx_ring_size; i++) { 1796 for (i = 0; i < bp->tx_ring_size; i++) {
1740 bp->queues[0].tx_ring[i].addr = 0; 1797 desc = macb_tx_desc(&bp->queues[0], i);
1741 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); 1798 macb_set_addr(bp, desc, 0);
1799 desc->ctrl = MACB_BIT(TX_USED);
1742 } 1800 }
1743 bp->queues[0].tx_head = 0; 1801 bp->queues[0].tx_head = 0;
1744 bp->queues[0].tx_tail = 0; 1802 bp->queues[0].tx_tail = 0;
1745 bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP); 1803 desc->ctrl |= MACB_BIT(TX_WRAP);
1746} 1804}
1747 1805
1748static void macb_reset_hw(struct macb *bp) 1806static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
1863 dmacfg &= ~GEM_BIT(TXCOEN); 1921 dmacfg &= ~GEM_BIT(TXCOEN);
1864 1922
1865#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1923#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1866 dmacfg |= GEM_BIT(ADDR64); 1924 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1925 dmacfg |= GEM_BIT(ADDR64);
1867#endif 1926#endif
1868 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 1927 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
1869 dmacfg); 1928 dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
1910 macb_configure_dma(bp); 1969 macb_configure_dma(bp);
1911 1970
1912 /* Initialize TX and RX buffers */ 1971 /* Initialize TX and RX buffers */
1913 macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma)); 1972 macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
1914#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1973#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1915 macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32)); 1974 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1975 macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
1916#endif 1976#endif
1917 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1977 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1918 queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); 1978 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1919#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1979#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1920 queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32)); 1980 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1981 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
1921#endif 1982#endif
1922 1983
1923 /* Enable interrupts */ 1984 /* Enable interrupts */
@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev)
2627 queue->IMR = GEM_IMR(hw_q - 1); 2688 queue->IMR = GEM_IMR(hw_q - 1);
2628 queue->TBQP = GEM_TBQP(hw_q - 1); 2689 queue->TBQP = GEM_TBQP(hw_q - 1);
2629#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2690#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2630 queue->TBQPH = GEM_TBQPH(hw_q -1); 2691 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
2692 queue->TBQPH = GEM_TBQPH(hw_q - 1);
2631#endif 2693#endif
2632 } else { 2694 } else {
2633 /* queue0 uses legacy registers */ 2695 /* queue0 uses legacy registers */
@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev)
2637 queue->IMR = MACB_IMR; 2699 queue->IMR = MACB_IMR;
2638 queue->TBQP = MACB_TBQP; 2700 queue->TBQP = MACB_TBQP;
2639#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2701#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2640 queue->TBQPH = MACB_TBQPH; 2702 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
2703 queue->TBQPH = MACB_TBQPH;
2641#endif 2704#endif
2642 } 2705 }
2643 2706
@@ -2730,13 +2793,14 @@ static int macb_init(struct platform_device *pdev)
2730static int at91ether_start(struct net_device *dev) 2793static int at91ether_start(struct net_device *dev)
2731{ 2794{
2732 struct macb *lp = netdev_priv(dev); 2795 struct macb *lp = netdev_priv(dev);
2796 struct macb_dma_desc *desc;
2733 dma_addr_t addr; 2797 dma_addr_t addr;
2734 u32 ctl; 2798 u32 ctl;
2735 int i; 2799 int i;
2736 2800
2737 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 2801 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
2738 (AT91ETHER_MAX_RX_DESCR * 2802 (AT91ETHER_MAX_RX_DESCR *
2739 sizeof(struct macb_dma_desc)), 2803 macb_dma_desc_get_size(lp)),
2740 &lp->rx_ring_dma, GFP_KERNEL); 2804 &lp->rx_ring_dma, GFP_KERNEL);
2741 if (!lp->rx_ring) 2805 if (!lp->rx_ring)
2742 return -ENOMEM; 2806 return -ENOMEM;
@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev)
2748 if (!lp->rx_buffers) { 2812 if (!lp->rx_buffers) {
2749 dma_free_coherent(&lp->pdev->dev, 2813 dma_free_coherent(&lp->pdev->dev,
2750 AT91ETHER_MAX_RX_DESCR * 2814 AT91ETHER_MAX_RX_DESCR *
2751 sizeof(struct macb_dma_desc), 2815 macb_dma_desc_get_size(lp),
2752 lp->rx_ring, lp->rx_ring_dma); 2816 lp->rx_ring, lp->rx_ring_dma);
2753 lp->rx_ring = NULL; 2817 lp->rx_ring = NULL;
2754 return -ENOMEM; 2818 return -ENOMEM;
@@ -2756,13 +2820,14 @@ static int at91ether_start(struct net_device *dev)
2756 2820
2757 addr = lp->rx_buffers_dma; 2821 addr = lp->rx_buffers_dma;
2758 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) { 2822 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
2759 lp->rx_ring[i].addr = addr; 2823 desc = macb_rx_desc(lp, i);
2760 lp->rx_ring[i].ctrl = 0; 2824 macb_set_addr(lp, desc, addr);
2825 desc->ctrl = 0;
2761 addr += AT91ETHER_MAX_RBUFF_SZ; 2826 addr += AT91ETHER_MAX_RBUFF_SZ;
2762 } 2827 }
2763 2828
2764 /* Set the Wrap bit on the last descriptor */ 2829 /* Set the Wrap bit on the last descriptor */
2765 lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP); 2830 desc->addr |= MACB_BIT(RX_WRAP);
2766 2831
2767 /* Reset buffer index */ 2832 /* Reset buffer index */
2768 lp->rx_tail = 0; 2833 lp->rx_tail = 0;
@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev)
2834 2899
2835 dma_free_coherent(&lp->pdev->dev, 2900 dma_free_coherent(&lp->pdev->dev,
2836 AT91ETHER_MAX_RX_DESCR * 2901 AT91ETHER_MAX_RX_DESCR *
2837 sizeof(struct macb_dma_desc), 2902 macb_dma_desc_get_size(lp),
2838 lp->rx_ring, lp->rx_ring_dma); 2903 lp->rx_ring, lp->rx_ring_dma);
2839 lp->rx_ring = NULL; 2904 lp->rx_ring = NULL;
2840 2905
@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
2885static void at91ether_rx(struct net_device *dev) 2950static void at91ether_rx(struct net_device *dev)
2886{ 2951{
2887 struct macb *lp = netdev_priv(dev); 2952 struct macb *lp = netdev_priv(dev);
2953 struct macb_dma_desc *desc;
2888 unsigned char *p_recv; 2954 unsigned char *p_recv;
2889 struct sk_buff *skb; 2955 struct sk_buff *skb;
2890 unsigned int pktlen; 2956 unsigned int pktlen;
2891 2957
2892 while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) { 2958 desc = macb_rx_desc(lp, lp->rx_tail);
2959 while (desc->addr & MACB_BIT(RX_USED)) {
2893 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ; 2960 p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
2894 pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl); 2961 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
2895 skb = netdev_alloc_skb(dev, pktlen + 2); 2962 skb = netdev_alloc_skb(dev, pktlen + 2);
2896 if (skb) { 2963 if (skb) {
2897 skb_reserve(skb, 2); 2964 skb_reserve(skb, 2);
@@ -2905,17 +2972,19 @@ static void at91ether_rx(struct net_device *dev)
2905 lp->stats.rx_dropped++; 2972 lp->stats.rx_dropped++;
2906 } 2973 }
2907 2974
2908 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) 2975 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
2909 lp->stats.multicast++; 2976 lp->stats.multicast++;
2910 2977
2911 /* reset ownership bit */ 2978 /* reset ownership bit */
2912 lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED); 2979 desc->addr &= ~MACB_BIT(RX_USED);
2913 2980
2914 /* wrap after last buffer */ 2981 /* wrap after last buffer */
2915 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1) 2982 if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
2916 lp->rx_tail = 0; 2983 lp->rx_tail = 0;
2917 else 2984 else
2918 lp->rx_tail++; 2985 lp->rx_tail++;
2986
2987 desc = macb_rx_desc(lp, lp->rx_tail);
2919 } 2988 }
2920} 2989}
2921 2990
@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev)
3211 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); 3280 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
3212 3281
3213#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 3282#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3214 if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32) 3283 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
3215 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); 3284 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
3285 bp->hw_dma_cap = HW_DMA_CAP_64B;
3286 } else
3287 bp->hw_dma_cap = HW_DMA_CAP_32B;
3216#endif 3288#endif
3217 3289
3218 spin_lock_init(&bp->lock); 3290 spin_lock_init(&bp->lock);
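[Note on the macb.c hunks above] On 64-bit-capable GEM the hardware descriptor is four 32-bit words, with the high address half living in a struct macb_dma_desc_64 that directly follows the base descriptor in memory; every accessor therefore goes through macb_dma_desc_get_size() and macb_adj_dma_desc_idx() instead of sizeof() and plain array indexing. The address split itself is the standard lower_32_bits()/upper_32_bits() one; a self-contained userspace illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace stand-ins for the kernel's lower_32_bits()/upper_32_bits() */
	static uint32_t lo32(uint64_t v) { return (uint32_t)v; }
	static uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

	int main(void)
	{
		uint64_t dma = 0x00000045deadbe00ULL;	/* example 44-bit DMA address */

		printf("desc->addr     = 0x%08x\n", lo32(dma));	/* low word  */
		printf("desc_64->addrh = 0x%08x\n", hi32(dma));	/* high word */
		return 0;
	}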
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d67adad67be1..fc8550a5d47f 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -385,6 +385,8 @@
385/* Bitfields in DCFG6. */ 385/* Bitfields in DCFG6. */
386#define GEM_PBUF_LSO_OFFSET 27 386#define GEM_PBUF_LSO_OFFSET 27
387#define GEM_PBUF_LSO_SIZE 1 387#define GEM_PBUF_LSO_SIZE 1
388#define GEM_DAW64_OFFSET 23
389#define GEM_DAW64_SIZE 1
388 390
389/* Constants for CLK */ 391/* Constants for CLK */
390#define MACB_CLK_DIV8 0 392#define MACB_CLK_DIV8 0
@@ -487,12 +489,20 @@
487struct macb_dma_desc { 489struct macb_dma_desc {
488 u32 addr; 490 u32 addr;
489 u32 ctrl; 491 u32 ctrl;
492};
493
490#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 494#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
491 u32 addrh; 495enum macb_hw_dma_cap {
492 u32 resvd; 496 HW_DMA_CAP_32B,
493#endif 497 HW_DMA_CAP_64B,
494}; 498};
495 499
500struct macb_dma_desc_64 {
501 u32 addrh;
502 u32 resvd;
503};
504#endif
505
496/* DMA descriptor bitfields */ 506/* DMA descriptor bitfields */
497#define MACB_RX_USED_OFFSET 0 507#define MACB_RX_USED_OFFSET 0
498#define MACB_RX_USED_SIZE 1 508#define MACB_RX_USED_SIZE 1
@@ -874,6 +884,10 @@ struct macb {
874 unsigned int jumbo_max_len; 884 unsigned int jumbo_max_len;
875 885
876 u32 wol; 886 u32 wol;
887
888#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
889 enum macb_hw_dma_cap hw_dma_cap;
890#endif
877}; 891};
878 892
879static inline bool macb_is_gem(struct macb *bp) 893static inline bool macb_is_gem(struct macb *bp)
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
index 92be2cd8f817..9906fda76087 100644
--- a/drivers/net/ethernet/cadence/macb_pci.c
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * macb_pci.c - Cadence GEM PCI wrapper. 2 * Cadence GEM PCI wrapper.
3 * 3 *
4 * Copyright (C) 2016 Cadence Design Systems - http://www.cadence.com 4 * Copyright (C) 2016 Cadence Design Systems - http://www.cadence.com
5 * 5 *
@@ -45,32 +45,27 @@ static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
45 struct macb_platform_data plat_data; 45 struct macb_platform_data plat_data;
46 struct resource res[2]; 46 struct resource res[2];
47 47
48 /* sanity check */
49 if (!id)
50 return -EINVAL;
51
52 /* enable pci device */ 48 /* enable pci device */
53 err = pci_enable_device(pdev); 49 err = pcim_enable_device(pdev);
54 if (err < 0) { 50 if (err < 0) {
55 dev_err(&pdev->dev, "Enabling PCI device has failed: 0x%04X", 51 dev_err(&pdev->dev, "Enabling PCI device has failed: %d", err);
56 err); 52 return err;
57 return -EACCES;
58 } 53 }
59 54
60 pci_set_master(pdev); 55 pci_set_master(pdev);
61 56
62 /* set up resources */ 57 /* set up resources */
63 memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res)); 58 memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
64 res[0].start = pdev->resource[0].start; 59 res[0].start = pci_resource_start(pdev, 0);
65 res[0].end = pdev->resource[0].end; 60 res[0].end = pci_resource_end(pdev, 0);
66 res[0].name = PCI_DRIVER_NAME; 61 res[0].name = PCI_DRIVER_NAME;
67 res[0].flags = IORESOURCE_MEM; 62 res[0].flags = IORESOURCE_MEM;
68 res[1].start = pdev->irq; 63 res[1].start = pci_irq_vector(pdev, 0);
69 res[1].name = PCI_DRIVER_NAME; 64 res[1].name = PCI_DRIVER_NAME;
70 res[1].flags = IORESOURCE_IRQ; 65 res[1].flags = IORESOURCE_IRQ;
71 66
72 dev_info(&pdev->dev, "EMAC physical base addr = 0x%p\n", 67 dev_info(&pdev->dev, "EMAC physical base addr: %pa\n",
73 (void *)(uintptr_t)pci_resource_start(pdev, 0)); 68 &res[0].start);
74 69
75 /* set up macb platform data */ 70 /* set up macb platform data */
76 memset(&plat_data, 0, sizeof(plat_data)); 71 memset(&plat_data, 0, sizeof(plat_data));
@@ -100,7 +95,7 @@ static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
100 plat_info.num_res = ARRAY_SIZE(res); 95 plat_info.num_res = ARRAY_SIZE(res);
101 plat_info.data = &plat_data; 96 plat_info.data = &plat_data;
102 plat_info.size_data = sizeof(plat_data); 97 plat_info.size_data = sizeof(plat_data);
103 plat_info.dma_mask = DMA_BIT_MASK(32); 98 plat_info.dma_mask = pdev->dma_mask;
104 99
105 /* register platform device */ 100 /* register platform device */
106 plat_dev = platform_device_register_full(&plat_info); 101 plat_dev = platform_device_register_full(&plat_info);
@@ -120,7 +115,6 @@ err_hclk_register:
120 clk_unregister(plat_data.pclk); 115 clk_unregister(plat_data.pclk);
121 116
122err_pclk_register: 117err_pclk_register:
123 pci_disable_device(pdev);
124 return err; 118 return err;
125} 119}
126 120
@@ -130,7 +124,6 @@ static void macb_remove(struct pci_dev *pdev)
130 struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev); 124 struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
131 125
132 platform_device_unregister(plat_dev); 126 platform_device_unregister(plat_dev);
133 pci_disable_device(pdev);
134 clk_unregister(plat_data->pclk); 127 clk_unregister(plat_data->pclk);
135 clk_unregister(plat_data->hclk); 128 clk_unregister(plat_data->hclk);
136} 129}
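[Note on the macb_pci hunks above] The probe switches to pcim_enable_device(), the managed (devres) variant of pci_enable_device(): the PCI core disables the device automatically when the driver detaches, which is why the explicit pci_disable_device() calls disappear from both the error path and macb_remove(). The resulting probe prologue is simply:

	err = pcim_enable_device(pdev);	/* managed: auto-disabled on detach */
	if (err < 0) {
		dev_err(&pdev->dev, "Enabling PCI device has failed: %d", err);
		return err;		/* no pci_disable_device() unwind needed */
	}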
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index bbc8bd16cb97..dcbce6cac63e 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -77,7 +77,7 @@ config OCTEON_MGMT_ETHERNET
77config LIQUIDIO_VF 77config LIQUIDIO_VF
78 tristate "Cavium LiquidIO VF support" 78 tristate "Cavium LiquidIO VF support"
79 depends on 64BIT && PCI_MSI 79 depends on 64BIT && PCI_MSI
80 select PTP_1588_CLOCK 80 imply PTP_1588_CLOCK
81 ---help--- 81 ---help---
82 This driver supports Cavium LiquidIO Intelligent Server Adapter 82 This driver supports Cavium LiquidIO Intelligent Server Adapter
83 based on CN23XX chips. 83 based on CN23XX chips.
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9211c750e064..1e4695270da6 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -31,6 +31,7 @@ struct lmac {
31 u8 lmac_type; 31 u8 lmac_type;
32 u8 lane_to_sds; 32 u8 lane_to_sds;
33 bool use_training; 33 bool use_training;
34 bool autoneg;
34 bool link_up; 35 bool link_up;
35 int lmacid; /* ID within BGX */ 36 int lmacid; /* ID within BGX */
36 int lmacid_bd; /* ID on board */ 37 int lmacid_bd; /* ID on board */
@@ -47,8 +48,9 @@ struct lmac {
47struct bgx { 48struct bgx {
48 u8 bgx_id; 49 u8 bgx_id;
49 struct lmac lmac[MAX_LMAC_PER_BGX]; 50 struct lmac lmac[MAX_LMAC_PER_BGX];
50 int lmac_count; 51 u8 lmac_count;
51 u8 max_lmac; 52 u8 max_lmac;
53 u8 acpi_lmac_idx;
52 void __iomem *reg_base; 54 void __iomem *reg_base;
53 struct pci_dev *pdev; 55 struct pci_dev *pdev;
54 bool is_dlm; 56 bool is_dlm;
@@ -460,7 +462,17 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
460 /* power down, reset autoneg, autoneg enable */ 462 /* power down, reset autoneg, autoneg enable */
461 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); 463 cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
462 cfg &= ~PCS_MRX_CTL_PWR_DN; 464 cfg &= ~PCS_MRX_CTL_PWR_DN;
463 cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); 465 cfg |= PCS_MRX_CTL_RST_AN;
466 if (lmac->phydev) {
467 cfg |= PCS_MRX_CTL_AN_EN;
468 } else {
469 /* When no PHY driver is present, or the PHY is non-standard,
470 * firmware sets AN_EN to tell the Linux driver whether to do
471 * auto-negotiation and link polling.
472 */
473 if (cfg & PCS_MRX_CTL_AN_EN)
474 lmac->autoneg = true;
475 }
464 bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); 476 bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
465 477
466 if (lmac->lmac_type == BGX_MODE_QSGMII) { 478 if (lmac->lmac_type == BGX_MODE_QSGMII) {
@@ -471,7 +483,7 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
471 return 0; 483 return 0;
472 } 484 }
473 485
474 if (lmac->lmac_type == BGX_MODE_SGMII) { 486 if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
475 if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, 487 if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
476 PCS_MRX_STATUS_AN_CPT, false)) { 488 PCS_MRX_STATUS_AN_CPT, false)) {
477 dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); 489 dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
@@ -677,12 +689,71 @@ static int bgx_xaui_check_link(struct lmac *lmac)
677 return -1; 689 return -1;
678} 690}
679 691
692static void bgx_poll_for_sgmii_link(struct lmac *lmac)
693{
694 u64 pcs_link, an_result;
695 u8 speed;
696
697 pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
698 BGX_GMP_PCS_MRX_STATUS);
699
700 /* Link state bit is sticky, read it again */
701 if (!(pcs_link & PCS_MRX_STATUS_LINK))
702 pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
703 BGX_GMP_PCS_MRX_STATUS);
704
705 if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
706 PCS_MRX_STATUS_AN_CPT, false)) {
707 lmac->link_up = false;
708 lmac->last_speed = SPEED_UNKNOWN;
709 lmac->last_duplex = DUPLEX_UNKNOWN;
710 goto next_poll;
711 }
712
713 lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
714 an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
715 BGX_GMP_PCS_ANX_AN_RESULTS);
716
717 speed = (an_result >> 3) & 0x3;
718 lmac->last_duplex = (an_result >> 1) & 0x1;
719 switch (speed) {
720 case 0:
721 lmac->last_speed = 10;
722 break;
723 case 1:
724 lmac->last_speed = 100;
725 break;
726 case 2:
727 lmac->last_speed = 1000;
728 break;
729 default:
730 lmac->link_up = false;
731 lmac->last_speed = SPEED_UNKNOWN;
732 lmac->last_duplex = DUPLEX_UNKNOWN;
733 break;
734 }
735
736next_poll:
737
738 if (lmac->last_link != lmac->link_up) {
739 if (lmac->link_up)
740 bgx_sgmii_change_link_state(lmac);
741 lmac->last_link = lmac->link_up;
742 }
743
744 queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
745}
746
680static void bgx_poll_for_link(struct work_struct *work) 747static void bgx_poll_for_link(struct work_struct *work)
681{ 748{
682 struct lmac *lmac; 749 struct lmac *lmac;
683 u64 spu_link, smu_link; 750 u64 spu_link, smu_link;
684 751
685 lmac = container_of(work, struct lmac, dwork.work); 752 lmac = container_of(work, struct lmac, dwork.work);
753 if (lmac->is_sgmii) {
754 bgx_poll_for_sgmii_link(lmac);
755 return;
756 }
686 757
687 /* Receive link is latching low. Force it high and verify it */ 758 /* Receive link is latching low. Force it high and verify it */
688 bgx_reg_modify(lmac->bgx, lmac->lmacid, 759 bgx_reg_modify(lmac->bgx, lmac->lmacid,
@@ -774,9 +845,21 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
774 (lmac->lmac_type != BGX_MODE_XLAUI) && 845 (lmac->lmac_type != BGX_MODE_XLAUI) &&
775 (lmac->lmac_type != BGX_MODE_40G_KR) && 846 (lmac->lmac_type != BGX_MODE_40G_KR) &&
776 (lmac->lmac_type != BGX_MODE_10G_KR)) { 847 (lmac->lmac_type != BGX_MODE_10G_KR)) {
777 if (!lmac->phydev) 848 if (!lmac->phydev) {
778 return -ENODEV; 849 if (lmac->autoneg) {
779 850 bgx_reg_write(bgx, lmacid,
851 BGX_GMP_PCS_LINKX_TIMER,
852 PCS_LINKX_TIMER_COUNT);
853 goto poll;
854 } else {
855 /* Default to the link speed and duplex set below */
856 lmac->link_up = true;
857 lmac->last_speed = 1000;
858 lmac->last_duplex = 1;
859 bgx_sgmii_change_link_state(lmac);
860 return 0;
861 }
862 }
780 lmac->phydev->dev_flags = 0; 863 lmac->phydev->dev_flags = 0;
781 864
782 if (phy_connect_direct(&lmac->netdev, lmac->phydev, 865 if (phy_connect_direct(&lmac->netdev, lmac->phydev,
@@ -785,15 +868,17 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
785 return -ENODEV; 868 return -ENODEV;
786 869
787 phy_start_aneg(lmac->phydev); 870 phy_start_aneg(lmac->phydev);
788 } else { 871 return 0;
789 lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
790 WQ_MEM_RECLAIM, 1);
791 if (!lmac->check_link)
792 return -ENOMEM;
793 INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
794 queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
795 } 872 }
796 873
874poll:
875 lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
876 WQ_MEM_RECLAIM, 1);
877 if (!lmac->check_link)
878 return -ENOMEM;
879 INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
880 queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
881
797 return 0; 882 return 0;
798} 883}
799 884
@@ -1143,13 +1228,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
1143 if (acpi_bus_get_device(handle, &adev)) 1228 if (acpi_bus_get_device(handle, &adev))
1144 goto out; 1229 goto out;
1145 1230
1146 acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac); 1231 acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
1147 1232
1148 SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev); 1233 SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
1149 1234
1150 bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; 1235 bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
1236 bgx->acpi_lmac_idx++; /* move to next LMAC */
1151out: 1237out:
1152 bgx->lmac_count++;
1153 return AE_OK; 1238 return AE_OK;
1154} 1239}
1155 1240
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index c18ebfeb2039..a60f189429bb 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -153,10 +153,15 @@
153#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14) 153#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
154#define PCS_MRX_CTL_RESET BIT_ULL(15) 154#define PCS_MRX_CTL_RESET BIT_ULL(15)
155#define BGX_GMP_PCS_MRX_STATUS 0x30008 155#define BGX_GMP_PCS_MRX_STATUS 0x30008
156#define PCS_MRX_STATUS_LINK BIT_ULL(2)
156#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5) 157#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
158#define BGX_GMP_PCS_ANX_ADV 0x30010
157#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020 159#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
160#define BGX_GMP_PCS_LINKX_TIMER 0x30040
161#define PCS_LINKX_TIMER_COUNT 0x1E84
158#define BGX_GMP_PCS_SGM_AN_ADV 0x30068 162#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
159#define BGX_GMP_PCS_MISCX_CTL 0x30078 163#define BGX_GMP_PCS_MISCX_CTL 0x30078
164#define PCS_MISC_CTL_MODE BIT_ULL(8)
160#define PCS_MISC_CTL_DISP_EN BIT_ULL(13) 165#define PCS_MISC_CTL_DISP_EN BIT_ULL(13)
161#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11) 166#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
162#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full 167#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 67befedef709..578c7f8f11bf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
116 int speed = 2; 116 int speed = 2;
117 117
118 if (!xcv) { 118 if (!xcv) {
119 dev_err(&xcv->pdev->dev, 119 pr_err("XCV init not done, probe may have failed\n");
120 "XCV init not done, probe may have failed\n");
121 return; 120 return;
122 } 121 }
123 122
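[Note on the thunder_xcv hunk above] The one-liner fixes a NULL dereference in an error message: the old code reached for &xcv->pdev->dev on exactly the branch that had just established that xcv is NULL, so the device-aware dev_err() is replaced with plain pr_err(). In miniature:

	if (!xcv) {
		/* dev_err(&xcv->pdev->dev, ...) would dereference NULL here */
		pr_err("XCV init not done, probe may have failed\n");
		return;
	}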
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
index 0f0de5b63622..d04a6c163445 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -133,17 +133,15 @@ cxgb_find_route6(struct cxgb4_lld_info *lldi,
133 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) 133 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
134 fl6.flowi6_oif = sin6_scope_id; 134 fl6.flowi6_oif = sin6_scope_id;
135 dst = ip6_route_output(&init_net, NULL, &fl6); 135 dst = ip6_route_output(&init_net, NULL, &fl6);
136 if (!dst) 136 if (dst->error ||
137 goto out; 137 (!cxgb_our_interface(lldi, get_real_dev,
138 if (!cxgb_our_interface(lldi, get_real_dev, 138 ip6_dst_idev(dst)->dev) &&
139 ip6_dst_idev(dst)->dev) && 139 !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) {
140 !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
141 dst_release(dst); 140 dst_release(dst);
142 dst = NULL; 141 return NULL;
143 } 142 }
144 } 143 }
145 144
146out:
147 return dst; 145 return dst;
148} 146}
149EXPORT_SYMBOL(cxgb_find_route6); 147EXPORT_SYMBOL(cxgb_find_route6);
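[Note on the libcxgb hunk above] The change corrects a misuse of the IPv6 routing API: ip6_route_output() never returns NULL. On failure it returns a valid dst_entry with a nonzero ->error, so the old `if (!dst)` check could never fire and errors sailed through. The correct shape, as a kernel-style fragment:

	dst = ip6_route_output(&init_net, NULL, &fl6);
	if (dst->error) {		/* never NULL; errors live in dst->error */
		dst_release(dst);	/* a reference is held even on failure */
		return NULL;
	}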
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 0e74529a4209..30e855004c57 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1118err: 1118err:
1119 mutex_unlock(&adapter->mcc_lock); 1119 mutex_unlock(&adapter->mcc_lock);
1120 1120
1121 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) 1121 if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
1122 status = -EPERM; 1122 status = -EPERM;
1123 1123
1124 return status; 1124 return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 7e1633bf5a22..cd49a54c538d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -275,8 +275,7 @@ static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
275 275
276 /* Check if mac has already been added as part of uc-list */ 276 /* Check if mac has already been added as part of uc-list */
277 for (i = 0; i < adapter->uc_macs; i++) { 277 for (i = 0; i < adapter->uc_macs; i++) {
278 if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN], 278 if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
279 mac)) {
280 /* mac already added, skip addition */ 279 /* mac already added, skip addition */
281 adapter->pmac_id[0] = adapter->pmac_id[i + 1]; 280 adapter->pmac_id[0] = adapter->pmac_id[i + 1];
282 return 0; 281 return 0;
@@ -319,6 +318,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
319 if (ether_addr_equal(addr->sa_data, adapter->dev_mac)) 318 if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
320 return 0; 319 return 0;
321 320
321 /* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC
322 * address
323 */
324 if (BEx_chip(adapter) && be_virtfn(adapter) &&
325 !check_privilege(adapter, BE_PRIV_FILTMGMT))
326 return -EPERM;
327
322 /* if device is not running, copy MAC to netdev->dev_addr */ 328 /* if device is not running, copy MAC to netdev->dev_addr */
323 if (!netif_running(netdev)) 329 if (!netif_running(netdev))
324 goto done; 330 goto done;
@@ -356,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
356 status = -EPERM; 362 status = -EPERM;
357 goto err; 363 goto err;
358 } 364 }
359done: 365
366 /* Remember currently programmed MAC */
360 ether_addr_copy(adapter->dev_mac, addr->sa_data); 367 ether_addr_copy(adapter->dev_mac, addr->sa_data);
368done:
361 ether_addr_copy(netdev->dev_addr, addr->sa_data); 369 ether_addr_copy(netdev->dev_addr, addr->sa_data);
362 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); 370 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
363 return 0; 371 return 0;
@@ -1655,14 +1663,12 @@ static void be_clear_mc_list(struct be_adapter *adapter)
1655 1663
1656static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx) 1664static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1657{ 1665{
1658 if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN], 1666 if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
1659 adapter->dev_mac)) {
1660 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0]; 1667 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1661 return 0; 1668 return 0;
1662 } 1669 }
1663 1670
1664 return be_cmd_pmac_add(adapter, 1671 return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
1665 (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
1666 adapter->if_handle, 1672 adapter->if_handle,
1667 &adapter->pmac_id[uc_idx + 1], 0); 1673 &adapter->pmac_id[uc_idx + 1], 0);
1668} 1674}
@@ -1698,9 +1704,8 @@ static void be_set_uc_list(struct be_adapter *adapter)
1698 } 1704 }
1699 1705
1700 if (adapter->update_uc_list) { 1706 if (adapter->update_uc_list) {
1701 i = 1; /* First slot is claimed by the Primary MAC */
1702
1703 /* cache the uc-list in adapter array */ 1707 /* cache the uc-list in adapter array */
1708 i = 0;
1704 netdev_for_each_uc_addr(ha, netdev) { 1709 netdev_for_each_uc_addr(ha, netdev) {
1705 ether_addr_copy(adapter->uc_list[i].mac, ha->addr); 1710 ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1706 i++; 1711 i++;
@@ -3613,7 +3618,13 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
3613 3618
3614static void be_disable_if_filters(struct be_adapter *adapter) 3619static void be_disable_if_filters(struct be_adapter *adapter)
3615{ 3620{
3616 be_dev_mac_del(adapter, adapter->pmac_id[0]); 3621 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
3622 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
3623 check_privilege(adapter, BE_PRIV_FILTMGMT)) {
3624 be_dev_mac_del(adapter, adapter->pmac_id[0]);
3625 eth_zero_addr(adapter->dev_mac);
3626 }
3627
3617 be_clear_uc_list(adapter); 3628 be_clear_uc_list(adapter);
3618 be_clear_mc_list(adapter); 3629 be_clear_mc_list(adapter);
3619 3630
@@ -3766,11 +3777,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
3766 if (status) 3777 if (status)
3767 return status; 3778 return status;
3768 3779
3769 /* For BE3 VFs, the PF programs the initial MAC address */ 3780 /* Normally this condition is true as ->dev_mac is zeroed.
3770 if (!(BEx_chip(adapter) && be_virtfn(adapter))) { 3781 * But on BE3 VFs the initial MAC is pre-programmed by PF and
3782 * subsequent be_dev_mac_add() can fail (after fresh boot)
3783 */
3784 if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
3785 int old_pmac_id = -1;
3786
3787 /* Remember old programmed MAC if any - can happen on BE3 VF */
3788 if (!is_zero_ether_addr(adapter->dev_mac))
3789 old_pmac_id = adapter->pmac_id[0];
3790
3771 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr); 3791 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
3772 if (status) 3792 if (status)
3773 return status; 3793 return status;
3794
3795 /* Delete the old programmed MAC as we successfully programmed
3796 * a new MAC
3797 */
3798 if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
3799 be_dev_mac_del(adapter, old_pmac_id);
3800
3774 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr); 3801 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
3775 } 3802 }
3776 3803
@@ -4544,6 +4571,10 @@ static int be_mac_setup(struct be_adapter *adapter)
4544 4571
4545 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 4572 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4546 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 4573 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4574
4575 /* Initial MAC for BE3 VFs is already programmed by PF */
4576 if (BEx_chip(adapter) && be_virtfn(adapter))
4577 memcpy(adapter->dev_mac, mac, ETH_ALEN);
4547 } 4578 }
4548 4579
4549 return 0; 4580 return 0;
@@ -5155,7 +5186,9 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
5155 skb->inner_protocol_type != ENCAP_TYPE_ETHER || 5186 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5156 skb->inner_protocol != htons(ETH_P_TEB) || 5187 skb->inner_protocol != htons(ETH_P_TEB) ||
5157 skb_inner_mac_header(skb) - skb_transport_header(skb) != 5188 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5158 sizeof(struct udphdr) + sizeof(struct vxlanhdr)) 5189 sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
5190 !adapter->vxlan_port ||
5191 udp_hdr(skb)->dest != adapter->vxlan_port)
5159 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 5192 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
5160 5193
5161 return features; 5194 return features;
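Note on the be_features_check() hunk above: a tunnelled skb may only keep hardware checksum/GSO offloads when its outer UDP destination port matches the VXLAN port the NIC was actually programmed with; anything else must fall back to software. A minimal sketch of that shape, assuming the outer headers were already validated (as the full check above does) and using a hypothetical demo_adapter with the offloaded port cached in network byte order — not the exact benet layout:

    #include <linux/netdevice.h>
    #include <linux/udp.h>

    struct demo_adapter {
            __be16 vxlan_port;      /* offloaded VXLAN port, 0 if none */
    };

    static netdev_features_t demo_features_check(struct sk_buff *skb,
                                                  struct net_device *dev,
                                                  netdev_features_t features)
    {
            struct demo_adapter *adapter = netdev_priv(dev);

            /* only frames targeting the offloaded tunnel port keep offloads */
            if (skb->encapsulation &&
                (!adapter->vxlan_port ||
                 udp_hdr(skb)->dest != adapter->vxlan_port))
                    return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

            return features;
    }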
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 624ba9058dc4..726b5693ae8a 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -733,6 +733,7 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
733 priv->cgr_data.cgr.cb = dpaa_eth_cgscn; 733 priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
734 734
735 /* Enable Congestion State Change Notifications and CS taildrop */ 735 /* Enable Congestion State Change Notifications and CS taildrop */
736 memset(&initcgr, 0, sizeof(initcgr));
736 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES); 737 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
737 initcgr.cgr.cscn_en = QM_CGR_EN; 738 initcgr.cgr.cscn_en = QM_CGR_EN;
738 739
@@ -1667,7 +1668,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
1667 1668
1668free_buffers: 1669free_buffers:
1669 /* compensate sw bpool counter changes */ 1670 /* compensate sw bpool counter changes */
1670 for (i--; i > 0; i--) { 1671 for (i--; i >= 0; i--) {
1671 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); 1672 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1672 if (dpaa_bp) { 1673 if (dpaa_bp) {
1673 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); 1674 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
@@ -2291,7 +2292,8 @@ static int dpaa_open(struct net_device *net_dev)
2291 net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev); 2292 net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
2292 if (!net_dev->phydev) { 2293 if (!net_dev->phydev) {
2293 netif_err(priv, ifup, net_dev, "init_phy() failed\n"); 2294 netif_err(priv, ifup, net_dev, "init_phy() failed\n");
2294 return -ENODEV; 2295 err = -ENODEV;
2296 goto phy_init_failed;
2295 } 2297 }
2296 2298
2297 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { 2299 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
@@ -2314,6 +2316,7 @@ mac_start_failed:
2314 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) 2316 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
2315 fman_port_disable(mac_dev->port[i]); 2317 fman_port_disable(mac_dev->port[i]);
2316 2318
2319phy_init_failed:
2317 dpaa_eth_napi_disable(priv); 2320 dpaa_eth_napi_disable(priv);
2318 2321
2319 return err; 2322 return err;
@@ -2420,6 +2423,7 @@ static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
2420 } 2423 }
2421 2424
2422 /* Enable CS TD, but disable Congestion State Change Notifications. */ 2425 /* Enable CS TD, but disable Congestion State Change Notifications. */
2426 memset(&initcgr, 0, sizeof(initcgr));
2423 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES); 2427 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
2424 initcgr.cgr.cscn_en = QM_CGR_EN; 2428 initcgr.cgr.cscn_en = QM_CGR_EN;
2425 cs_th = DPAA_INGRESS_CS_THRESHOLD; 2429 cs_th = DPAA_INGRESS_CS_THRESHOLD;
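Two of the dpaa fixes above share a theme: the added memset() calls zero a stack-allocated initcgr before individual fields are set, and the free_buffers unwind loop is corrected from "i > 0" to "i >= 0" so the element at index 0 is released as well. A standalone C illustration of the unwind off-by-one (hypothetical acquire/release flags, not the dpaa pool API):

    #include <stdio.h>

    #define N 4

    int main(void)
    {
            int held[N] = {0};
            int i;

            for (i = 0; i < N; i++)
                    held[i] = 1;

            /* buggy unwind, as before the fix: "i > 0" skips index 0 */
            for (i = N - 1; i > 0; i--)
                    held[i] = 0;
            printf("buggy unwind leaks index 0: %s\n", held[0] ? "yes" : "no");

            for (i = 0; i < N; i++)
                    held[i] = 1;

            /* fixed unwind: "i >= 0" releases every acquired element */
            for (i = N - 1; i >= 0; i--)
                    held[i] = 0;
            printf("fixed unwind leaks index 0: %s\n", held[0] ? "yes" : "no");
            return 0;
    }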
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 38160c2bebcb..8be7034b2e7b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2910,6 +2910,7 @@ static void set_multicast_list(struct net_device *ndev)
2910 struct netdev_hw_addr *ha; 2910 struct netdev_hw_addr *ha;
2911 unsigned int i, bit, data, crc, tmp; 2911 unsigned int i, bit, data, crc, tmp;
2912 unsigned char hash; 2912 unsigned char hash;
2913 unsigned int hash_high = 0, hash_low = 0;
2913 2914
2914 if (ndev->flags & IFF_PROMISC) { 2915 if (ndev->flags & IFF_PROMISC) {
2915 tmp = readl(fep->hwp + FEC_R_CNTRL); 2916 tmp = readl(fep->hwp + FEC_R_CNTRL);
@@ -2932,11 +2933,7 @@ static void set_multicast_list(struct net_device *ndev)
2932 return; 2933 return;
2933 } 2934 }
2934 2935
2935 /* Clear filter and add the addresses in hash register 2936 /* Add the addresses in hash register */
2936 */
2937 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2938 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2939
2940 netdev_for_each_mc_addr(ha, ndev) { 2937 netdev_for_each_mc_addr(ha, ndev) {
2941 /* calculate crc32 value of mac address */ 2938 /* calculate crc32 value of mac address */
2942 crc = 0xffffffff; 2939 crc = 0xffffffff;
@@ -2954,16 +2951,14 @@ static void set_multicast_list(struct net_device *ndev)
2954 */ 2951 */
2955 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; 2952 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
2956 2953
2957 if (hash > 31) { 2954 if (hash > 31)
2958 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2955 hash_high |= 1 << (hash - 32);
2959 tmp |= 1 << (hash - 32); 2956 else
2960 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2957 hash_low |= 1 << hash;
2961 } else {
2962 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2963 tmp |= 1 << hash;
2964 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2965 }
2966 } 2958 }
2959
2960 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
2961 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
2967} 2962}
2968 2963
2969/* Set a MAC change in hardware. */ 2964/* Set a MAC change in hardware. */
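The set_multicast_list() rework above accumulates the filter into local hash_high/hash_low words and writes each hardware register exactly once, instead of a read-modify-write per address; as a side effect, stale bits from the previous filter are cleared without explicit zeroing writes. The hash is the top FEC_HASH_BITS (6) bits of a bitwise CRC-32 over the MAC address. A runnable userspace sketch of the derivation, assuming the reflected CRC-32 polynomial 0xEDB88320 that the driver uses:

    #include <stdio.h>
    #include <stdint.h>

    #define CRC32_POLY      0xEDB88320u     /* reflected CRC-32 */
    #define FEC_HASH_BITS   6               /* 64-bit filter, two registers */

    int main(void)
    {
            const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
            uint32_t crc = 0xffffffffu;
            uint32_t hash_high = 0, hash_low = 0;
            unsigned int i, bit, hash;

            /* bitwise CRC-32 over the address, least significant bit first */
            for (i = 0; i < sizeof(mac); i++) {
                    uint32_t data = mac[i];

                    for (bit = 0; bit < 8; bit++, data >>= 1)
                            crc = (crc >> 1) ^ (((crc ^ data) & 1) ? CRC32_POLY : 0);
            }

            /* only the top FEC_HASH_BITS bits select a filter bit (0..63) */
            hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;

            /* accumulate locally; the driver writes each register once */
            if (hash > 31)
                    hash_high |= 1u << (hash - 32);
            else
                    hash_low |= 1u << hash;

            printf("hash=%u high=%08x low=%08x\n",
                   hash, (unsigned)hash_high, (unsigned)hash_low);
            return 0;
    }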
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a6e7afa878be..957bfc220978 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
2010 if (!rxb->page) 2010 if (!rxb->page)
2011 continue; 2011 continue;
2012 2012
2013 dma_unmap_single(rx_queue->dev, rxb->dma, 2013 dma_unmap_page(rx_queue->dev, rxb->dma,
2014 PAGE_SIZE, DMA_FROM_DEVICE); 2014 PAGE_SIZE, DMA_FROM_DEVICE);
2015 __free_page(rxb->page); 2015 __free_page(rxb->page);
2016 2016
2017 rxb->page = NULL; 2017 rxb->page = NULL;
@@ -2948,7 +2948,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2948 } 2948 }
2949 2949
2950 /* try reuse page */ 2950 /* try reuse page */
2951 if (unlikely(page_count(page) != 1)) 2951 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2952 return false; 2952 return false;
2953 2953
2954 /* change offset to the other half */ 2954 /* change offset to the other half */
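Both gianfar hunks are small but load-bearing: a buffer mapped with dma_map_page() must be torn down with dma_unmap_page(), since the _single and _page variants are not interchangeable under all DMA-API backends, and an RX page must not be recycled when it came from the pfmemalloc emergency reserves. A sketch of the matched pair and the reuse test, with the surrounding driver context elided:

    /* setup: map a freshly allocated page for device writes */
    dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

    /* teardown: must mirror the mapping call exactly */
    dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);

    /* reuse: recycle only exclusively-owned, non-pfmemalloc pages */
    if (page_count(page) != 1 || page_is_pfmemalloc(page))
            return false;   /* hand the page back to the allocator instead */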
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 87226685f742..8fa18fc17cd2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1014,9 +1014,7 @@
1014 1014
1015static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) 1015static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
1016{ 1016{
1017 u8 __iomem *reg_addr = ACCESS_ONCE(base); 1017 writel(value, base + reg);
1018
1019 writel(value, reg_addr + reg);
1020} 1018}
1021 1019
1022#define dsaf_write_dev(a, reg, value) \ 1020#define dsaf_write_dev(a, reg, value) \
@@ -1024,9 +1022,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
1024 1022
1025static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg) 1023static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
1026{ 1024{
1027 u8 __iomem *reg_addr = ACCESS_ONCE(base); 1025 return readl(base + reg);
1028
1029 return readl(reg_addr + reg);
1030} 1026}
1031 1027
1032static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value) 1028static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 672b64606321..8aed72860e7c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -305,8 +305,8 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
305 struct hns_nic_ring_data *ring_data) 305 struct hns_nic_ring_data *ring_data)
306{ 306{
307 struct hns_nic_priv *priv = netdev_priv(ndev); 307 struct hns_nic_priv *priv = netdev_priv(ndev);
308 struct device *dev = priv->dev;
309 struct hnae_ring *ring = ring_data->ring; 308 struct hnae_ring *ring = ring_data->ring;
309 struct device *dev = ring_to_dev(ring);
310 struct netdev_queue *dev_queue; 310 struct netdev_queue *dev_queue;
311 struct skb_frag_struct *frag; 311 struct skb_frag_struct *frag;
312 int buf_num; 312 int buf_num;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index a831f947ca8c..309f5c66083c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1601,8 +1601,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1601 netdev->netdev_ops = &ibmveth_netdev_ops; 1601 netdev->netdev_ops = &ibmveth_netdev_ops;
1602 netdev->ethtool_ops = &netdev_ethtool_ops; 1602 netdev->ethtool_ops = &netdev_ethtool_ops;
1603 SET_NETDEV_DEV(netdev, &dev->dev); 1603 SET_NETDEV_DEV(netdev, &dev->dev);
1604 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | 1604 netdev->hw_features = NETIF_F_SG;
1605 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1605 if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
1606 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1607 NETIF_F_RXCSUM;
1608 }
1606 1609
1607 netdev->features |= netdev->hw_features; 1610 netdev->features |= netdev->hw_features;
1608 1611
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c12596676bbb..a07b8d79174c 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -189,9 +189,10 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
189 } 189 }
190 ltb->map_id = adapter->map_id; 190 ltb->map_id = adapter->map_id;
191 adapter->map_id++; 191 adapter->map_id++;
192
193 init_completion(&adapter->fw_done);
192 send_request_map(adapter, ltb->addr, 194 send_request_map(adapter, ltb->addr,
193 ltb->size, ltb->map_id); 195 ltb->size, ltb->map_id);
194 init_completion(&adapter->fw_done);
195 wait_for_completion(&adapter->fw_done); 196 wait_for_completion(&adapter->fw_done);
196 return 0; 197 return 0;
197} 198}
@@ -505,7 +506,7 @@ rx_pool_alloc_failed:
505 adapter->rx_pool = NULL; 506 adapter->rx_pool = NULL;
506rx_pool_arr_alloc_failed: 507rx_pool_arr_alloc_failed:
507 for (i = 0; i < adapter->req_rx_queues; i++) 508 for (i = 0; i < adapter->req_rx_queues; i++)
508 napi_enable(&adapter->napi[i]); 509 napi_disable(&adapter->napi[i]);
509alloc_napi_failed: 510alloc_napi_failed:
510 return -ENOMEM; 511 return -ENOMEM;
511} 512}
@@ -1121,10 +1122,10 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1121 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); 1122 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1122 crq.request_statistics.len = 1123 crq.request_statistics.len =
1123 cpu_to_be32(sizeof(struct ibmvnic_statistics)); 1124 cpu_to_be32(sizeof(struct ibmvnic_statistics));
1124 ibmvnic_send_crq(adapter, &crq);
1125 1125
1126 /* Wait for data to be written */ 1126 /* Wait for data to be written */
1127 init_completion(&adapter->stats_done); 1127 init_completion(&adapter->stats_done);
1128 ibmvnic_send_crq(adapter, &crq);
1128 wait_for_completion(&adapter->stats_done); 1129 wait_for_completion(&adapter->stats_done);
1129 1130
1130 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) 1131 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
@@ -1496,7 +1497,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1496 adapter->req_rx_queues = adapter->opt_rx_comp_queues; 1497 adapter->req_rx_queues = adapter->opt_rx_comp_queues;
1497 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 1498 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
1498 1499
1499 adapter->req_mtu = adapter->max_mtu; 1500 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
1500 } 1501 }
1501 1502
1502 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; 1503 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
@@ -2185,12 +2186,12 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
2185 2186
2186 if (!found) { 2187 if (!found) {
2187 dev_err(dev, "Couldn't find error id %x\n", 2188 dev_err(dev, "Couldn't find error id %x\n",
2188 crq->request_error_rsp.error_id); 2189 be32_to_cpu(crq->request_error_rsp.error_id));
2189 return; 2190 return;
2190 } 2191 }
2191 2192
2192 dev_err(dev, "Detailed info for error id %x:", 2193 dev_err(dev, "Detailed info for error id %x:",
2193 crq->request_error_rsp.error_id); 2194 be32_to_cpu(crq->request_error_rsp.error_id));
2194 2195
2195 for (i = 0; i < error_buff->len; i++) { 2196 for (i = 0; i < error_buff->len; i++) {
2196 pr_cont("%02x", (int)error_buff->buff[i]); 2197 pr_cont("%02x", (int)error_buff->buff[i]);
@@ -2269,8 +2270,8 @@ static void handle_error_indication(union ibmvnic_crq *crq,
2269 dev_err(dev, "Firmware reports %serror id %x, cause %d\n", 2270 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2270 crq->error_indication. 2271 crq->error_indication.
2271 flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "", 2272 flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
2272 crq->error_indication.error_id, 2273 be32_to_cpu(crq->error_indication.error_id),
2273 crq->error_indication.error_cause); 2274 be16_to_cpu(crq->error_indication.error_cause));
2274 2275
2275 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC); 2276 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2276 if (!error_buff) 2277 if (!error_buff)
@@ -2388,10 +2389,10 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2388 case PARTIALSUCCESS: 2389 case PARTIALSUCCESS:
2389 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", 2390 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2390 *req_value, 2391 *req_value,
2391 (long int)be32_to_cpu(crq->request_capability_rsp. 2392 (long int)be64_to_cpu(crq->request_capability_rsp.
2392 number), name); 2393 number), name);
2393 release_sub_crqs_no_irqs(adapter); 2394 release_sub_crqs_no_irqs(adapter);
2394 *req_value = be32_to_cpu(crq->request_capability_rsp.number); 2395 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
2395 init_sub_crqs(adapter, 1); 2396 init_sub_crqs(adapter, 1);
2396 return; 2397 return;
2397 default: 2398 default:
@@ -2626,12 +2627,12 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2626 break; 2627 break;
2627 case MIN_MTU: 2628 case MIN_MTU:
2628 adapter->min_mtu = be64_to_cpu(crq->query_capability.number); 2629 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
2629 netdev->min_mtu = adapter->min_mtu; 2630 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2630 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); 2631 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2631 break; 2632 break;
2632 case MAX_MTU: 2633 case MAX_MTU:
2633 adapter->max_mtu = be64_to_cpu(crq->query_capability.number); 2634 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
2634 netdev->max_mtu = adapter->max_mtu; 2635 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2635 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); 2636 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2636 break; 2637 break;
2637 case MAX_MULTICAST_FILTERS: 2638 case MAX_MULTICAST_FILTERS:
@@ -2799,9 +2800,9 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2799 crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator; 2800 crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2800 crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok); 2801 crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2801 crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size; 2802 crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
2802 ibmvnic_send_crq(adapter, &crq);
2803 2803
2804 init_completion(&adapter->fw_done); 2804 init_completion(&adapter->fw_done);
2805 ibmvnic_send_crq(adapter, &crq);
2805 wait_for_completion(&adapter->fw_done); 2806 wait_for_completion(&adapter->fw_done);
2806 2807
2807 if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size)) 2808 if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
@@ -3581,9 +3582,9 @@ static int ibmvnic_dump_show(struct seq_file *seq, void *v)
3581 memset(&crq, 0, sizeof(crq)); 3582 memset(&crq, 0, sizeof(crq));
3582 crq.request_dump_size.first = IBMVNIC_CRQ_CMD; 3583 crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
3583 crq.request_dump_size.cmd = REQUEST_DUMP_SIZE; 3584 crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
3584 ibmvnic_send_crq(adapter, &crq);
3585 3585
3586 init_completion(&adapter->fw_done); 3586 init_completion(&adapter->fw_done);
3587 ibmvnic_send_crq(adapter, &crq);
3587 wait_for_completion(&adapter->fw_done); 3588 wait_for_completion(&adapter->fw_done);
3588 3589
3589 seq_write(seq, adapter->dump_data, adapter->dump_data_size); 3590 seq_write(seq, adapter->dump_data, adapter->dump_data_size);
@@ -3629,8 +3630,8 @@ static void handle_crq_init_rsp(struct work_struct *work)
3629 } 3630 }
3630 } 3631 }
3631 3632
3632 send_version_xchg(adapter);
3633 reinit_completion(&adapter->init_done); 3633 reinit_completion(&adapter->init_done);
3634 send_version_xchg(adapter);
3634 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 3635 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3635 dev_err(dev, "Passive init timeout\n"); 3636 dev_err(dev, "Passive init timeout\n");
3636 goto task_failed; 3637 goto task_failed;
@@ -3640,9 +3641,9 @@ static void handle_crq_init_rsp(struct work_struct *work)
3640 if (adapter->renegotiate) { 3641 if (adapter->renegotiate) {
3641 adapter->renegotiate = false; 3642 adapter->renegotiate = false;
3642 release_sub_crqs_no_irqs(adapter); 3643 release_sub_crqs_no_irqs(adapter);
3643 send_cap_queries(adapter);
3644 3644
3645 reinit_completion(&adapter->init_done); 3645 reinit_completion(&adapter->init_done);
3646 send_cap_queries(adapter);
3646 if (!wait_for_completion_timeout(&adapter->init_done, 3647 if (!wait_for_completion_timeout(&adapter->init_done,
3647 timeout)) { 3648 timeout)) {
3648 dev_err(dev, "Passive init timeout\n"); 3649 dev_err(dev, "Passive init timeout\n");
@@ -3656,9 +3657,7 @@ static void handle_crq_init_rsp(struct work_struct *work)
3656 goto task_failed; 3657 goto task_failed;
3657 3658
3658 netdev->real_num_tx_queues = adapter->req_tx_queues; 3659 netdev->real_num_tx_queues = adapter->req_tx_queues;
3659 netdev->mtu = adapter->req_mtu; 3660 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3660 netdev->min_mtu = adapter->min_mtu;
3661 netdev->max_mtu = adapter->max_mtu;
3662 3661
3663 if (adapter->failover) { 3662 if (adapter->failover) {
3664 adapter->failover = false; 3663 adapter->failover = false;
@@ -3772,9 +3771,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3772 adapter->debugfs_dump = ent; 3771 adapter->debugfs_dump = ent;
3773 } 3772 }
3774 } 3773 }
3775 ibmvnic_send_crq_init(adapter);
3776 3774
3777 init_completion(&adapter->init_done); 3775 init_completion(&adapter->init_done);
3776 ibmvnic_send_crq_init(adapter);
3778 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) 3777 if (!wait_for_completion_timeout(&adapter->init_done, timeout))
3779 return 0; 3778 return 0;
3780 3779
@@ -3782,9 +3781,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3782 if (adapter->renegotiate) { 3781 if (adapter->renegotiate) {
3783 adapter->renegotiate = false; 3782 adapter->renegotiate = false;
3784 release_sub_crqs_no_irqs(adapter); 3783 release_sub_crqs_no_irqs(adapter);
3785 send_cap_queries(adapter);
3786 3784
3787 reinit_completion(&adapter->init_done); 3785 reinit_completion(&adapter->init_done);
3786 send_cap_queries(adapter);
3788 if (!wait_for_completion_timeout(&adapter->init_done, 3787 if (!wait_for_completion_timeout(&adapter->init_done,
3789 timeout)) 3788 timeout))
3790 return 0; 3789 return 0;
@@ -3798,7 +3797,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3798 } 3797 }
3799 3798
3800 netdev->real_num_tx_queues = adapter->req_tx_queues; 3799 netdev->real_num_tx_queues = adapter->req_tx_queues;
3801 netdev->mtu = adapter->req_mtu; 3800 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3802 3801
3803 rc = register_netdev(netdev); 3802 rc = register_netdev(netdev);
3804 if (rc) { 3803 if (rc) {
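The recurring ibmvnic change above is an ordering fix: init_completion() (or reinit_completion() on reuse) must run before the CRQ that will eventually complete() it is posted. If the request is sent first, a fast response can race with the (re)initialization and either be lost or corrupt the completion state, leaving the waiter stuck. The safe shape, sketched with a hypothetical send_request() whose response handler calls complete():

    struct completion fw_done;

    /* WRONG: the response may call complete(&fw_done) before the
     * completion is (re)initialized, and the wakeup is then lost:
     *
     *      send_request(adapter);
     *      init_completion(&fw_done);
     *      wait_for_completion(&fw_done);
     */

    /* RIGHT: arm the completion, then trigger the asynchronous work */
    init_completion(&fw_done);
    send_request(adapter);
    wait_for_completion(&fw_done);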
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a761001308dc..1515abaa5ac9 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3962,8 +3962,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3962 PAGE_SIZE, 3962 PAGE_SIZE,
3963 DMA_FROM_DEVICE, 3963 DMA_FROM_DEVICE,
3964 DMA_ATTR_SKIP_CPU_SYNC); 3964 DMA_ATTR_SKIP_CPU_SYNC);
3965 __page_frag_drain(buffer_info->page, 0, 3965 __page_frag_cache_drain(buffer_info->page,
3966 buffer_info->pagecnt_bias); 3966 buffer_info->pagecnt_bias);
3967 3967
3968 buffer_info->page = NULL; 3968 buffer_info->page = NULL;
3969 } 3969 }
@@ -6991,7 +6991,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6991 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, 6991 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
6992 PAGE_SIZE, DMA_FROM_DEVICE, 6992 PAGE_SIZE, DMA_FROM_DEVICE,
6993 DMA_ATTR_SKIP_CPU_SYNC); 6993 DMA_ATTR_SKIP_CPU_SYNC);
6994 __page_frag_drain(page, 0, rx_buffer->pagecnt_bias); 6994 __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
6995 } 6995 }
6996 6996
6997 /* clear contents of rx_buffer */ 6997 /* clear contents of rx_buffer */
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index cbeea915f026..8037426ec50f 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
900 DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR, 900 DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
901 &lp->rx_dma_regs->dmasm); 901 &lp->rx_dma_regs->dmasm);
902 902
903 korina_free_ring(dev);
904
905 napi_disable(&lp->napi); 903 napi_disable(&lp->napi);
906 904
905 korina_free_ring(dev);
906
907 if (korina_init(dev) < 0) { 907 if (korina_init(dev) < 0) {
908 printk(KERN_ERR "%s: cannot restart device\n", dev->name); 908 printk(KERN_ERR "%s: cannot restart device\n", dev->name);
909 return; 909 return;
@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
1064 tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR; 1064 tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
1065 writel(tmp, &lp->rx_dma_regs->dmasm); 1065 writel(tmp, &lp->rx_dma_regs->dmasm);
1066 1066
1067 korina_free_ring(dev);
1068
1069 napi_disable(&lp->napi); 1067 napi_disable(&lp->napi);
1070 1068
1071 cancel_work_sync(&lp->restart_task); 1069 cancel_work_sync(&lp->restart_task);
1072 1070
1071 korina_free_ring(dev);
1072
1073 free_irq(lp->rx_irq, dev); 1073 free_irq(lp->rx_irq, dev);
1074 free_irq(lp->tx_irq, dev); 1074 free_irq(lp->tx_irq, dev);
1075 free_irq(lp->ovr_irq, dev); 1075 free_irq(lp->ovr_irq, dev);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 3dd87889e67e..1c29c86f8709 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2517,7 +2517,7 @@ static int mtk_remove(struct platform_device *pdev)
2517} 2517}
2518 2518
2519const struct of_device_id of_mtk_match[] = { 2519const struct of_device_id of_mtk_match[] = {
2520 { .compatible = "mediatek,mt7623-eth" }, 2520 { .compatible = "mediatek,mt2701-eth" },
2521 {}, 2521 {},
2522}; 2522};
2523MODULE_DEVICE_TABLE(of, of_mtk_match); 2523MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index c7e939945259..53daa6ca5d83 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
158 return -ETIMEDOUT; 158 return -ETIMEDOUT;
159} 159}
160 160
161static int mlx4_comm_internal_err(u32 slave_read) 161int mlx4_comm_internal_err(u32 slave_read)
162{ 162{
163 return (u32)COMM_CHAN_EVENT_INTERNAL_ERR == 163 return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
164 (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0; 164 (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index a849da92f857..6b8635378f1f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
101{ 101{
102 struct mlx4_cq *cq; 102 struct mlx4_cq *cq;
103 103
104 rcu_read_lock();
104 cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, 105 cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
105 cqn & (dev->caps.num_cqs - 1)); 106 cqn & (dev->caps.num_cqs - 1));
107 rcu_read_unlock();
108
106 if (!cq) { 109 if (!cq) {
107 mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn); 110 mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
108 return; 111 return;
109 } 112 }
110 113
114 /* Accessing the CQ outside of rcu_read_lock is safe, because
115 * the CQ is freed only after interrupt handling is completed.
116 */
111 ++cq->arm_sn; 117 ++cq->arm_sn;
112 118
113 cq->comp(cq); 119 cq->comp(cq);
@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
118 struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; 124 struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
119 struct mlx4_cq *cq; 125 struct mlx4_cq *cq;
120 126
121 spin_lock(&cq_table->lock); 127 rcu_read_lock();
122
123 cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); 128 cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
124 if (cq) 129 rcu_read_unlock();
125 atomic_inc(&cq->refcount);
126
127 spin_unlock(&cq_table->lock);
128 130
129 if (!cq) { 131 if (!cq) {
130 mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn); 132 mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
131 return; 133 return;
132 } 134 }
133 135
136 /* Accessing the CQ outside of rcu_read_lock is safe, because
137 * the CQ is freed only after interrupt handling is completed.
138 */
134 cq->event(cq, event_type); 139 cq->event(cq, event_type);
135
136 if (atomic_dec_and_test(&cq->refcount))
137 complete(&cq->free);
138} 140}
139 141
140static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 142static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
301 if (err) 303 if (err)
302 return err; 304 return err;
303 305
304 spin_lock_irq(&cq_table->lock); 306 spin_lock(&cq_table->lock);
305 err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); 307 err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
306 spin_unlock_irq(&cq_table->lock); 308 spin_unlock(&cq_table->lock);
307 if (err) 309 if (err)
308 goto err_icm; 310 goto err_icm;
309 311
@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
349 return 0; 351 return 0;
350 352
351err_radix: 353err_radix:
352 spin_lock_irq(&cq_table->lock); 354 spin_lock(&cq_table->lock);
353 radix_tree_delete(&cq_table->tree, cq->cqn); 355 radix_tree_delete(&cq_table->tree, cq->cqn);
354 spin_unlock_irq(&cq_table->lock); 356 spin_unlock(&cq_table->lock);
355 357
356err_icm: 358err_icm:
357 mlx4_cq_free_icm(dev, cq->cqn); 359 mlx4_cq_free_icm(dev, cq->cqn);
@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
370 if (err) 372 if (err)
371 mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); 373 mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
372 374
375 spin_lock(&cq_table->lock);
376 radix_tree_delete(&cq_table->tree, cq->cqn);
377 spin_unlock(&cq_table->lock);
378
373 synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq); 379 synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
374 if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq != 380 if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
375 priv->eq_table.eq[MLX4_EQ_ASYNC].irq) 381 priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
376 synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 382 synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
377 383
378 spin_lock_irq(&cq_table->lock);
379 radix_tree_delete(&cq_table->tree, cq->cqn);
380 spin_unlock_irq(&cq_table->lock);
381
382 if (atomic_dec_and_test(&cq->refcount)) 384 if (atomic_dec_and_test(&cq->refcount))
383 complete(&cq->free); 385 complete(&cq->free);
384 wait_for_completion(&cq->free); 386 wait_for_completion(&cq->free);
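The mlx4 CQ table conversion above replaces a spinlock-plus-refcount dance in the interrupt path with plain RCU: radix_tree_lookup() is safe under rcu_read_lock(), and using the CQ pointer after the unlock is fine because a CQ is unpublished from the tree and its interrupts synchronized before it is freed. The resulting reader/writer shape, roughly (cq_irq stands in for the EQ interrupt serving the CQ):

    /* reader (IRQ/completion path): lock-free lookup */
    rcu_read_lock();
    cq = radix_tree_lookup(&cq_table->tree, cqn & (num_cqs - 1));
    rcu_read_unlock();
    if (cq)
            cq->comp(cq);   /* safe: free waits for interrupt handling */

    /* writer (free path): unpublish first, then drain in-flight handlers */
    spin_lock(&cq_table->lock);
    radix_tree_delete(&cq_table->tree, cq->cqn);
    spin_unlock(&cq_table->lock);
    synchronize_irq(cq_irq);        /* no handler can still observe the CQ */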
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 015198c14fa8..504461a464c5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -245,13 +245,9 @@ static u32 freq_to_shift(u16 freq)
245{ 245{
246 u32 freq_khz = freq * 1000; 246 u32 freq_khz = freq * 1000;
247 u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC; 247 u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
248 u64 tmp_rounded = 248 u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
249 roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
250 roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
251 u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
252 max_val_cycles : tmp_rounded;
253 /* calculate max possible multiplier in order to fit in 64bit */ 249 /* calculate max possible multiplier in order to fit in 64bit */
254 u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded); 250 u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
255 251
256 /* This comes from the reverse of clocksource_khz2mult */ 252 /* This comes from the reverse of clocksource_khz2mult */
257 return ilog2(div_u64(max_mul * freq_khz, 1000000)); 253 return ilog2(div_u64(max_mul * freq_khz, 1000000));
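freq_to_shift() above collapses the previous roundup_pow_of_two() contortions into "1ULL << fls64(max_val_cycles - 1)": for any non-zero input this rounds up to the nearest power of two and leaves exact powers of two unchanged. A runnable check of the identity, emulating the kernel's fls64() with __builtin_clzll():

    #include <stdio.h>
    #include <stdint.h>

    /* kernel-style fls64(): 1-based index of the highest set bit, 0 for 0 */
    static unsigned int fls64(uint64_t x)
    {
            return x ? 64 - (unsigned int)__builtin_clzll(x) : 0;
    }

    /* round x (>= 1) up to the nearest power of two; powers of two map
     * to themselves, since fls64(x - 1) is then exactly log2(x)
     */
    static uint64_t round_up_pow2(uint64_t x)
    {
            return 1ULL << fls64(x - 1);
    }

    int main(void)
    {
            const uint64_t vals[] = { 1, 2, 3, 1000, 1024, 1025 };
            unsigned int i;

            for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
                    printf("%llu -> %llu\n",
                           (unsigned long long)vals[i],
                           (unsigned long long)round_up_pow2(vals[i]));
            return 0;
    }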
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index d9c9f86a30df..9aa422691954 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1099,7 +1099,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
1099 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 1099 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
1100 new_prof.tx_ring_size = tx_size; 1100 new_prof.tx_ring_size = tx_size;
1101 new_prof.rx_ring_size = rx_size; 1101 new_prof.rx_ring_size = rx_size;
1102 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); 1102 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
1103 if (err) 1103 if (err)
1104 goto out; 1104 goto out;
1105 1105
@@ -1732,8 +1732,6 @@ static void mlx4_en_get_channels(struct net_device *dev,
1732{ 1732{
1733 struct mlx4_en_priv *priv = netdev_priv(dev); 1733 struct mlx4_en_priv *priv = netdev_priv(dev);
1734 1734
1735 memset(channel, 0, sizeof(*channel));
1736
1737 channel->max_rx = MAX_RX_RINGS; 1735 channel->max_rx = MAX_RX_RINGS;
1738 channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP; 1736 channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
1739 1737
@@ -1752,10 +1750,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
1752 int xdp_count; 1750 int xdp_count;
1753 int err = 0; 1751 int err = 0;
1754 1752
1755 if (channel->other_count || channel->combined_count || 1753 if (!channel->tx_count || !channel->rx_count)
1756 channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
1757 channel->rx_count > MAX_RX_RINGS ||
1758 !channel->tx_count || !channel->rx_count)
1759 return -EINVAL; 1754 return -EINVAL;
1760 1755
1761 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 1756 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
@@ -1779,7 +1774,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
1779 new_prof.tx_ring_num[TX_XDP] = xdp_count; 1774 new_prof.tx_ring_num[TX_XDP] = xdp_count;
1780 new_prof.rx_ring_num = channel->rx_count; 1775 new_prof.rx_ring_num = channel->rx_count;
1781 1776
1782 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); 1777 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
1783 if (err) 1778 if (err)
1784 goto out; 1779 goto out;
1785 1780
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bcd955339058..3b4961a8e8e4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1638,7 +1638,8 @@ int mlx4_en_start_port(struct net_device *dev)
1638 1638
1639 /* Configure tx cq's and rings */ 1639 /* Configure tx cq's and rings */
1640 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { 1640 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
1641 u8 num_tx_rings_p_up = t == TX ? priv->num_tx_rings_p_up : 1; 1641 u8 num_tx_rings_p_up = t == TX ?
1642 priv->num_tx_rings_p_up : priv->tx_ring_num[t];
1642 1643
1643 for (i = 0; i < priv->tx_ring_num[t]; i++) { 1644 for (i = 0; i < priv->tx_ring_num[t]; i++) {
1644 /* Configure cq */ 1645 /* Configure cq */
@@ -1747,8 +1748,11 @@ int mlx4_en_start_port(struct net_device *dev)
1747 /* Process all completions, if any exist, to prevent 1748 /* Process all completions, if any exist, to prevent
1748 * the queues freezing if they are full 1749 * the queues freezing if they are full
1749 */ 1750 */
1750 for (i = 0; i < priv->rx_ring_num; i++) 1751 for (i = 0; i < priv->rx_ring_num; i++) {
1752 local_bh_disable();
1751 napi_schedule(&priv->rx_cq[i]->napi); 1753 napi_schedule(&priv->rx_cq[i]->napi);
1754 local_bh_enable();
1755 }
1752 1756
1753 netif_tx_start_all_queues(dev); 1757 netif_tx_start_all_queues(dev);
1754 netif_device_attach(dev); 1758 netif_device_attach(dev);
@@ -2038,6 +2042,8 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
2038 if (priv->tx_cq[t] && priv->tx_cq[t][i]) 2042 if (priv->tx_cq[t] && priv->tx_cq[t][i])
2039 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]); 2043 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
2040 } 2044 }
2045 kfree(priv->tx_ring[t]);
2046 kfree(priv->tx_cq[t]);
2041 } 2047 }
2042 2048
2043 for (i = 0; i < priv->rx_ring_num; i++) { 2049 for (i = 0; i < priv->rx_ring_num; i++) {
@@ -2180,9 +2186,11 @@ static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
2180 2186
2181int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, 2187int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
2182 struct mlx4_en_priv *tmp, 2188 struct mlx4_en_priv *tmp,
2183 struct mlx4_en_port_profile *prof) 2189 struct mlx4_en_port_profile *prof,
2190 bool carry_xdp_prog)
2184{ 2191{
2185 int t; 2192 struct bpf_prog *xdp_prog;
2193 int i, t;
2186 2194
2187 mlx4_en_copy_priv(tmp, priv, prof); 2195 mlx4_en_copy_priv(tmp, priv, prof);
2188 2196
@@ -2196,6 +2204,23 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
2196 } 2204 }
2197 return -ENOMEM; 2205 return -ENOMEM;
2198 } 2206 }
2207
2208 /* All rx_rings have the same xdp_prog. Pick the first one. */
2209 xdp_prog = rcu_dereference_protected(
2210 priv->rx_ring[0]->xdp_prog,
2211 lockdep_is_held(&priv->mdev->state_lock));
2212
2213 if (xdp_prog && carry_xdp_prog) {
2214 xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
2215 if (IS_ERR(xdp_prog)) {
2216 mlx4_en_free_resources(tmp);
2217 return PTR_ERR(xdp_prog);
2218 }
2219 for (i = 0; i < tmp->rx_ring_num; i++)
2220 rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
2221 xdp_prog);
2222 }
2223
2199 return 0; 2224 return 0;
2200} 2225}
2201 2226
@@ -2210,7 +2235,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
2210{ 2235{
2211 struct mlx4_en_priv *priv = netdev_priv(dev); 2236 struct mlx4_en_priv *priv = netdev_priv(dev);
2212 struct mlx4_en_dev *mdev = priv->mdev; 2237 struct mlx4_en_dev *mdev = priv->mdev;
2213 int t;
2214 2238
2215 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 2239 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
2216 2240
@@ -2244,11 +2268,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
2244 mlx4_en_free_resources(priv); 2268 mlx4_en_free_resources(priv);
2245 mutex_unlock(&mdev->state_lock); 2269 mutex_unlock(&mdev->state_lock);
2246 2270
2247 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2248 kfree(priv->tx_ring[t]);
2249 kfree(priv->tx_cq[t]);
2250 }
2251
2252 free_netdev(dev); 2271 free_netdev(dev);
2253} 2272}
2254 2273
@@ -2276,7 +2295,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
2276 2295
2277 if (priv->tx_ring_num[TX_XDP] && 2296 if (priv->tx_ring_num[TX_XDP] &&
2278 !mlx4_en_check_xdp_mtu(dev, new_mtu)) 2297 !mlx4_en_check_xdp_mtu(dev, new_mtu))
2279 return -ENOTSUPP; 2298 return -EOPNOTSUPP;
2280 2299
2281 dev->mtu = new_mtu; 2300 dev->mtu = new_mtu;
2282 2301
@@ -2751,7 +2770,7 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
2751 en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n"); 2770 en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
2752 } 2771 }
2753 2772
2754 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); 2773 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
2755 if (err) { 2774 if (err) {
2756 if (prog) 2775 if (prog)
2757 bpf_prog_sub(prog, priv->rx_ring_num - 1); 2776 bpf_prog_sub(prog, priv->rx_ring_num - 1);
@@ -3495,7 +3514,7 @@ int mlx4_en_reset_config(struct net_device *dev,
3495 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); 3514 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
3496 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config)); 3515 memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
3497 3516
3498 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); 3517 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
3499 if (err) 3518 if (err)
3500 goto out; 3519 goto out;
3501 3520
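Two details worth noting in the en_netdev.c hunks above. rcu_dereference_protected() with lockdep_is_held() reads an RCU-managed pointer on the update side, where holding mdev->state_lock (rather than rcu_read_lock()) is what makes the access legal, and lockdep can verify the claim. And because every RX ring holds its own reference on the shared XDP program, carrying it over to a new ring set means taking rx_ring_num extra references up front via bpf_prog_add(), which in this kernel returns an ERR_PTR on refcount overflow. In outline:

    xdp_prog = rcu_dereference_protected(priv->rx_ring[0]->xdp_prog,
                                         lockdep_is_held(&priv->mdev->state_lock));
    if (xdp_prog && carry_xdp_prog) {
            /* one reference per new ring, taken before publication */
            xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
            if (IS_ERR(xdp_prog))
                    return PTR_ERR(xdp_prog);
            for (i = 0; i < tmp->rx_ring_num; i++)
                    rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog, xdp_prog);
    }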
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 3c37e216bbf3..cc003fdf0ed9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -445,8 +445,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
445 ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn; 445 ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
446 446
447 ring->stride = stride; 447 ring->stride = stride;
448 if (ring->stride <= TXBB_SIZE) 448 if (ring->stride <= TXBB_SIZE) {
449 /* Stamp first unused send wqe */
450 __be32 *ptr = (__be32 *)ring->buf;
451 __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
452 *ptr = stamp;
453 /* Move pointer to start of rx section */
449 ring->buf += TXBB_SIZE; 454 ring->buf += TXBB_SIZE;
455 }
450 456
451 ring->log_stride = ffs(ring->stride) - 1; 457 ring->log_stride = ffs(ring->stride) - 1;
452 ring->buf_size = ring->size * ring->stride; 458 ring->buf_size = ring->size * ring->stride;
@@ -508,8 +514,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
508 return; 514 return;
509 515
510 for (ring = 0; ring < priv->rx_ring_num; ring++) { 516 for (ring = 0; ring < priv->rx_ring_num; ring++) {
511 if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) 517 if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
518 local_bh_disable();
512 napi_reschedule(&priv->rx_cq[ring]->napi); 519 napi_reschedule(&priv->rx_cq[ring]->napi);
520 local_bh_enable();
521 }
513 } 522 }
514} 523}
515 524
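The local_bh_disable()/local_bh_enable() pairs added above matter because napi_schedule() only raises NET_RX_SOFTIRQ. Called from process context with bottom halves enabled, nothing guarantees the softirq runs promptly; with BHs disabled around the call, the closing local_bh_enable() processes any pending softirqs on the spot:

    /* process context: make sure the raised NET_RX_SOFTIRQ actually runs */
    local_bh_disable();
    napi_schedule(&priv->rx_cq[ring]->napi);
    local_bh_enable();      /* runs the pending softirq before returning */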
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cd3638e6fe25..0509996957d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
554 break; 554 break;
555 555
556 case MLX4_EVENT_TYPE_SRQ_LIMIT: 556 case MLX4_EVENT_TYPE_SRQ_LIMIT:
557 mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", 557 mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
558 __func__); 558 __func__, be32_to_cpu(eqe->event.srq.srqn),
559 eq->eqn);
559 case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: 560 case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
560 if (mlx4_is_master(dev)) { 561 if (mlx4_is_master(dev)) {
561 /* forward only to slave owning the SRQ */ 562 /* forward only to slave owning the SRQ */
@@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
570 eq->eqn, eq->cons_index, ret); 571 eq->eqn, eq->cons_index, ret);
571 break; 572 break;
572 } 573 }
573 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", 574 if (eqe->type ==
574 __func__, slave, 575 MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
575 be32_to_cpu(eqe->event.srq.srqn), 576 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
576 eqe->type, eqe->subtype); 577 __func__, slave,
578 be32_to_cpu(eqe->event.srq.srqn),
579 eqe->type, eqe->subtype);
577 580
578 if (!ret && slave != dev->caps.function) { 581 if (!ret && slave != dev->caps.function) {
579 mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", 582 if (eqe->type ==
580 __func__, eqe->type, 583 MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
581 eqe->subtype, slave); 584 mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
585 __func__, eqe->type,
586 eqe->subtype, slave);
582 mlx4_slave_event(dev, slave, eqe); 587 mlx4_slave_event(dev, slave, eqe);
583 break; 588 break;
584 } 589 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 2a9dd460a95f..e1f9e7cebf8f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -118,8 +118,13 @@ static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
118 if (!buf) 118 if (!buf)
119 return -ENOMEM; 119 return -ENOMEM;
120 120
121 if (offset_in_page(buf)) {
122 dma_free_coherent(dev, PAGE_SIZE << order,
123 buf, sg_dma_address(mem));
124 return -ENOMEM;
125 }
126
121 sg_set_buf(mem, buf, PAGE_SIZE << order); 127 sg_set_buf(mem, buf, PAGE_SIZE << order);
122 BUG_ON(mem->offset);
123 sg_dma_len(mem) = PAGE_SIZE << order; 128 sg_dma_len(mem) = PAGE_SIZE << order;
124 return 0; 129 return 0;
125} 130}
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0e8b7c44931f..8258d08acd8c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
222 return; 222 return;
223 223
224 mlx4_stop_catas_poll(dev); 224 mlx4_stop_catas_poll(dev);
225 if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
226 mlx4_is_slave(dev)) {
227 /* In mlx4_remove_one on a VF */
228 u32 slave_read =
229 swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
230
231 if (mlx4_comm_internal_err(slave_read)) {
232 mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
233 __func__);
234 mlx4_enter_error_state(dev->persist);
235 }
236 }
225 mutex_lock(&intf_mutex); 237 mutex_lock(&intf_mutex);
226 238
227 list_for_each_entry(intf, &intf_list, list) 239 list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5e7840a7a33b..bffa6f345f2f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -42,6 +42,7 @@
42#include <linux/io-mapping.h> 42#include <linux/io-mapping.h>
43#include <linux/delay.h> 43#include <linux/delay.h>
44#include <linux/kmod.h> 44#include <linux/kmod.h>
45#include <linux/etherdevice.h>
45#include <net/devlink.h> 46#include <net/devlink.h>
46 47
47#include <linux/mlx4/device.h> 48#include <linux/mlx4/device.h>
@@ -782,6 +783,23 @@ int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
782} 783}
783EXPORT_SYMBOL(mlx4_is_slave_active); 784EXPORT_SYMBOL(mlx4_is_slave_active);
784 785
786void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
787 struct _rule_hw *eth_header)
788{
789 if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
790 is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
791 struct mlx4_net_trans_rule_hw_eth *eth =
792 (struct mlx4_net_trans_rule_hw_eth *)eth_header;
793 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
794 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
795 next_rule->rsvd == 0;
796
797 if (last_rule)
798 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
799 }
800}
801EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);
802
785static void slave_adjust_steering_mode(struct mlx4_dev *dev, 803static void slave_adjust_steering_mode(struct mlx4_dev *dev,
786 struct mlx4_dev_cap *dev_cap, 804 struct mlx4_dev_cap *dev_cap,
787 struct mlx4_init_hca_param *hca_param) 805 struct mlx4_init_hca_param *hca_param)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 88ee7d8a5923..086920b615af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
1220void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); 1220void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
1221 1221
1222void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); 1222void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
1223int mlx4_comm_internal_err(u32 slave_read);
1223 1224
1224int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, 1225int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
1225 enum mlx4_port_type *type); 1226 enum mlx4_port_type *type);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index ba1c6cd0cc79..cec59bc264c9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -679,7 +679,8 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
679 679
680int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, 680int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
681 struct mlx4_en_priv *tmp, 681 struct mlx4_en_priv *tmp,
682 struct mlx4_en_port_profile *prof); 682 struct mlx4_en_port_profile *prof,
683 bool carry_xdp_prog);
683void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv, 684void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
684 struct mlx4_en_priv *tmp); 685 struct mlx4_en_priv *tmp);
685 686
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index c548beaaf910..1822382212ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2980 put_res(dev, slave, srqn, RES_SRQ); 2980 put_res(dev, slave, srqn, RES_SRQ);
2981 qp->srq = srq; 2981 qp->srq = srq;
2982 } 2982 }
2983
2984 /* Save param3 for dynamic changes from VST back to VGT */
2985 qp->param3 = qpc->param3;
2983 put_res(dev, slave, rcqn, RES_CQ); 2986 put_res(dev, slave, rcqn, RES_CQ);
2984 put_res(dev, slave, mtt_base, RES_MTT); 2987 put_res(dev, slave, mtt_base, RES_MTT);
2985 res_end_move(dev, slave, RES_QP, qpn); 2988 res_end_move(dev, slave, RES_QP, qpn);
@@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3772 int qpn = vhcr->in_modifier & 0x7fffff; 3775 int qpn = vhcr->in_modifier & 0x7fffff;
3773 struct res_qp *qp; 3776 struct res_qp *qp;
3774 u8 orig_sched_queue; 3777 u8 orig_sched_queue;
3775 __be32 orig_param3 = qpc->param3;
3776 u8 orig_vlan_control = qpc->pri_path.vlan_control; 3778 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3777 u8 orig_fvl_rx = qpc->pri_path.fvl_rx; 3779 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3778 u8 orig_pri_path_fl = qpc->pri_path.fl; 3780 u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3814,7 +3816,6 @@ out:
3814 */ 3816 */
3815 if (!err) { 3817 if (!err) {
3816 qp->sched_queue = orig_sched_queue; 3818 qp->sched_queue = orig_sched_queue;
3817 qp->param3 = orig_param3;
3818 qp->vlan_control = orig_vlan_control; 3819 qp->vlan_control = orig_vlan_control;
3819 qp->fvl_rx = orig_fvl_rx; 3820 qp->fvl_rx = orig_fvl_rx;
3820 qp->pri_path_fl = orig_pri_path_fl; 3821 qp->pri_path_fl = orig_pri_path_fl;
@@ -4164,22 +4165,6 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4164 return 0; 4165 return 0;
4165} 4166}
4166 4167
4167static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4168 struct _rule_hw *eth_header)
4169{
4170 if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4171 is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4172 struct mlx4_net_trans_rule_hw_eth *eth =
4173 (struct mlx4_net_trans_rule_hw_eth *)eth_header;
4174 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4175 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4176 next_rule->rsvd == 0;
4177
4178 if (last_rule)
4179 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
4180 }
4181}
4182
4183/* 4168/*
4184 * In case of missing eth header, append eth header with a MAC address 4169 * In case of missing eth header, append eth header with a MAC address
4185 * assigned to the VF. 4170 * assigned to the VF.
@@ -4363,10 +4348,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4363 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id)); 4348 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4364 4349
4365 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH) 4350 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4366 handle_eth_header_mcast_prio(ctrl, rule_header); 4351 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
4367
4368 if (slave == dev->caps.function)
4369 goto execute;
4370 4352
4371 switch (header_id) { 4353 switch (header_id) {
4372 case MLX4_NET_TRANS_RULE_ID_ETH: 4354 case MLX4_NET_TRANS_RULE_ID_ETH:
@@ -4394,7 +4376,6 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4394 goto err_put_qp; 4376 goto err_put_qp;
4395 } 4377 }
4396 4378
4397execute:
4398 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, 4379 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4399 vhcr->in_modifier, 0, 4380 vhcr->in_modifier, 0,
4400 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 4381 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
@@ -4473,6 +4454,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4473 struct res_qp *rqp; 4454 struct res_qp *rqp;
4474 struct res_fs_rule *rrule; 4455 struct res_fs_rule *rrule;
4475 u64 mirr_reg_id; 4456 u64 mirr_reg_id;
4457 int qpn;
4476 4458
4477 if (dev->caps.steering_mode != 4459 if (dev->caps.steering_mode !=
4478 MLX4_STEERING_MODE_DEVICE_MANAGED) 4460 MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -4489,10 +4471,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4489 } 4471 }
4490 mirr_reg_id = rrule->mirr_rule_id; 4472 mirr_reg_id = rrule->mirr_rule_id;
4491 kfree(rrule->mirr_mbox); 4473 kfree(rrule->mirr_mbox);
4474 qpn = rrule->qpn;
4492 4475
4493 /* Release the rule from busy state before removal */ 4476 /* Release the rule from busy state before removal */
4494 put_res(dev, slave, vhcr->in_param, RES_FS_RULE); 4477 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4495 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp); 4478 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4496 if (err) 4479 if (err)
4497 return err; 4480 return err;
4498 4481
@@ -4517,7 +4500,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4517 if (!err) 4500 if (!err)
4518 atomic_dec(&rqp->ref_count); 4501 atomic_dec(&rqp->ref_count);
4519out: 4502out:
4520 put_res(dev, slave, rrule->qpn, RES_QP); 4503 put_res(dev, slave, qpn, RES_QP);
4521 return err; 4504 return err;
4522} 4505}
4523 4506
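The DETACH wrapper change above is a use-after-free fix: once put_res() drops the rule's busy state, another thread may free the rule object, so rrule->qpn is copied to a local before the release and only the copy is used afterwards. A standalone illustration of the pattern (hypothetical rule object and release helper):

    #include <stdio.h>
    #include <stdlib.h>

    struct rule {
            int qpn;
    };

    /* hypothetical release: the object must not be touched afterwards */
    static void put_rule(struct rule *r)
    {
            free(r);
    }

    int main(void)
    {
            struct rule *rrule = malloc(sizeof(*rrule));
            int qpn;

            if (!rrule)
                    return 1;
            rrule->qpn = 42;

            qpn = rrule->qpn;       /* copy out everything needed later */
            put_rule(rrule);        /* rrule is dangling from here on */

            printf("qpn = %d\n", qpn);      /* safe: uses the saved copy */
            return 0;
    }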
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3797cc7c1288..caa837e5e2b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
1728 if (cmd->cmdif_rev > CMD_IF_REV) { 1728 if (cmd->cmdif_rev > CMD_IF_REV) {
1729 dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n", 1729 dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
1730 CMD_IF_REV, cmd->cmdif_rev); 1730 CMD_IF_REV, cmd->cmdif_rev);
1731 err = -ENOTSUPP; 1731 err = -EOPNOTSUPP;
1732 goto err_free_page; 1732 goto err_free_page;
1733 } 1733 }
1734 1734
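The ENOTSUPP -> EOPNOTSUPP conversions that run through this series share one rationale: ENOTSUPP (524) is a kernel-internal NFSv3 value with no userspace definition, so returning it from an ioctl or ndo handler surfaces as "Unknown error 524", while EOPNOTSUPP is a real UAPI errno. A small sketch that can be compiled to see the difference (assumes glibc strerror() behaviour):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #ifndef ENOTSUPP
    #define ENOTSUPP 524   /* kernel-internal value, not part of the UAPI */
    #endif

    int main(void)
    {
            /* ENOTSUPP has no libc string -> "Unknown error 524" */
            printf("ENOTSUPP:   %s\n", strerror(ENOTSUPP));
            /* EOPNOTSUPP is a real UAPI errno */
            printf("EOPNOTSUPP: %s\n", strerror(EOPNOTSUPP));
            return 0;
    }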
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 951dbd58594d..d5ecb8f53fd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
791int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd); 791int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
792 792
793int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix); 793int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
794void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv); 794void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
795 enum mlx5e_traffic_types tt);
795 796
796int mlx5e_open_locked(struct net_device *netdev); 797int mlx5e_open_locked(struct net_device *netdev);
797int mlx5e_close_locked(struct net_device *netdev); 798int mlx5e_close_locked(struct net_device *netdev);
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
863 864
864static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) 865static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
865{ 866{
866 return -ENOTSUPP; 867 return -EOPNOTSUPP;
867} 868}
868 869
869static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) 870static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
870{ 871{
871 return -ENOTSUPP; 872 return -EOPNOTSUPP;
872} 873}
873#else 874#else
874int mlx5e_arfs_create_tables(struct mlx5e_priv *priv); 875int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 7f6c225666c1..0523ed47f597 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
89 int i; 89 int i;
90 90
91 if (!MLX5_CAP_GEN(priv->mdev, ets)) 91 if (!MLX5_CAP_GEN(priv->mdev, ets))
92 return -ENOTSUPP; 92 return -EOPNOTSUPP;
93 93
94 ets->ets_cap = mlx5_max_tc(priv->mdev) + 1; 94 ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
95 for (i = 0; i < ets->ets_cap; i++) { 95 for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
236 int err; 236 int err;
237 237
238 if (!MLX5_CAP_GEN(priv->mdev, ets)) 238 if (!MLX5_CAP_GEN(priv->mdev, ets))
239 return -ENOTSUPP; 239 return -EOPNOTSUPP;
240 240
241 err = mlx5e_dbcnl_validate_ets(netdev, ets); 241 err = mlx5e_dbcnl_validate_ets(netdev, ets);
242 if (err) 242 if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
402 struct mlx5_core_dev *mdev = priv->mdev; 402 struct mlx5_core_dev *mdev = priv->mdev;
403 struct ieee_ets ets; 403 struct ieee_ets ets;
404 struct ieee_pfc pfc; 404 struct ieee_pfc pfc;
405 int err = -ENOTSUPP; 405 int err = -EOPNOTSUPP;
406 int i; 406 int i;
407 407
408 if (!MLX5_CAP_GEN(mdev, ets)) 408 if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
511 struct mlx5e_priv *priv = netdev_priv(netdev); 511 struct mlx5e_priv *priv = netdev_priv(netdev);
512 struct mlx5_core_dev *mdev = priv->mdev; 512 struct mlx5_core_dev *mdev = priv->mdev;
513 513
514 if (!MLX5_CAP_GEN(priv->mdev, ets)) {
515 netdev_err(netdev, "%s, ets is not supported\n", __func__);
516 return;
517 }
518
514 if (priority >= CEE_DCBX_MAX_PRIO) { 519 if (priority >= CEE_DCBX_MAX_PRIO) {
515 netdev_err(netdev, 520 netdev_err(netdev,
516 "%s, priority is out of range\n", __func__); 521 "%s, priority is out of range\n", __func__);
@@ -723,6 +728,9 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
723 int i; 728 int i;
724 struct ieee_ets ets; 729 struct ieee_ets ets;
725 730
731 if (!MLX5_CAP_GEN(priv->mdev, ets))
732 return;
733
726 memset(&ets, 0, sizeof(ets)); 734 memset(&ets, 0, sizeof(ets));
727 ets.ets_cap = mlx5_max_tc(priv->mdev) + 1; 735 ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
728 for (i = 0; i < ets.ets_cap; i++) { 736 for (i = 0; i < ets.ets_cap; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 352462af8d51..bb67863aa361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -171,7 +171,6 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
171 return NUM_SW_COUNTERS + 171 return NUM_SW_COUNTERS +
172 MLX5E_NUM_Q_CNTRS(priv) + 172 MLX5E_NUM_Q_CNTRS(priv) +
173 NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + 173 NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
174 NUM_PCIE_COUNTERS +
175 MLX5E_NUM_RQ_STATS(priv) + 174 MLX5E_NUM_RQ_STATS(priv) +
176 MLX5E_NUM_SQ_STATS(priv) + 175 MLX5E_NUM_SQ_STATS(priv) +
177 MLX5E_NUM_PFC_COUNTERS(priv) + 176 MLX5E_NUM_PFC_COUNTERS(priv) +
@@ -219,14 +218,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
219 strcpy(data + (idx++) * ETH_GSTRING_LEN, 218 strcpy(data + (idx++) * ETH_GSTRING_LEN,
220 pport_2819_stats_desc[i].format); 219 pport_2819_stats_desc[i].format);
221 220
222 for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
223 strcpy(data + (idx++) * ETH_GSTRING_LEN,
224 pcie_perf_stats_desc[i].format);
225
226 for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
227 strcpy(data + (idx++) * ETH_GSTRING_LEN,
228 pcie_tas_stats_desc[i].format);
229
230 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { 221 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
231 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) 222 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
232 sprintf(data + (idx++) * ETH_GSTRING_LEN, 223 sprintf(data + (idx++) * ETH_GSTRING_LEN,
@@ -339,14 +330,6 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
339 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, 330 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
340 pport_2819_stats_desc, i); 331 pport_2819_stats_desc, i);
341 332
342 for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
343 data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
344 pcie_perf_stats_desc, i);
345
346 for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
347 data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_tas_counters,
348 pcie_tas_stats_desc, i);
349
350 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { 333 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
351 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) 334 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
352 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], 335 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
@@ -560,7 +543,6 @@ static int mlx5e_set_channels(struct net_device *dev,
560 struct ethtool_channels *ch) 543 struct ethtool_channels *ch)
561{ 544{
562 struct mlx5e_priv *priv = netdev_priv(dev); 545 struct mlx5e_priv *priv = netdev_priv(dev);
563 int ncv = mlx5e_get_max_num_channels(priv->mdev);
564 unsigned int count = ch->combined_count; 546 unsigned int count = ch->combined_count;
565 bool arfs_enabled; 547 bool arfs_enabled;
566 bool was_opened; 548 bool was_opened;
@@ -571,16 +553,6 @@ static int mlx5e_set_channels(struct net_device *dev,
571 __func__); 553 __func__);
572 return -EINVAL; 554 return -EINVAL;
573 } 555 }
574 if (ch->rx_count || ch->tx_count) {
575 netdev_info(dev, "%s: separate rx/tx count not supported\n",
576 __func__);
577 return -EINVAL;
578 }
579 if (count > ncv) {
580 netdev_info(dev, "%s: count (%d) > max (%d)\n",
581 __func__, count, ncv);
582 return -EINVAL;
583 }
584 556
585 if (priv->params.num_channels == count) 557 if (priv->params.num_channels == count)
586 return 0; 558 return 0;
@@ -623,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
623 struct mlx5e_priv *priv = netdev_priv(netdev); 595 struct mlx5e_priv *priv = netdev_priv(netdev);
624 596
625 if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) 597 if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
626 return -ENOTSUPP; 598 return -EOPNOTSUPP;
627 599
628 coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec; 600 coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
629 coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts; 601 coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -648,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
648 int i; 620 int i;
649 621
650 if (!MLX5_CAP_GEN(mdev, cq_moderation)) 622 if (!MLX5_CAP_GEN(mdev, cq_moderation))
651 return -ENOTSUPP; 623 return -EOPNOTSUPP;
652 624
653 mutex_lock(&priv->state_lock); 625 mutex_lock(&priv->state_lock);
654 626
@@ -1008,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
1008 980
1009static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) 981static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
1010{ 982{
1011 struct mlx5_core_dev *mdev = priv->mdev;
1012 void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); 983 void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
1013 int i; 984 struct mlx5_core_dev *mdev = priv->mdev;
985 int ctxlen = MLX5_ST_SZ_BYTES(tirc);
986 int tt;
1014 987
1015 MLX5_SET(modify_tir_in, in, bitmask.hash, 1); 988 MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
1016 mlx5e_build_tir_ctx_hash(tirc, priv);
1017 989
1018 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) 990 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
1019 mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen); 991 memset(tirc, 0, ctxlen);
992 mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
993 mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
994 }
1020} 995}
1021 996
1022static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, 997static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
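The mlx5e_modify_tirs_hash() rework above reuses one TIR context buffer for every traffic type, so each iteration must zero it before mlx5e_build_indir_tir_ctx_hash() fills in the TT-specific fields; otherwise selector bits from the previous type would leak into the next. A compilable sketch of the zero-then-build loop (build_ctx() is a hypothetical stand-in for the MLX5_SET() sequence):

    #include <stdio.h>
    #include <string.h>

    #define NUM_TT 9
    #define CTXLEN 64

    /* hypothetical per-traffic-type context builder */
    static void build_ctx(unsigned char *ctx, int tt)
    {
            ctx[0] = (unsigned char)tt;   /* stand-in for MLX5_SET() calls */
    }

    int main(void)
    {
            unsigned char tirc[CTXLEN];
            int tt;

            for (tt = 0; tt < NUM_TT; tt++) {
                    /* wipe leftovers from the previous traffic type */
                    memset(tirc, 0, sizeof(tirc));
                    build_ctx(tirc, tt);
                    printf("tt %d -> ctx[0]=%u\n", tt, tirc[0]);
            }
            return 0;
    }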
@@ -1024,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
1024{ 999{
1025 struct mlx5e_priv *priv = netdev_priv(dev); 1000 struct mlx5e_priv *priv = netdev_priv(dev);
1026 int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); 1001 int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1002 bool hash_changed = false;
1027 void *in; 1003 void *in;
1028 1004
1029 if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && 1005 if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1045,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
1045 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); 1021 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
1046 } 1022 }
1047 1023
1048 if (key) 1024 if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
1025 hfunc != priv->params.rss_hfunc) {
1026 priv->params.rss_hfunc = hfunc;
1027 hash_changed = true;
1028 }
1029
1030 if (key) {
1049 memcpy(priv->params.toeplitz_hash_key, key, 1031 memcpy(priv->params.toeplitz_hash_key, key,
1050 sizeof(priv->params.toeplitz_hash_key)); 1032 sizeof(priv->params.toeplitz_hash_key));
1033 hash_changed = hash_changed ||
1034 priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
1035 }
1051 1036
1052 if (hfunc != ETH_RSS_HASH_NO_CHANGE) 1037 if (hash_changed)
1053 priv->params.rss_hfunc = hfunc; 1038 mlx5e_modify_tirs_hash(priv, in, inlen);
1054
1055 mlx5e_modify_tirs_hash(priv, in, inlen);
1056 1039
1057 mutex_unlock(&priv->state_lock); 1040 mutex_unlock(&priv->state_lock);
1058 1041
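The set_rxfh change avoids reprogramming the TIRs unless the effective hash actually changed: a new hash function always counts, but a new key only matters when the (possibly just-updated) function is Toeplitz. A self-contained sketch of that decision logic (enum values and names are illustrative, not the ethtool constants):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    enum { HASH_NO_CHANGE = 0, HASH_XOR = 1, HASH_TOP = 2 };

    struct params { int rss_hfunc; unsigned char key[40]; };

    /* returns true if the device TIRs must be reprogrammed */
    static bool apply_rxfh(struct params *p, int hfunc, const unsigned char *key)
    {
            bool hash_changed = false;

            if (hfunc != HASH_NO_CHANGE && hfunc != p->rss_hfunc) {
                    p->rss_hfunc = hfunc;
                    hash_changed = true;
            }

            if (key) {
                    memcpy(p->key, key, sizeof(p->key));
                    /* a new key only matters for Toeplitz */
                    hash_changed = hash_changed || p->rss_hfunc == HASH_TOP;
            }

            return hash_changed;
    }

    int main(void)
    {
            struct params p = { .rss_hfunc = HASH_XOR };
            unsigned char key[40] = { 1 };

            printf("%d\n", apply_rxfh(&p, HASH_NO_CHANGE, key)); /* 0: XOR ignores key */
            printf("%d\n", apply_rxfh(&p, HASH_TOP, NULL));      /* 1: hfunc changed */
            printf("%d\n", apply_rxfh(&p, HASH_NO_CHANGE, key)); /* 1: TOP consumes key */
            return 0;
    }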
@@ -1324,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1324 u32 mlx5_wol_mode; 1307 u32 mlx5_wol_mode;
1325 1308
1326 if (!wol_supported) 1309 if (!wol_supported)
1327 return -ENOTSUPP; 1310 return -EOPNOTSUPP;
1328 1311
1329 if (wol->wolopts & ~wol_supported) 1312 if (wol->wolopts & ~wol_supported)
1330 return -EINVAL; 1313 return -EINVAL;
@@ -1454,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
1454 1437
1455 if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE && 1438 if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
1456 !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)) 1439 !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
1457 return -ENOTSUPP; 1440 return -EOPNOTSUPP;
1458 1441
1459 if (!rx_mode_changed) 1442 if (!rx_mode_changed)
1460 return 0; 1443 return 0;
@@ -1480,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
1480 bool reset; 1463 bool reset;
1481 1464
1482 if (!MLX5_CAP_GEN(mdev, cqe_compression)) 1465 if (!MLX5_CAP_GEN(mdev, cqe_compression))
1483 return -ENOTSUPP; 1466 return -EOPNOTSUPP;
1484 1467
1485 if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { 1468 if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
1486 netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n"); 1469 netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1fe80de5d68f..a0e5a69402b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
1089 MLX5_FLOW_NAMESPACE_KERNEL); 1089 MLX5_FLOW_NAMESPACE_KERNEL);
1090 1090
1091 if (!priv->fs.ns) 1091 if (!priv->fs.ns)
1092 return -EINVAL; 1092 return -EOPNOTSUPP;
1093 1093
1094 err = mlx5e_arfs_create_tables(priv); 1094 err = mlx5e_arfs_create_tables(priv);
1095 if (err) { 1095 if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 3691451c728c..f33f72d0237c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
92 ns = mlx5_get_flow_namespace(priv->mdev, 92 ns = mlx5_get_flow_namespace(priv->mdev,
93 MLX5_FLOW_NAMESPACE_ETHTOOL); 93 MLX5_FLOW_NAMESPACE_ETHTOOL);
94 if (!ns) 94 if (!ns)
95 return ERR_PTR(-ENOTSUPP); 95 return ERR_PTR(-EOPNOTSUPP);
96 96
97 table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev, 97 table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
98 flow_table_properties_nic_receive.log_max_ft_size)), 98 flow_table_properties_nic_receive.log_max_ft_size)),
@@ -247,6 +247,7 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
247 } 247 }
248 if (fs->flow_type & FLOW_MAC_EXT && 248 if (fs->flow_type & FLOW_MAC_EXT &&
249 !is_zero_ether_addr(fs->m_ext.h_dest)) { 249 !is_zero_ether_addr(fs->m_ext.h_dest)) {
250 mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
250 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, 251 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
251 outer_headers_c, dmac_47_16), 252 outer_headers_c, dmac_47_16),
252 fs->m_ext.h_dest); 253 fs->m_ext.h_dest);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cbfa38fc72c0..f14ca3385fdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -291,36 +291,12 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
291 &qcnt->rx_out_of_buffer); 291 &qcnt->rx_out_of_buffer);
292} 292}
293 293
294static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
295{
296 struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
297 struct mlx5_core_dev *mdev = priv->mdev;
298 int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
299 void *out;
300 u32 *in;
301
302 in = mlx5_vzalloc(sz);
303 if (!in)
304 return;
305
306 out = pcie_stats->pcie_perf_counters;
307 MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
308 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
309
310 out = pcie_stats->pcie_tas_counters;
311 MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
312 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
313
314 kvfree(in);
315}
316
317void mlx5e_update_stats(struct mlx5e_priv *priv) 294void mlx5e_update_stats(struct mlx5e_priv *priv)
318{ 295{
319 mlx5e_update_q_counter(priv); 296 mlx5e_update_q_counter(priv);
320 mlx5e_update_vport_counters(priv); 297 mlx5e_update_vport_counters(priv);
321 mlx5e_update_pport_counters(priv); 298 mlx5e_update_pport_counters(priv);
322 mlx5e_update_sw_counters(priv); 299 mlx5e_update_sw_counters(priv);
323 mlx5e_update_pcie_counters(priv);
324} 300}
325 301
326void mlx5e_update_stats_work(struct work_struct *work) 302void mlx5e_update_stats_work(struct work_struct *work)
@@ -2046,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
2046 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout); 2022 MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
2047} 2023}
2048 2024
2049void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv) 2025void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
2026 enum mlx5e_traffic_types tt)
2050{ 2027{
2028 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2029
2030#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
2031 MLX5_HASH_FIELD_SEL_DST_IP)
2032
2033#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
2034 MLX5_HASH_FIELD_SEL_DST_IP |\
2035 MLX5_HASH_FIELD_SEL_L4_SPORT |\
2036 MLX5_HASH_FIELD_SEL_L4_DPORT)
2037
2038#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
2039 MLX5_HASH_FIELD_SEL_DST_IP |\
2040 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2041
2051 MLX5_SET(tirc, tirc, rx_hash_fn, 2042 MLX5_SET(tirc, tirc, rx_hash_fn,
2052 mlx5e_rx_hash_fn(priv->params.rss_hfunc)); 2043 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
2053 if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) { 2044 if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2059,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
2059 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); 2050 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2060 memcpy(rss_key, priv->params.toeplitz_hash_key, len); 2051 memcpy(rss_key, priv->params.toeplitz_hash_key, len);
2061 } 2052 }
2053
2054 switch (tt) {
2055 case MLX5E_TT_IPV4_TCP:
2056 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2057 MLX5_L3_PROT_TYPE_IPV4);
2058 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2059 MLX5_L4_PROT_TYPE_TCP);
2060 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2061 MLX5_HASH_IP_L4PORTS);
2062 break;
2063
2064 case MLX5E_TT_IPV6_TCP:
2065 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2066 MLX5_L3_PROT_TYPE_IPV6);
2067 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2068 MLX5_L4_PROT_TYPE_TCP);
2069 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2070 MLX5_HASH_IP_L4PORTS);
2071 break;
2072
2073 case MLX5E_TT_IPV4_UDP:
2074 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2075 MLX5_L3_PROT_TYPE_IPV4);
2076 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2077 MLX5_L4_PROT_TYPE_UDP);
2078 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2079 MLX5_HASH_IP_L4PORTS);
2080 break;
2081
2082 case MLX5E_TT_IPV6_UDP:
2083 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2084 MLX5_L3_PROT_TYPE_IPV6);
2085 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2086 MLX5_L4_PROT_TYPE_UDP);
2087 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2088 MLX5_HASH_IP_L4PORTS);
2089 break;
2090
2091 case MLX5E_TT_IPV4_IPSEC_AH:
2092 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2093 MLX5_L3_PROT_TYPE_IPV4);
2094 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2095 MLX5_HASH_IP_IPSEC_SPI);
2096 break;
2097
2098 case MLX5E_TT_IPV6_IPSEC_AH:
2099 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2100 MLX5_L3_PROT_TYPE_IPV6);
2101 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2102 MLX5_HASH_IP_IPSEC_SPI);
2103 break;
2104
2105 case MLX5E_TT_IPV4_IPSEC_ESP:
2106 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2107 MLX5_L3_PROT_TYPE_IPV4);
2108 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2109 MLX5_HASH_IP_IPSEC_SPI);
2110 break;
2111
2112 case MLX5E_TT_IPV6_IPSEC_ESP:
2113 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2114 MLX5_L3_PROT_TYPE_IPV6);
2115 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2116 MLX5_HASH_IP_IPSEC_SPI);
2117 break;
2118
2119 case MLX5E_TT_IPV4:
2120 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2121 MLX5_L3_PROT_TYPE_IPV4);
2122 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2123 MLX5_HASH_IP);
2124 break;
2125
2126 case MLX5E_TT_IPV6:
2127 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2128 MLX5_L3_PROT_TYPE_IPV6);
2129 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2130 MLX5_HASH_IP);
2131 break;
2132 default:
2133 WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
2134 }
2062} 2135}
2063 2136
2064static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) 2137static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2428,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
2428static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, 2501static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
2429 enum mlx5e_traffic_types tt) 2502 enum mlx5e_traffic_types tt)
2430{ 2503{
2431 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2432
2433 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); 2504 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2434 2505
2435#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
2436 MLX5_HASH_FIELD_SEL_DST_IP)
2437
2438#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
2439 MLX5_HASH_FIELD_SEL_DST_IP |\
2440 MLX5_HASH_FIELD_SEL_L4_SPORT |\
2441 MLX5_HASH_FIELD_SEL_L4_DPORT)
2442
2443#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
2444 MLX5_HASH_FIELD_SEL_DST_IP |\
2445 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2446
2447 mlx5e_build_tir_ctx_lro(tirc, priv); 2506 mlx5e_build_tir_ctx_lro(tirc, priv);
2448 2507
2449 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); 2508 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2450 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); 2509 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2451 mlx5e_build_tir_ctx_hash(tirc, priv); 2510 mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
2452
2453 switch (tt) {
2454 case MLX5E_TT_IPV4_TCP:
2455 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2456 MLX5_L3_PROT_TYPE_IPV4);
2457 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2458 MLX5_L4_PROT_TYPE_TCP);
2459 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2460 MLX5_HASH_IP_L4PORTS);
2461 break;
2462
2463 case MLX5E_TT_IPV6_TCP:
2464 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2465 MLX5_L3_PROT_TYPE_IPV6);
2466 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2467 MLX5_L4_PROT_TYPE_TCP);
2468 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2469 MLX5_HASH_IP_L4PORTS);
2470 break;
2471
2472 case MLX5E_TT_IPV4_UDP:
2473 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2474 MLX5_L3_PROT_TYPE_IPV4);
2475 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2476 MLX5_L4_PROT_TYPE_UDP);
2477 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2478 MLX5_HASH_IP_L4PORTS);
2479 break;
2480
2481 case MLX5E_TT_IPV6_UDP:
2482 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2483 MLX5_L3_PROT_TYPE_IPV6);
2484 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2485 MLX5_L4_PROT_TYPE_UDP);
2486 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2487 MLX5_HASH_IP_L4PORTS);
2488 break;
2489
2490 case MLX5E_TT_IPV4_IPSEC_AH:
2491 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2492 MLX5_L3_PROT_TYPE_IPV4);
2493 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2494 MLX5_HASH_IP_IPSEC_SPI);
2495 break;
2496
2497 case MLX5E_TT_IPV6_IPSEC_AH:
2498 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2499 MLX5_L3_PROT_TYPE_IPV6);
2500 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2501 MLX5_HASH_IP_IPSEC_SPI);
2502 break;
2503
2504 case MLX5E_TT_IPV4_IPSEC_ESP:
2505 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2506 MLX5_L3_PROT_TYPE_IPV4);
2507 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2508 MLX5_HASH_IP_IPSEC_SPI);
2509 break;
2510
2511 case MLX5E_TT_IPV6_IPSEC_ESP:
2512 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2513 MLX5_L3_PROT_TYPE_IPV6);
2514 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2515 MLX5_HASH_IP_IPSEC_SPI);
2516 break;
2517
2518 case MLX5E_TT_IPV4:
2519 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2520 MLX5_L3_PROT_TYPE_IPV4);
2521 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2522 MLX5_HASH_IP);
2523 break;
2524
2525 case MLX5E_TT_IPV6:
2526 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2527 MLX5_L3_PROT_TYPE_IPV6);
2528 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2529 MLX5_HASH_IP);
2530 break;
2531 default:
2532 WARN_ONCE(true,
2533 "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
2534 }
2535} 2511}
2536 2512
2537static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, 2513static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -3355,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
3355static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 3331static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3356{ 3332{
3357 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 3333 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
3358 return -ENOTSUPP; 3334 return -EOPNOTSUPP;
3359 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) || 3335 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
3360 !MLX5_CAP_GEN(mdev, nic_flow_table) || 3336 !MLX5_CAP_GEN(mdev, nic_flow_table) ||
3361 !MLX5_CAP_ETH(mdev, csum_cap) || 3337 !MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3367,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
3367 < 3) { 3343 < 3) {
3368 mlx5_core_warn(mdev, 3344 mlx5_core_warn(mdev,
3369 "Not creating net device, some required device capabilities are missing\n"); 3345 "Not creating net device, some required device capabilities are missing\n");
3370 return -ENOTSUPP; 3346 return -EOPNOTSUPP;
3371 } 3347 }
3372 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable)) 3348 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
3373 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n"); 3349 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
@@ -3699,14 +3675,8 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
3699 3675
3700static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) 3676static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
3701{ 3677{
3702 struct mlx5_core_dev *mdev = priv->mdev;
3703 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3704
3705 mlx5e_vxlan_cleanup(priv); 3678 mlx5e_vxlan_cleanup(priv);
3706 3679
3707 if (MLX5_CAP_GEN(mdev, vport_group_manager))
3708 mlx5_eswitch_unregister_vport_rep(esw, 0);
3709
3710 if (priv->xdp_prog) 3680 if (priv->xdp_prog)
3711 bpf_prog_put(priv->xdp_prog); 3681 bpf_prog_put(priv->xdp_prog);
3712} 3682}
@@ -3805,14 +3775,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
3805 3775
3806 mlx5_lag_add(mdev, netdev); 3776 mlx5_lag_add(mdev, netdev);
3807 3777
3808 if (mlx5e_vxlan_allowed(mdev)) {
3809 rtnl_lock();
3810 udp_tunnel_get_rx_info(netdev);
3811 rtnl_unlock();
3812 }
3813
3814 mlx5e_enable_async_events(priv); 3778 mlx5e_enable_async_events(priv);
3815 queue_work(priv->wq, &priv->set_rx_mode_work);
3816 3779
3817 if (MLX5_CAP_GEN(mdev, vport_group_manager)) { 3780 if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
3818 mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id); 3781 mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
@@ -3822,13 +3785,30 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
3822 rep.netdev = netdev; 3785 rep.netdev = netdev;
3823 mlx5_eswitch_register_vport_rep(esw, 0, &rep); 3786 mlx5_eswitch_register_vport_rep(esw, 0, &rep);
3824 } 3787 }
3788
3789 if (netdev->reg_state != NETREG_REGISTERED)
3790 return;
3791
3792 /* Device already registered: sync netdev system state */
3793 if (mlx5e_vxlan_allowed(mdev)) {
3794 rtnl_lock();
3795 udp_tunnel_get_rx_info(netdev);
3796 rtnl_unlock();
3797 }
3798
3799 queue_work(priv->wq, &priv->set_rx_mode_work);
3825} 3800}
3826 3801
3827static void mlx5e_nic_disable(struct mlx5e_priv *priv) 3802static void mlx5e_nic_disable(struct mlx5e_priv *priv)
3828{ 3803{
3804 struct mlx5_core_dev *mdev = priv->mdev;
3805 struct mlx5_eswitch *esw = mdev->priv.eswitch;
3806
3829 queue_work(priv->wq, &priv->set_rx_mode_work); 3807 queue_work(priv->wq, &priv->set_rx_mode_work);
3808 if (MLX5_CAP_GEN(mdev, vport_group_manager))
3809 mlx5_eswitch_unregister_vport_rep(esw, 0);
3830 mlx5e_disable_async_events(priv); 3810 mlx5e_disable_async_events(priv);
3831 mlx5_lag_remove(priv->mdev); 3811 mlx5_lag_remove(mdev);
3832} 3812}
3833 3813
3834static const struct mlx5e_profile mlx5e_nic_profile = { 3814static const struct mlx5e_profile mlx5e_nic_profile = {
@@ -3966,10 +3946,6 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
3966 const struct mlx5e_profile *profile = priv->profile; 3946 const struct mlx5e_profile *profile = priv->profile;
3967 3947
3968 set_bit(MLX5E_STATE_DESTROYING, &priv->state); 3948 set_bit(MLX5E_STATE_DESTROYING, &priv->state);
3969 if (profile->disable)
3970 profile->disable(priv);
3971
3972 flush_workqueue(priv->wq);
3973 3949
3974 rtnl_lock(); 3950 rtnl_lock();
3975 if (netif_running(netdev)) 3951 if (netif_running(netdev))
@@ -3977,6 +3953,10 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
3977 netif_device_detach(netdev); 3953 netif_device_detach(netdev);
3978 rtnl_unlock(); 3954 rtnl_unlock();
3979 3955
3956 if (profile->disable)
3957 profile->disable(priv);
3958 flush_workqueue(priv->wq);
3959
3980 mlx5e_destroy_q_counter(priv); 3960 mlx5e_destroy_q_counter(priv);
3981 profile->cleanup_rx(priv); 3961 profile->cleanup_rx(priv);
3982 mlx5e_close_drop_rq(priv); 3962 mlx5e_close_drop_rq(priv);
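The mlx5e_detach_netdev() reordering above runs profile->disable() and the workqueue flush only after the netdev has been closed and detached, so work queued by the close path is still caught by the flush and nothing can queue new work afterwards. The shape of the fix, with hypothetical stand-ins for the driver calls:

    #include <stdio.h>

    static void netdev_close_and_detach(void) { printf("netdev detached, no new work sources\n"); }
    static void profile_disable(void)         { printf("async event sources disabled\n"); }
    static void flush_wq(void)                { printf("workqueue flushed\n"); }

    int main(void)
    {
            /* detach first: close may queue work, and the flush must see it */
            netdev_close_and_detach();
            profile_disable();
            flush_wq();
            /* only now is it safe to tear down queues and counters */
            return 0;
    }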
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 0e2fb3ed1790..06d5e6fecb0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -193,6 +193,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
193 return false; 193 return false;
194 } 194 }
195 195
196 if (unlikely(page_is_pfmemalloc(dma_info->page)))
197 return false;
198
196 cache->page_cache[cache->tail] = *dma_info; 199 cache->page_cache[cache->tail] = *dma_info;
197 cache->tail = tail_next; 200 cache->tail = tail_next;
198 return true; 201 return true;
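The en_rx.c hunk keeps pages allocated from the emergency (pfmemalloc) reserves out of the driver's RX page cache: such pages exist to service reclaim under memory pressure and must return to the allocator rather than be recycled indefinitely. A minimal model of the guard (struct and names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct page_info { bool pfmemalloc; };

    #define CACHE_SZ 8
    static struct page_info *cache[CACHE_SZ];
    static int tail;

    static bool cache_put(struct page_info *pi)
    {
            if (tail == CACHE_SZ)
                    return false;          /* cache full */
            /* emergency-reserve pages must go back to the allocator,
             * not linger in the driver cache */
            if (pi->pfmemalloc)
                    return false;
            cache[tail++] = pi;
            return true;
    }

    int main(void)
    {
            struct page_info normal  = { .pfmemalloc = false };
            struct page_info reserve = { .pfmemalloc = true };

            printf("%d %d\n", cache_put(&normal), cache_put(&reserve)); /* 1 0 */
            return 0;
    }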
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index 1fffe48a93cc..cbfac06b7ffd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -109,7 +109,6 @@ static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
109 switch (am->tune_state) { 109 switch (am->tune_state) {
110 case MLX5E_AM_PARKING_ON_TOP: 110 case MLX5E_AM_PARKING_ON_TOP:
111 case MLX5E_AM_PARKING_TIRED: 111 case MLX5E_AM_PARKING_TIRED:
112 WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
113 return true; 112 return true;
114 case MLX5E_AM_GOING_RIGHT: 113 case MLX5E_AM_GOING_RIGHT:
115 return (am->steps_left > 1) && (am->steps_right == 1); 114 return (am->steps_left > 1) && (am->steps_right == 1);
@@ -123,7 +122,6 @@ static void mlx5e_am_turn(struct mlx5e_rx_am *am)
123 switch (am->tune_state) { 122 switch (am->tune_state) {
124 case MLX5E_AM_PARKING_ON_TOP: 123 case MLX5E_AM_PARKING_ON_TOP:
125 case MLX5E_AM_PARKING_TIRED: 124 case MLX5E_AM_PARKING_TIRED:
126 WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
127 break; 125 break;
128 case MLX5E_AM_GOING_RIGHT: 126 case MLX5E_AM_GOING_RIGHT:
129 am->tune_state = MLX5E_AM_GOING_LEFT; 127 am->tune_state = MLX5E_AM_GOING_LEFT;
@@ -144,7 +142,6 @@ static int mlx5e_am_step(struct mlx5e_rx_am *am)
144 switch (am->tune_state) { 142 switch (am->tune_state) {
145 case MLX5E_AM_PARKING_ON_TOP: 143 case MLX5E_AM_PARKING_ON_TOP:
146 case MLX5E_AM_PARKING_TIRED: 144 case MLX5E_AM_PARKING_TIRED:
147 WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
148 break; 145 break;
149 case MLX5E_AM_GOING_RIGHT: 146 case MLX5E_AM_GOING_RIGHT:
150 if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1)) 147 if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
@@ -282,10 +279,8 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
282 u32 delta_us = ktime_us_delta(end->time, start->time); 279 u32 delta_us = ktime_us_delta(end->time, start->time);
283 unsigned int npkts = end->pkt_ctr - start->pkt_ctr; 280 unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
284 281
285 if (!delta_us) { 282 if (!delta_us)
286 WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
287 return; 283 return;
288 }
289 284
290 curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; 285 curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
291 curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; 286 curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index f202f872f57f..ba5db1dd23a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -39,7 +39,7 @@
39#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \ 39#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
40 (*(u32 *)((char *)ptr + dsc[i].offset)) 40 (*(u32 *)((char *)ptr + dsc[i].offset))
41#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \ 41#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
42 be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) 42 be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
43 43
44#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) 44#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
45#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) 45#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
@@ -276,32 +276,6 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
276 { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, 276 { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
277}; 277};
278 278
279#define PCIE_PERF_OFF(c) \
280 MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
281#define PCIE_PERF_GET(pcie_stats, c) \
282 MLX5_GET(mpcnt_reg, pcie_stats->pcie_perf_counters, \
283 counter_set.pcie_perf_cntrs_grp_data_layout.c)
284#define PCIE_TAS_OFF(c) \
285 MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_tas_cntrs_grp_data_layout.c)
286#define PCIE_TAS_GET(pcie_stats, c) \
287 MLX5_GET(mpcnt_reg, pcie_stats->pcie_tas_counters, \
288 counter_set.pcie_tas_cntrs_grp_data_layout.c)
289
290struct mlx5e_pcie_stats {
291 __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
292 __be64 pcie_tas_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
293};
294
295static const struct counter_desc pcie_perf_stats_desc[] = {
296 { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
297 { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
298};
299
300static const struct counter_desc pcie_tas_stats_desc[] = {
301 { "tx_pci_transport_nonfatal_msg", PCIE_TAS_OFF(non_fatal_err_msg_sent) },
302 { "tx_pci_transport_fatal_msg", PCIE_TAS_OFF(fatal_err_msg_sent) },
303};
304
305struct mlx5e_rq_stats { 279struct mlx5e_rq_stats {
306 u64 packets; 280 u64 packets;
307 u64 bytes; 281 u64 bytes;
@@ -386,8 +360,6 @@ static const struct counter_desc sq_stats_desc[] = {
386#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) 360#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
387#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) 361#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
388#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) 362#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
389#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
390#define NUM_PCIE_TAS_COUNTERS ARRAY_SIZE(pcie_tas_stats_desc)
391#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ 363#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
392 ARRAY_SIZE(pport_per_prio_traffic_stats_desc) 364 ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
393#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ 365#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
@@ -397,7 +369,6 @@ static const struct counter_desc sq_stats_desc[] = {
397 NUM_PPORT_2819_COUNTERS + \ 369 NUM_PPORT_2819_COUNTERS + \
398 NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ 370 NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
399 NUM_PPORT_PRIO) 371 NUM_PPORT_PRIO)
400#define NUM_PCIE_COUNTERS (NUM_PCIE_PERF_COUNTERS + NUM_PCIE_TAS_COUNTERS)
401#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) 372#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
402#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) 373#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
403 374
@@ -406,7 +377,6 @@ struct mlx5e_stats {
406 struct mlx5e_qcounter_stats qcnt; 377 struct mlx5e_qcounter_stats qcnt;
407 struct mlx5e_vport_stats vport; 378 struct mlx5e_vport_stats vport;
408 struct mlx5e_pport_stats pport; 379 struct mlx5e_pport_stats pport;
409 struct mlx5e_pcie_stats pcie;
410 struct rtnl_link_stats64 vf_vport; 380 struct rtnl_link_stats64 vf_vport;
411}; 381};
412 382
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index f8829b517156..2ebbe80d8126 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -161,15 +161,21 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
161 } 161 }
162} 162}
163 163
164/* we get here also when setting rule to the FW failed, etc. It means that the
165 * flow rule itself might not exist, but some offloading related to the actions
166 * should be cleaned.
167 */
164static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, 168static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
165 struct mlx5e_tc_flow *flow) 169 struct mlx5e_tc_flow *flow)
166{ 170{
167 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 171 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
168 struct mlx5_fc *counter = NULL; 172 struct mlx5_fc *counter = NULL;
169 173
170 counter = mlx5_flow_rule_counter(flow->rule); 174 if (!IS_ERR(flow->rule)) {
171 175 counter = mlx5_flow_rule_counter(flow->rule);
172 mlx5_del_flow_rules(flow->rule); 176 mlx5_del_flow_rules(flow->rule);
177 mlx5_fc_destroy(priv->mdev, counter);
178 }
173 179
174 if (esw && esw->mode == SRIOV_OFFLOADS) { 180 if (esw && esw->mode == SRIOV_OFFLOADS) {
175 mlx5_eswitch_del_vlan_action(esw, flow->attr); 181 mlx5_eswitch_del_vlan_action(esw, flow->attr);
@@ -177,8 +183,6 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
177 mlx5e_detach_encap(priv, flow); 183 mlx5e_detach_encap(priv, flow);
178 } 184 }
179 185
180 mlx5_fc_destroy(priv->mdev, counter);
181
182 if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { 186 if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
183 mlx5_destroy_flow_table(priv->fs.tc.t); 187 mlx5_destroy_flow_table(priv->fs.tc.t);
184 priv->fs.tc.t = NULL; 188 priv->fs.tc.t = NULL;
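Per the new comment, mlx5e_tc_del_flow() can now run on a flow whose rule was never installed (flow->rule holds an ERR_PTR), so the rule/counter teardown is gated on !IS_ERR() while the action-side cleanup still runs. A compilable sketch using simplified versions of the kernel's ERR_PTR macros:

    #include <stdio.h>

    /* minimal stand-ins for the kernel ERR_PTR machinery */
    #define MAX_ERRNO 4095
    #define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
    #define ERR_PTR(e) ((void *)(long)(e))

    struct rule { int id; };

    static void del_rule(struct rule *r) { printf("deleted rule %d\n", r->id); }
    static void del_actions(void)        { printf("cleaned up action offloads\n"); }

    static void del_flow(struct rule *r)
    {
            /* the rule may never have been installed (ERR_PTR), but the
             * action-side offloads still need cleaning */
            if (!IS_ERR(r))
                    del_rule(r);
            del_actions();
    }

    int main(void)
    {
            struct rule ok = { .id = 1 };

            del_flow(&ok);
            del_flow(ERR_PTR(-95));   /* -EOPNOTSUPP: skip del_rule, still clean */
            return 0;
    }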
@@ -225,6 +229,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
225 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 229 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
226 outer_headers); 230 outer_headers);
227 231
232 struct flow_dissector_key_control *enc_control =
233 skb_flow_dissector_target(f->dissector,
234 FLOW_DISSECTOR_KEY_ENC_CONTROL,
235 f->key);
236
228 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { 237 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
229 struct flow_dissector_key_ports *key = 238 struct flow_dissector_key_ports *key =
230 skb_flow_dissector_target(f->dissector, 239 skb_flow_dissector_target(f->dissector,
@@ -237,28 +246,34 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
237 246
238 /* Full udp dst port must be given */ 247 /* Full udp dst port must be given */
239 if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) 248 if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
240 return -EOPNOTSUPP; 249 goto vxlan_match_offload_err;
241
242 /* udp src port isn't supported */
243 if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
244 return -EOPNOTSUPP;
245 250
246 if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) && 251 if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
247 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) 252 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
248 parse_vxlan_attr(spec, f); 253 parse_vxlan_attr(spec, f);
249 else 254 else {
255 netdev_warn(priv->netdev,
256 "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
250 return -EOPNOTSUPP; 257 return -EOPNOTSUPP;
258 }
251 259
252 MLX5_SET(fte_match_set_lyr_2_4, headers_c, 260 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
253 udp_dport, ntohs(mask->dst)); 261 udp_dport, ntohs(mask->dst));
254 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 262 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
255 udp_dport, ntohs(key->dst)); 263 udp_dport, ntohs(key->dst));
256 264
265 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
266 udp_sport, ntohs(mask->src));
267 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
268 udp_sport, ntohs(key->src));
257 } else { /* udp dst port must be given */ 269 } else { /* udp dst port must be given */
258 return -EOPNOTSUPP; 270vxlan_match_offload_err:
271 netdev_warn(priv->netdev,
272 "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
273 return -EOPNOTSUPP;
259 } 274 }
260 275
261 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { 276 if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
262 struct flow_dissector_key_ipv4_addrs *key = 277 struct flow_dissector_key_ipv4_addrs *key =
263 skb_flow_dissector_target(f->dissector, 278 skb_flow_dissector_target(f->dissector,
264 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, 279 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
@@ -280,10 +295,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
280 MLX5_SET(fte_match_set_lyr_2_4, headers_v, 295 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
281 dst_ipv4_dst_ipv6.ipv4_layout.ipv4, 296 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
282 ntohl(key->dst)); 297 ntohl(key->dst));
283 }
284 298
285 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); 299 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
286 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); 300 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
301 }
287 302
288 /* Enforce DMAC when offloading incoming tunneled flows. 303 /* Enforce DMAC when offloading incoming tunneled flows.
289 * Flow counters require a match on the DMAC. 304 * Flow counters require a match on the DMAC.
@@ -346,6 +361,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
346 if (parse_tunnel_attr(priv, spec, f)) 361 if (parse_tunnel_attr(priv, spec, f))
347 return -EOPNOTSUPP; 362 return -EOPNOTSUPP;
348 break; 363 break;
364 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
365 netdev_warn(priv->netdev,
366 "IPv6 tunnel decap offload isn't supported\n");
349 default: 367 default:
350 return -EOPNOTSUPP; 368 return -EOPNOTSUPP;
351 } 369 }
@@ -375,6 +393,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
375 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); 393 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
376 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 394 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
377 key->flags & FLOW_DIS_IS_FRAGMENT); 395 key->flags & FLOW_DIS_IS_FRAGMENT);
396
397 /* the HW doesn't need L3 inline to match on frag=no */
398 if (key->flags & FLOW_DIS_IS_FRAGMENT)
399 *min_inline = MLX5_INLINE_MODE_IP;
378 } 400 }
379 } 401 }
380 402
@@ -641,26 +663,26 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
641 __be32 *saddr, 663 __be32 *saddr,
642 int *out_ttl) 664 int *out_ttl)
643{ 665{
666 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
644 struct rtable *rt; 667 struct rtable *rt;
645 struct neighbour *n = NULL; 668 struct neighbour *n = NULL;
646 int ttl; 669 int ttl;
647 670
648#if IS_ENABLED(CONFIG_INET) 671#if IS_ENABLED(CONFIG_INET)
672 int ret;
673
649 rt = ip_route_output_key(dev_net(mirred_dev), fl4); 674 rt = ip_route_output_key(dev_net(mirred_dev), fl4);
650 if (IS_ERR(rt)) { 675 ret = PTR_ERR_OR_ZERO(rt);
651 pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr); 676 if (ret)
652 return -EOPNOTSUPP; 677 return ret;
653 }
654#else 678#else
655 return -EOPNOTSUPP; 679 return -EOPNOTSUPP;
656#endif 680#endif
657 681 /* if the egress device isn't on the same HW e-switch, we use the uplink */
658 if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) { 682 if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
659 pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n", 683 *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
660 __func__); 684 else
661 ip_rt_put(rt); 685 *out_dev = rt->dst.dev;
662 return -EOPNOTSUPP;
663 }
664 686
665 ttl = ip4_dst_hoplimit(&rt->dst); 687 ttl = ip4_dst_hoplimit(&rt->dst);
666 n = dst_neigh_lookup(&rt->dst, &fl4->daddr); 688 n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -671,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
671 *out_n = n; 693 *out_n = n;
672 *saddr = fl4->saddr; 694 *saddr = fl4->saddr;
673 *out_ttl = ttl; 695 *out_ttl = ttl;
674 *out_dev = rt->dst.dev;
675 696
676 return 0; 697 return 0;
677} 698}
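The route-lookup rewrite above propagates the real error from ip_route_output_key() via PTR_ERR_OR_ZERO() instead of flattening every failure to -EOPNOTSUPP, and falls back to the e-switch uplink netdev rather than refusing egress devices on another switch. A sketch of the PTR_ERR_OR_ZERO idiom with local macro definitions (simplified from the kernel's err.h; route_lookup() is a placeholder):

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define IS_ERR(p)          ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(p)         ((long)(p))
    #define ERR_PTR(e)         ((void *)(long)(e))
    #define PTR_ERR_OR_ZERO(p) (IS_ERR(p) ? PTR_ERR(p) : 0)

    static void *route_lookup(int fail)
    {
            static int dummy_route;

            return fail ? ERR_PTR(-101) /* -ENETUNREACH */ : (void *)&dummy_route;
    }

    int main(void)
    {
            void *rt = route_lookup(1);
            long ret = PTR_ERR_OR_ZERO(rt);

            if (ret) {
                    /* propagate the real routing error instead of a
                     * blanket -EOPNOTSUPP */
                    printf("lookup failed: %ld\n", ret);
                    return 1;
            }
            printf("route ok\n");
            return 0;
    }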
@@ -718,8 +739,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
718 struct net_device **out_dev) 739 struct net_device **out_dev)
719{ 740{
720 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); 741 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
742 struct neighbour *n = NULL;
721 struct flowi4 fl4 = {}; 743 struct flowi4 fl4 = {};
722 struct neighbour *n;
723 char *encap_header; 744 char *encap_header;
724 int encap_size; 745 int encap_size;
725 __be32 saddr; 746 __be32 saddr;
@@ -750,7 +771,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
750 e->out_dev = *out_dev; 771 e->out_dev = *out_dev;
751 772
752 if (!(n->nud_state & NUD_VALID)) { 773 if (!(n->nud_state & NUD_VALID)) {
753 err = -ENOTSUPP; 774 pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
775 err = -EOPNOTSUPP;
754 goto out; 776 goto out;
755 } 777 }
756 778
@@ -772,6 +794,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
772 err = mlx5_encap_alloc(priv->mdev, e->tunnel_type, 794 err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
773 encap_size, encap_header, &e->encap_id); 795 encap_size, encap_header, &e->encap_id);
774out: 796out:
797 if (err && n)
798 neigh_release(n);
775 kfree(encap_header); 799 kfree(encap_header);
776 return err; 800 return err;
777} 801}
@@ -792,9 +816,17 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
792 int tunnel_type; 816 int tunnel_type;
793 int err; 817 int err;
794 818
795 /* udp dst port must be given */ 819 /* udp dst port must be set */
796 if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst))) 820 if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
821 goto vxlan_encap_offload_err;
822
823 /* setting udp src port isn't supported */
824 if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
825vxlan_encap_offload_err:
826 netdev_warn(priv->netdev,
827 "must set udp dst port and not set udp src port\n");
797 return -EOPNOTSUPP; 828 return -EOPNOTSUPP;
829 }
798 830
799 if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && 831 if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
800 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { 832 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
@@ -802,6 +834,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
802 info.tun_id = tunnel_id_to_key32(key->tun_id); 834 info.tun_id = tunnel_id_to_key32(key->tun_id);
803 tunnel_type = MLX5_HEADER_TYPE_VXLAN; 835 tunnel_type = MLX5_HEADER_TYPE_VXLAN;
804 } else { 836 } else {
837 netdev_warn(priv->netdev,
838 "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
805 return -EOPNOTSUPP; 839 return -EOPNOTSUPP;
806 } 840 }
807 841
@@ -809,6 +843,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
809 case AF_INET: 843 case AF_INET:
810 info.daddr = key->u.ipv4.dst; 844 info.daddr = key->u.ipv4.dst;
811 break; 845 break;
846 case AF_INET6:
847 netdev_warn(priv->netdev,
848 "IPv6 tunnel encap offload isn't supported\n");
812 default: 849 default:
813 return -EOPNOTSUPP; 850 return -EOPNOTSUPP;
814 } 851 }
@@ -986,7 +1023,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
986 1023
987 if (IS_ERR(flow->rule)) { 1024 if (IS_ERR(flow->rule)) {
988 err = PTR_ERR(flow->rule); 1025 err = PTR_ERR(flow->rule);
989 goto err_free; 1026 goto err_del_rule;
990 } 1027 }
991 1028
992 err = rhashtable_insert_fast(&tc->ht, &flow->node, 1029 err = rhashtable_insert_fast(&tc->ht, &flow->node,
@@ -997,7 +1034,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
997 goto out; 1034 goto out;
998 1035
999err_del_rule: 1036err_del_rule:
1000 mlx5_del_flow_rules(flow->rule); 1037 mlx5e_tc_del_flow(priv, flow);
1001 1038
1002err_free: 1039err_free:
1003 kfree(flow); 1040 kfree(flow);
@@ -1050,10 +1087,14 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
1050 1087
1051 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); 1088 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
1052 1089
1090 preempt_disable();
1091
1053 tcf_exts_to_list(f->exts, &actions); 1092 tcf_exts_to_list(f->exts, &actions);
1054 list_for_each_entry(a, &actions, list) 1093 list_for_each_entry(a, &actions, list)
1055 tcf_action_stats_update(a, bytes, packets, lastuse); 1094 tcf_action_stats_update(a, bytes, packets, lastuse);
1056 1095
1096 preempt_enable();
1097
1057 return 0; 1098 return 0;
1058} 1099}
1059 1100
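Wrapping tcf_action_stats_update() in preempt_disable()/preempt_enable() is needed because the stats upcall uses per-CPU counters that assume the caller cannot migrate mid-update. A schematic, compilable stand-in (the two preempt functions below are no-op placeholders for the kernel primitives, and stats_update() stands in for the per-CPU update):

    #include <stdio.h>

    static void preempt_disable(void) { /* kernel: pin the task to this CPU */ }
    static void preempt_enable(void)  { /* kernel: allow preemption again */ }

    static void stats_update(int i, unsigned long bytes)
    {
            /* kernel: this_cpu counters - only safe with preemption off */
            printf("action %d += %lu bytes\n", i, bytes);
    }

    int main(void)
    {
            int i;

            preempt_disable();
            for (i = 0; i < 3; i++)
                    stats_update(i, 1500);
            preempt_enable();
            return 0;
    }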
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index d6807c3cc461..d0c8bf014453 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
133 133
134 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || 134 if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
135 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) 135 !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
136 return -ENOTSUPP; 136 return -EOPNOTSUPP;
137 137
138 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n", 138 esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
139 vport, vlan, qos, set_flags); 139 vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
353 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); 353 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
354 if (!root_ns) { 354 if (!root_ns) {
355 esw_warn(dev, "Failed to get FDB flow namespace\n"); 355 esw_warn(dev, "Failed to get FDB flow namespace\n");
356 return -ENOMEM; 356 return -EOPNOTSUPP;
357 } 357 }
358 358
359 flow_group_in = mlx5_vzalloc(inlen); 359 flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
962 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); 962 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
963 if (!root_ns) { 963 if (!root_ns) {
964 esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); 964 esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
965 return -EIO; 965 return -EOPNOTSUPP;
966 } 966 }
967 967
968 flow_group_in = mlx5_vzalloc(inlen); 968 flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
1079 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); 1079 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
1080 if (!root_ns) { 1080 if (!root_ns) {
1081 esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); 1081 esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
1082 return -EIO; 1082 return -EOPNOTSUPP;
1083 } 1083 }
1084 1084
1085 flow_group_in = mlx5_vzalloc(inlen); 1085 flow_group_in = mlx5_vzalloc(inlen);
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
1630 if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || 1630 if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
1631 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) { 1631 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
1632 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n"); 1632 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
1633 return -ENOTSUPP; 1633 return -EOPNOTSUPP;
1634 } 1634 }
1635 1635
1636 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support)) 1636 if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
@@ -1860,7 +1860,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
1860 1860
1861 if (!ESW_ALLOWED(esw)) 1861 if (!ESW_ALLOWED(esw))
1862 return -EPERM; 1862 return -EPERM;
1863 if (!LEGAL_VPORT(esw, vport)) 1863 if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
1864 return -EINVAL; 1864 return -EINVAL;
1865 1865
1866 mutex_lock(&esw->state_lock); 1866 mutex_lock(&esw->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 466e161010f7..595f7c7383b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
166 return 0; 166 return 0;
167 167
168out_notsupp: 168out_notsupp:
169 return -ENOTSUPP; 169 return -EOPNOTSUPP;
170} 170}
171 171
172int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, 172int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
424 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); 424 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
425 if (!root_ns) { 425 if (!root_ns) {
426 esw_warn(dev, "Failed to get FDB flow namespace\n"); 426 esw_warn(dev, "Failed to get FDB flow namespace\n");
427 err = -EOPNOTSUPP;
427 goto ns_err; 428 goto ns_err;
428 } 429 }
429 430
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
535 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); 536 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
536 if (!ns) { 537 if (!ns) {
537 esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); 538 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
538 return -ENOMEM; 539 return -EOPNOTSUPP;
539 } 540 }
540 541
541 ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0); 542 ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
655 esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); 656 esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
656 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); 657 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
657 if (err1) 658 if (err1)
658 esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err); 659 esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
659 } 660 }
660 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { 661 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
661 if (mlx5_eswitch_inline_mode_get(esw, 662 if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
674 int vport; 675 int vport;
675 int err; 676 int err;
676 677
678 /* disable PF RoCE so missed packets don't go through RoCE steering */
679 mlx5_dev_list_lock();
680 mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
681 mlx5_dev_list_unlock();
682
677 err = esw_create_offloads_fdb_table(esw, nvports); 683 err = esw_create_offloads_fdb_table(esw, nvports);
678 if (err) 684 if (err)
679 return err; 685 goto create_fdb_err;
680 686
681 err = esw_create_offloads_table(esw); 687 err = esw_create_offloads_table(esw);
682 if (err) 688 if (err)
@@ -695,6 +701,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
695 if (err) 701 if (err)
696 goto err_reps; 702 goto err_reps;
697 } 703 }
704
698 return 0; 705 return 0;
699 706
700err_reps: 707err_reps:
@@ -711,6 +718,13 @@ create_fg_err:
711 718
712create_ft_err: 719create_ft_err:
713 esw_destroy_offloads_fdb_table(esw); 720 esw_destroy_offloads_fdb_table(esw);
721
722create_fdb_err:
723 /* enable back PF RoCE */
724 mlx5_dev_list_lock();
725 mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
726 mlx5_dev_list_unlock();
727
714 return err; 728 return err;
715} 729}
716 730
@@ -727,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
727 esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err); 741 esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
728 } 742 }
729 743
744 /* enable back PF RoCE */
745 mlx5_dev_list_lock();
746 mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
747 mlx5_dev_list_unlock();
748
730 return err; 749 return err;
731} 750}
732 751
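
esw_offloads_init() now brackets the whole setup with removal of the IB (RoCE) interface, so packets the offloads tables miss cannot be steered into RoCE while the tables are being built; every error path, and esw_offloads_stop(), re-adds the interface. A compilable sketch of the goto-unwind shape, with hypothetical step_*()/undo_*() helpers standing in for the real table-creation calls:

#include <stdio.h>

static void disable_roce(void) { puts("roce off"); }      /* mlx5_remove_dev_by_protocol() */
static void enable_roce(void)  { puts("roce on"); }       /* mlx5_add_dev_by_protocol() */
static int step_fdb(void)      { return 0; }              /* esw_create_offloads_fdb_table() */
static int step_table(void)    { return -1; }             /* esw_create_offloads_table(): fails here */
static void undo_fdb(void)     { puts("fdb destroyed"); }

static int offloads_init(void)
{
	int err;

	disable_roce();            /* missed packets must not hit RoCE steering */

	err = step_fdb();
	if (err)
		goto create_fdb_err;

	err = step_table();
	if (err)
		goto create_ft_err;

	return 0;                  /* success: RoCE stays off until offloads stop */

create_ft_err:
	undo_fdb();
create_fdb_err:
	enable_roce();             /* any failure re-enables PF RoCE */
	return err;
}

int main(void)
{
	printf("err = %d\n", offloads_init());
	return 0;
}
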
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index c4478ecd8056..b53fc85a2375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
322 flow_table_properties_nic_receive. 322 flow_table_properties_nic_receive.
323 flow_modify_en); 323 flow_modify_en);
324 if (!atomic_mod_cap) 324 if (!atomic_mod_cap)
325 return -ENOTSUPP; 325 return -EOPNOTSUPP;
326 opmod = 1; 326 opmod = 1;
327 327
328 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte); 328 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index a263d8904a4c..6346a8f5883b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1263,6 +1263,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1263 nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD); 1263 nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
1264 handle = add_rule_fte(fte, fg, dest, dest_num, false); 1264 handle = add_rule_fte(fte, fg, dest, dest_num, false);
1265 if (IS_ERR(handle)) { 1265 if (IS_ERR(handle)) {
1266 unlock_ref_node(&fte->node);
1266 kfree(fte); 1267 kfree(fte);
1267 goto unlock_fg; 1268 goto unlock_fg;
1268 } 1269 }
@@ -1821,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
1821 struct mlx5_flow_table *ft; 1822 struct mlx5_flow_table *ft;
1822 1823
1823 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR); 1824 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
1824 if (!ns) 1825 if (WARN_ON(!ns))
1825 return -EINVAL; 1826 return -EINVAL;
1826 ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0); 1827 ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
1827 if (IS_ERR(ft)) { 1828 if (IS_ERR(ft)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 54e5a786f191..3c315eb8d270 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -503,6 +503,13 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
503 MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size, 503 MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
504 to_fw_pkey_sz(dev, 128)); 504 to_fw_pkey_sz(dev, 128));
505 505
506 /* Check log_max_qp from HCA caps to set in current profile */
507 if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
508 mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
509 profile[prof_sel].log_max_qp,
510 MLX5_CAP_GEN_MAX(dev, log_max_qp));
511 profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
512 }
506 if (prof->mask & MLX5_PROF_MASK_QP_SIZE) 513 if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
507 MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp, 514 MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
508 prof->log_max_qp); 515 prof->log_max_qp);
@@ -575,7 +582,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
575 struct mlx5_priv *priv = &mdev->priv; 582 struct mlx5_priv *priv = &mdev->priv;
576 struct msix_entry *msix = priv->msix_arr; 583 struct msix_entry *msix = priv->msix_arr;
577 int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; 584 int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
578 int numa_node = priv->numa_node;
579 int err; 585 int err;
580 586
581 if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { 587 if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -583,7 +589,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
583 return -ENOMEM; 589 return -ENOMEM;
584 } 590 }
585 591
586 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 592 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
587 priv->irq_info[i].mask); 593 priv->irq_info[i].mask);
588 594
589 err = irq_set_affinity_hint(irq, priv->irq_info[i].mask); 595 err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
@@ -801,7 +807,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
801 return 0; 807 return 0;
802 } 808 }
803 809
804 return -ENOTSUPP; 810 return -EOPNOTSUPP;
805} 811}
806 812
807 813
@@ -1189,6 +1195,9 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1189{ 1195{
1190 int err = 0; 1196 int err = 0;
1191 1197
1198 if (cleanup)
1199 mlx5_drain_health_wq(dev);
1200
1192 mutex_lock(&dev->intf_state_mutex); 1201 mutex_lock(&dev->intf_state_mutex);
1193 if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) { 1202 if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
1194 dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", 1203 dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
@@ -1351,7 +1360,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
1351 1360
1352 mlx5_enter_error_state(dev); 1361 mlx5_enter_error_state(dev);
1353 mlx5_unload_one(dev, priv, false); 1362 mlx5_unload_one(dev, priv, false);
1354 /* In case of kernel call save the pci state and drain health wq */ 1363 /* In case of kernel call save the pci state and drain the health wq */
1355 if (state) { 1364 if (state) {
1356 pci_save_state(pdev); 1365 pci_save_state(pdev);
1357 mlx5_drain_health_wq(dev); 1366 mlx5_drain_health_wq(dev);
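
Two independent fixes land in main.c: handle_hca_cap() clamps the profile's log_max_qp to the limit the HCA reports instead of requesting more QPs than the device supports, and mlx5_unload_one() drains the health workqueue before taking intf_state_mutex when called for cleanup, so pending recovery work cannot run against a device that is going away. The clamp is the usual warn-and-degrade pattern; a small runnable sketch with illustrative names:

#include <stdio.h>

/* Mirror of the handle_hca_cap() clamp (names are illustrative):
 * never request more than the device capability, but say so.
 */
static unsigned int clamp_log_max_qp(unsigned int profile_val,
				     unsigned int hca_cap)
{
	if (hca_cap < profile_val) {
		fprintf(stderr,
			"log_max_qp %u exceeds HCA limit, using %u\n",
			profile_val, hca_cap);
		return hca_cap;
	}
	return profile_val;
}

int main(void)
{
	printf("%u\n", clamp_log_max_qp(18, 16)); /* warns, prints 16 */
	printf("%u\n", clamp_log_max_qp(12, 16)); /* prints 12 unchanged */
	return 0;
}
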
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index d2ec9d232a70..fd12e0a377a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -620,7 +620,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
620 u32 out[MLX5_ST_SZ_DW(qtct_reg)]; 620 u32 out[MLX5_ST_SZ_DW(qtct_reg)];
621 621
622 if (!MLX5_CAP_GEN(mdev, ets)) 622 if (!MLX5_CAP_GEN(mdev, ets))
623 return -ENOTSUPP; 623 return -EOPNOTSUPP;
624 624
625 return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out), 625 return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
626 MLX5_REG_QETCR, 0, 1); 626 MLX5_REG_QETCR, 0, 1);
@@ -632,7 +632,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
632 u32 in[MLX5_ST_SZ_DW(qtct_reg)]; 632 u32 in[MLX5_ST_SZ_DW(qtct_reg)];
633 633
634 if (!MLX5_CAP_GEN(mdev, ets)) 634 if (!MLX5_CAP_GEN(mdev, ets))
635 return -ENOTSUPP; 635 return -EOPNOTSUPP;
636 636
637 memset(in, 0, sizeof(in)); 637 memset(in, 0, sizeof(in));
638 return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen, 638 return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 269e4401c342..7129c30a2ab4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -532,7 +532,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
532 if (!MLX5_CAP_GEN(mdev, vport_group_manager)) 532 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
533 return -EACCES; 533 return -EACCES;
534 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) 534 if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
535 return -ENOTSUPP; 535 return -EOPNOTSUPP;
536 536
537 in = mlx5_vzalloc(inlen); 537 in = mlx5_vzalloc(inlen);
538 if (!in) 538 if (!in)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index d147ddd97997..0af3338bfcb4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -209,21 +209,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
209/* pci_eqe_cmd_token 209/* pci_eqe_cmd_token
210 * Command completion event - token 210 * Command completion event - token
211 */ 211 */
212MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16); 212MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
213 213
214/* pci_eqe_cmd_status 214/* pci_eqe_cmd_status
215 * Command completion event - status 215 * Command completion event - status
216 */ 216 */
217MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8); 217MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
218 218
219/* pci_eqe_cmd_out_param_h 219/* pci_eqe_cmd_out_param_h
220 * Command completion event - output parameter - higher part 220 * Command completion event - output parameter - higher part
221 */ 221 */
222MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32); 222MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
223 223
224/* pci_eqe_cmd_out_param_l 224/* pci_eqe_cmd_out_param_l
225 * Command completion event - output parameter - lower part 225 * Command completion event - output parameter - lower part
226 */ 226 */
227MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32); 227MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
228 228
229#endif 229#endif
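
The pci_hw.h hunk moves the command-completion fields to the start of the event-queue element: per the corrected offsets, token and status live in the first 32-bit word and the output parameter follows at 0x04/0x08, so the old 0x08-based offsets read garbage. MLXSW_ITEM32(_grp, _type, _name, _offset, _shift, _width) expands to typed get/set accessors over a big-endian buffer; a hedged stand-alone sketch of the extraction such an accessor performs:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Roughly what MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16) generates:
 * load the big-endian 32-bit word at the byte offset, shift, mask.
 * (Illustrative; the real macro also emits setters.)
 */
static uint32_t item32_get(const uint8_t *buf, unsigned int offset,
			   unsigned int shift, unsigned int width)
{
	uint32_t word;

	memcpy(&word, buf + offset, sizeof(word));
	word = ntohl(word);
	return (word >> shift) & ((1u << width) - 1);
}

int main(void)
{
	/* first EQE word: token 0x1234 in bits 31:16, status 0x05 in bits 7:0 */
	uint8_t eqe[16] = { 0x12, 0x34, 0x00, 0x05 };

	printf("cmd_token=0x%04x cmd_status=0x%02x\n",
	       (unsigned)item32_get(eqe, 0x00, 16, 16),
	       (unsigned)item32_get(eqe, 0x00, 0, 8));
	return 0;
}
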
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d768c7b6c6d6..003093abb170 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -684,6 +684,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
684 dev_kfree_skb_any(skb_orig); 684 dev_kfree_skb_any(skb_orig);
685 return NETDEV_TX_OK; 685 return NETDEV_TX_OK;
686 } 686 }
687 dev_consume_skb_any(skb_orig);
687 } 688 }
688 689
689 if (eth_skb_pad(skb)) { 690 if (eth_skb_pad(skb)) {
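
The one-line additions to mlxsw_sp_port_xmit() above and mlxsw_sx_port_xmit() further down plug an skb leak: when the frame is copied to gain headroom for the TX header, the original skb was only freed on the failure path. dev_consume_skb_any() is the right call on success because the packet was not dropped, so it stays invisible to the kfree_skb drop tracepoint. Simplified shape of the fixed path:

/* Simplified from mlxsw_sp_port_xmit() after the fix */
if (skb_headroom(skb) < MLXSW_TXHDR_LEN) {
	struct sk_buff *skb_orig = skb;

	skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
	if (!skb) {
		dev_kfree_skb_any(skb_orig);   /* allocation failed: dropped */
		return NETDEV_TX_OK;
	}
	dev_consume_skb_any(skb_orig);         /* copy made: consumed, not dropped */
}
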
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 01d0efa9c5c7..9e494a446b7e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1172,7 +1172,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
1172 1172
1173static int 1173static int
1174mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, 1174mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
1175 struct mlxsw_sp_nexthop_group *nh_grp) 1175 struct mlxsw_sp_nexthop_group *nh_grp,
1176 bool reallocate)
1176{ 1177{
1177 u32 adj_index = nh_grp->adj_index; /* base */ 1178 u32 adj_index = nh_grp->adj_index; /* base */
1178 struct mlxsw_sp_nexthop *nh; 1179 struct mlxsw_sp_nexthop *nh;
@@ -1187,7 +1188,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
1187 continue; 1188 continue;
1188 } 1189 }
1189 1190
1190 if (nh->update) { 1191 if (nh->update || reallocate) {
1191 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp, 1192 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
1192 adj_index, nh); 1193 adj_index, nh);
1193 if (err) 1194 if (err)
@@ -1248,7 +1249,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1248 /* Nothing was added or removed, so no need to reallocate. Just 1249 /* Nothing was added or removed, so no need to reallocate. Just
1249 * update MAC on existing adjacency indexes. 1250 * update MAC on existing adjacency indexes.
1250 */ 1251 */
1251 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); 1252 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
1253 false);
1252 if (err) { 1254 if (err) {
1253 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); 1255 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1254 goto set_trap; 1256 goto set_trap;
@@ -1276,7 +1278,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
1276 nh_grp->adj_index_valid = 1; 1278 nh_grp->adj_index_valid = 1;
1277 nh_grp->adj_index = adj_index; 1279 nh_grp->adj_index = adj_index;
1278 nh_grp->ecmp_size = ecmp_size; 1280 nh_grp->ecmp_size = ecmp_size;
1279 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); 1281 err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
1280 if (err) { 1282 if (err) {
1281 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); 1283 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
1282 goto set_trap; 1284 goto set_trap;
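
The new reallocate argument fixes stale entries after the adjacency group is resized: when nexthops are added or removed, the group is written to a freshly allocated adjacency index, so every entry must be rewritten there even if its neighbour state (nh->update) did not change; only the pure refresh path may skip unchanged entries. Simplified core of the updated loop:

/* Simplified from mlxsw_sp_nexthop_group_mac_update() after the fix */
for (i = 0; i < nh_grp->count; i++) {
	nh = &nh_grp->nexthops[i];

	if (!nh->should_offload) {
		nh->offloaded = 0;
		continue;
	}
	/* rewrite when the neighbour changed OR the group moved */
	if (nh->update || reallocate) {
		err = mlxsw_sp_nexthop_mac_update(mlxsw_sp, adj_index, nh);
		if (err)
			return err;
		nh->update = 0;
		nh->offloaded = 1;
	}
	adj_index++;
}
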
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 150ccf5192a9..2e88115e8735 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -345,6 +345,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
345 dev_kfree_skb_any(skb_orig); 345 dev_kfree_skb_any(skb_orig);
346 return NETDEV_TX_OK; 346 return NETDEV_TX_OK;
347 } 347 }
348 dev_consume_skb_any(skb_orig);
348 } 349 }
349 mlxsw_sx_txhdr_construct(skb, &tx_info); 350 mlxsw_sx_txhdr_construct(skb, &tx_info);
350 /* TX header is consumed by HW on the way so we shouldn't count its 351 /* TX header is consumed by HW on the way so we shouldn't count its
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 8e5cb7605b0f..873ce2cd76ba 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -297,7 +297,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
297 list_del(&p_pkt->list_entry); 297 list_del(&p_pkt->list_entry);
298 b_last_packet = list_empty(&p_tx->active_descq); 298 b_last_packet = list_empty(&p_tx->active_descq);
299 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); 299 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
300 if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { 300 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
301 struct qed_ooo_buffer *p_buffer; 301 struct qed_ooo_buffer *p_buffer;
302 302
303 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; 303 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -309,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
309 b_last_frag = 309 b_last_frag =
310 p_tx->cur_completing_bd_idx == p_pkt->bd_used; 310 p_tx->cur_completing_bd_idx == p_pkt->bd_used;
311 tx_frag = p_pkt->bds_set[0].tx_frag; 311 tx_frag = p_pkt->bds_set[0].tx_frag;
312 if (p_ll2_conn->gsi_enable) 312 if (p_ll2_conn->conn.gsi_enable)
313 qed_ll2b_release_tx_gsi_packet(p_hwfn, 313 qed_ll2b_release_tx_gsi_packet(p_hwfn,
314 p_ll2_conn-> 314 p_ll2_conn->
315 my_id, 315 my_id,
@@ -378,7 +378,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
378 378
379 spin_unlock_irqrestore(&p_tx->lock, flags); 379 spin_unlock_irqrestore(&p_tx->lock, flags);
380 tx_frag = p_pkt->bds_set[0].tx_frag; 380 tx_frag = p_pkt->bds_set[0].tx_frag;
381 if (p_ll2_conn->gsi_enable) 381 if (p_ll2_conn->conn.gsi_enable)
382 qed_ll2b_complete_tx_gsi_packet(p_hwfn, 382 qed_ll2b_complete_tx_gsi_packet(p_hwfn,
383 p_ll2_conn->my_id, 383 p_ll2_conn->my_id,
384 p_pkt->cookie, 384 p_pkt->cookie,
@@ -550,7 +550,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
550 550
551 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); 551 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
552 552
553 if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) { 553 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
554 struct qed_ooo_buffer *p_buffer; 554 struct qed_ooo_buffer *p_buffer;
555 555
556 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; 556 p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -738,7 +738,7 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
738 rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1, 738 rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
739 p_buffer->vlan, bd_flags, 739 p_buffer->vlan, bd_flags,
740 l4_hdr_offset_w, 740 l4_hdr_offset_w,
741 p_ll2_conn->tx_dest, 0, 741 p_ll2_conn->conn.tx_dest, 0,
742 first_frag, 742 first_frag,
743 p_buffer->packet_length, 743 p_buffer->packet_length,
744 p_buffer, true); 744 p_buffer, true);
@@ -858,7 +858,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
858 u16 buf_idx; 858 u16 buf_idx;
859 int rc = 0; 859 int rc = 0;
860 860
861 if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO) 861 if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
862 return rc; 862 return rc;
863 863
864 if (!rx_num_ooo_buffers) 864 if (!rx_num_ooo_buffers)
@@ -901,7 +901,7 @@ static void
901qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, 901qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
902 struct qed_ll2_info *p_ll2_conn) 902 struct qed_ll2_info *p_ll2_conn)
903{ 903{
904 if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO) 904 if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
905 return; 905 return;
906 906
907 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); 907 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -913,7 +913,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
913{ 913{
914 struct qed_ooo_buffer *p_buffer; 914 struct qed_ooo_buffer *p_buffer;
915 915
916 if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO) 916 if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
917 return; 917 return;
918 918
919 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); 919 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -945,23 +945,19 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
945{ 945{
946 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 946 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
947 u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; 947 u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
948 struct qed_ll2_info *ll2_info; 948 struct qed_ll2_conn ll2_info;
949 int rc; 949 int rc;
950 950
951 ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL); 951 ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
952 if (!ll2_info) 952 ll2_info.mtu = params->mtu;
953 return -ENOMEM; 953 ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
954 ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO; 954 ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
955 ll2_info->mtu = params->mtu; 955 ll2_info.tx_tc = OOO_LB_TC;
956 ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets; 956 ll2_info.tx_dest = CORE_TX_DEST_LB;
957 ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping; 957
958 ll2_info->tx_tc = OOO_LB_TC; 958 rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
959 ll2_info->tx_dest = CORE_TX_DEST_LB;
960
961 rc = qed_ll2_acquire_connection(hwfn, ll2_info,
962 QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, 959 QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
963 handle); 960 handle);
964 kfree(ll2_info);
965 if (rc) { 961 if (rc) {
966 DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n"); 962 DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
967 goto out; 963 goto out;
@@ -1006,7 +1002,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
1006 struct qed_ll2_info *p_ll2_conn, 1002 struct qed_ll2_info *p_ll2_conn,
1007 u8 action_on_error) 1003 u8 action_on_error)
1008{ 1004{
1009 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type; 1005 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
1010 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; 1006 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
1011 struct core_rx_start_ramrod_data *p_ramrod = NULL; 1007 struct core_rx_start_ramrod_data *p_ramrod = NULL;
1012 struct qed_spq_entry *p_ent = NULL; 1008 struct qed_spq_entry *p_ent = NULL;
@@ -1032,7 +1028,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
1032 p_ramrod->sb_index = p_rx->rx_sb_index; 1028 p_ramrod->sb_index = p_rx->rx_sb_index;
1033 p_ramrod->complete_event_flg = 1; 1029 p_ramrod->complete_event_flg = 1;
1034 1030
1035 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu); 1031 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
1036 DMA_REGPAIR_LE(p_ramrod->bd_base, 1032 DMA_REGPAIR_LE(p_ramrod->bd_base,
1037 p_rx->rxq_chain.p_phys_addr); 1033 p_rx->rxq_chain.p_phys_addr);
1038 cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain); 1034 cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
@@ -1040,8 +1036,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
1040 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, 1036 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
1041 qed_chain_get_pbl_phys(&p_rx->rcq_chain)); 1037 qed_chain_get_pbl_phys(&p_rx->rcq_chain));
1042 1038
1043 p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg; 1039 p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
1044 p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en; 1040 p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
1045 p_ramrod->queue_id = p_ll2_conn->queue_id; 1041 p_ramrod->queue_id = p_ll2_conn->queue_id;
1046 p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0 1042 p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
1047 : 1; 1043 : 1;
@@ -1056,14 +1052,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
1056 } 1052 }
1057 1053
1058 p_ramrod->action_on_error.error_type = action_on_error; 1054 p_ramrod->action_on_error.error_type = action_on_error;
1059 p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable; 1055 p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
1060 return qed_spq_post(p_hwfn, p_ent, NULL); 1056 return qed_spq_post(p_hwfn, p_ent, NULL);
1061} 1057}
1062 1058
1063static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, 1059static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
1064 struct qed_ll2_info *p_ll2_conn) 1060 struct qed_ll2_info *p_ll2_conn)
1065{ 1061{
1066 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type; 1062 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
1067 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; 1063 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1068 struct core_tx_start_ramrod_data *p_ramrod = NULL; 1064 struct core_tx_start_ramrod_data *p_ramrod = NULL;
1069 struct qed_spq_entry *p_ent = NULL; 1065 struct qed_spq_entry *p_ent = NULL;
@@ -1075,7 +1071,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
1075 if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) 1071 if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
1076 return 0; 1072 return 0;
1077 1073
1078 if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) 1074 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
1079 p_ll2_conn->tx_stats_en = 0; 1075 p_ll2_conn->tx_stats_en = 0;
1080 else 1076 else
1081 p_ll2_conn->tx_stats_en = 1; 1077 p_ll2_conn->tx_stats_en = 1;
@@ -1096,7 +1092,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
1096 1092
1097 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); 1093 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
1098 p_ramrod->sb_index = p_tx->tx_sb_index; 1094 p_ramrod->sb_index = p_tx->tx_sb_index;
1099 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu); 1095 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
1100 p_ramrod->stats_en = p_ll2_conn->tx_stats_en; 1096 p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
1101 p_ramrod->stats_id = p_ll2_conn->tx_stats_id; 1097 p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
1102 1098
@@ -1106,7 +1102,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
1106 p_ramrod->pbl_size = cpu_to_le16(pbl_size); 1102 p_ramrod->pbl_size = cpu_to_le16(pbl_size);
1107 1103
1108 memset(&pq_params, 0, sizeof(pq_params)); 1104 memset(&pq_params, 0, sizeof(pq_params));
1109 pq_params.core.tc = p_ll2_conn->tx_tc; 1105 pq_params.core.tc = p_ll2_conn->conn.tx_tc;
1110 pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); 1106 pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
1111 p_ramrod->qm_pq_id = cpu_to_le16(pq_id); 1107 p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
1112 1108
@@ -1123,7 +1119,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
1123 DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); 1119 DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
1124 } 1120 }
1125 1121
1126 p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable; 1122 p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
1127 return qed_spq_post(p_hwfn, p_ent, NULL); 1123 return qed_spq_post(p_hwfn, p_ent, NULL);
1128} 1124}
1129 1125
@@ -1224,7 +1220,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
1224 1220
1225 DP_VERBOSE(p_hwfn, QED_MSG_LL2, 1221 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1226 "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n", 1222 "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
1227 p_ll2_info->conn_type, rx_num_desc); 1223 p_ll2_info->conn.conn_type, rx_num_desc);
1228 1224
1229out: 1225out:
1230 return rc; 1226 return rc;
@@ -1262,7 +1258,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
1262 1258
1263 DP_VERBOSE(p_hwfn, QED_MSG_LL2, 1259 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
1264 "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", 1260 "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
1265 p_ll2_info->conn_type, tx_num_desc); 1261 p_ll2_info->conn.conn_type, tx_num_desc);
1266 1262
1267out: 1263out:
1268 if (rc) 1264 if (rc)
@@ -1273,7 +1269,7 @@ out:
1273} 1269}
1274 1270
1275int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, 1271int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
1276 struct qed_ll2_info *p_params, 1272 struct qed_ll2_conn *p_params,
1277 u16 rx_num_desc, 1273 u16 rx_num_desc,
1278 u16 tx_num_desc, 1274 u16 tx_num_desc,
1279 u8 *p_connection_handle) 1275 u8 *p_connection_handle)
@@ -1302,15 +1298,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
1302 if (!p_ll2_info) 1298 if (!p_ll2_info)
1303 return -EBUSY; 1299 return -EBUSY;
1304 1300
1305 p_ll2_info->conn_type = p_params->conn_type; 1301 p_ll2_info->conn = *p_params;
1306 p_ll2_info->mtu = p_params->mtu;
1307 p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
1308 p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
1309 p_ll2_info->tx_tc = p_params->tx_tc;
1310 p_ll2_info->tx_dest = p_params->tx_dest;
1311 p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
1312 p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
1313 p_ll2_info->gsi_enable = p_params->gsi_enable;
1314 1302
1315 rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc); 1303 rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
1316 if (rc) 1304 if (rc)
@@ -1371,9 +1359,9 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
1371 1359
1372 SET_FIELD(action_on_error, 1360 SET_FIELD(action_on_error,
1373 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, 1361 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
1374 p_ll2_conn->ai_err_packet_too_big); 1362 p_ll2_conn->conn.ai_err_packet_too_big);
1375 SET_FIELD(action_on_error, 1363 SET_FIELD(action_on_error,
1376 CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf); 1364 CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
1377 1365
1378 return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); 1366 return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
1379} 1367}
@@ -1600,7 +1588,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1600 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", 1588 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1601 p_ll2->queue_id, 1589 p_ll2->queue_id,
1602 p_ll2->cid, 1590 p_ll2->cid,
1603 p_ll2->conn_type, 1591 p_ll2->conn.conn_type,
1604 prod_idx, 1592 prod_idx,
1605 first_frag_len, 1593 first_frag_len,
1606 num_of_bds, 1594 num_of_bds,
@@ -1676,7 +1664,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1676 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), 1664 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1677 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", 1665 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1678 p_ll2_conn->queue_id, 1666 p_ll2_conn->queue_id,
1679 p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod); 1667 p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
1680} 1668}
1681 1669
1682int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, 1670int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
@@ -1817,7 +1805,7 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1817 qed_ll2_rxq_flush(p_hwfn, connection_handle); 1805 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1818 } 1806 }
1819 1807
1820 if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) 1808 if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
1821 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); 1809 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
1822 1810
1823 return rc; 1811 return rc;
@@ -1993,7 +1981,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
1993 1981
1994static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) 1982static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
1995{ 1983{
1996 struct qed_ll2_info ll2_info; 1984 struct qed_ll2_conn ll2_info;
1997 struct qed_ll2_buffer *buffer, *tmp_buffer; 1985 struct qed_ll2_buffer *buffer, *tmp_buffer;
1998 enum qed_ll2_conn_type conn_type; 1986 enum qed_ll2_conn_type conn_type;
1999 struct qed_ptt *p_ptt; 1987 struct qed_ptt *p_ptt;
@@ -2041,6 +2029,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2041 2029
2042 /* Prepare the temporary ll2 information */ 2030 /* Prepare the temporary ll2 information */
2043 memset(&ll2_info, 0, sizeof(ll2_info)); 2031 memset(&ll2_info, 0, sizeof(ll2_info));
2032
2044 ll2_info.conn_type = conn_type; 2033 ll2_info.conn_type = conn_type;
2045 ll2_info.mtu = params->mtu; 2034 ll2_info.mtu = params->mtu;
2046 ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets; 2035 ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
@@ -2120,7 +2109,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
2120 } 2109 }
2121 2110
2122 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address); 2111 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
2123
2124 return 0; 2112 return 0;
2125 2113
2126release_terminate_all: 2114release_terminate_all:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 6625a3ae5a33..31417928b635 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -112,15 +112,8 @@ struct qed_ll2_tx_queue {
112 bool b_completing_packet; 112 bool b_completing_packet;
113}; 113};
114 114
115struct qed_ll2_info { 115struct qed_ll2_conn {
116 /* Lock protecting the state of LL2 */
117 struct mutex mutex;
118 enum qed_ll2_conn_type conn_type; 116 enum qed_ll2_conn_type conn_type;
119 u32 cid;
120 u8 my_id;
121 u8 queue_id;
122 u8 tx_stats_id;
123 bool b_active;
124 u16 mtu; 117 u16 mtu;
125 u8 rx_drop_ttl0_flg; 118 u8 rx_drop_ttl0_flg;
126 u8 rx_vlan_removal_en; 119 u8 rx_vlan_removal_en;
@@ -128,10 +121,21 @@ struct qed_ll2_info {
128 enum core_tx_dest tx_dest; 121 enum core_tx_dest tx_dest;
129 enum core_error_handle ai_err_packet_too_big; 122 enum core_error_handle ai_err_packet_too_big;
130 enum core_error_handle ai_err_no_buf; 123 enum core_error_handle ai_err_no_buf;
124 u8 gsi_enable;
125};
126
127struct qed_ll2_info {
128 /* Lock protecting the state of LL2 */
129 struct mutex mutex;
130 struct qed_ll2_conn conn;
131 u32 cid;
132 u8 my_id;
133 u8 queue_id;
134 u8 tx_stats_id;
135 bool b_active;
131 u8 tx_stats_en; 136 u8 tx_stats_en;
132 struct qed_ll2_rx_queue rx_queue; 137 struct qed_ll2_rx_queue rx_queue;
133 struct qed_ll2_tx_queue tx_queue; 138 struct qed_ll2_tx_queue tx_queue;
134 u8 gsi_enable;
135}; 139};
136 140
137/** 141/**
@@ -149,7 +153,7 @@ struct qed_ll2_info {
149 * @return 0 on success, failure otherwise 153 * @return 0 on success, failure otherwise
150 */ 154 */
151int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, 155int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
152 struct qed_ll2_info *p_params, 156 struct qed_ll2_conn *p_params,
153 u16 rx_num_desc, 157 u16 rx_num_desc,
154 u16 tx_num_desc, 158 u16 tx_num_desc,
155 u8 *p_connection_handle); 159 u8 *p_connection_handle);
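
Splitting the user-supplied connection parameters into their own struct qed_ll2_conn has two payoffs visible in the hunks above: qed_ll2_acquire_connection() copies all parameters with a single struct assignment (p_ll2_info->conn = *p_params) instead of nine field copies, and callers such as qed_ll2_start_ooo() can build the parameters on the stack instead of kzalloc()ing a full qed_ll2_info. A minimal runnable sketch of the idiom, with hypothetical type names mirroring the refactor:

#include <stdio.h>
#include <string.h>

struct conn_params {                 /* stands in for qed_ll2_conn */
	int conn_type;
	unsigned short mtu;
	unsigned char gsi_enable;
};

struct conn_info {                   /* stands in for qed_ll2_info */
	struct conn_params conn;     /* embedded parameter block */
	unsigned int cid;            /* driver-internal state stays outside */
	unsigned char my_id;
};

static void acquire(struct conn_info *info, const struct conn_params *p)
{
	info->conn = *p;             /* one assignment replaces per-field copies */
}

int main(void)
{
	struct conn_params p = { .conn_type = 3, .mtu = 1500, .gsi_enable = 1 };
	struct conn_info info;

	memset(&info, 0, sizeof(info));
	acquire(&info, &p);
	printf("type=%d mtu=%u\n", info.conn.conn_type, (unsigned)info.conn.mtu);
	return 0;
}
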
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 2a16547c8966..2dbdb3298991 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -2632,7 +2632,7 @@ static int qed_roce_ll2_start(struct qed_dev *cdev,
2632{ 2632{
2633 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2633 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2634 struct qed_roce_ll2_info *roce_ll2; 2634 struct qed_roce_ll2_info *roce_ll2;
2635 struct qed_ll2_info ll2_params; 2635 struct qed_ll2_conn ll2_params;
2636 int rc; 2636 int rc;
2637 2637
2638 if (!params) { 2638 if (!params) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 99a14df28b96..2851b4c56570 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -201,6 +201,13 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
201 else 201 else
202 adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr); 202 adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
203 203
204 /* of_phy_find_device() claims a reference to the phydev,
205 * so we do that here manually as well. When the driver
206 * later unloads, it can unilaterally drop the reference
207 * without worrying about ACPI vs DT.
208 */
209 if (adpt->phydev)
210 get_device(&adpt->phydev->mdio.dev);
204 } else { 211 } else {
205 struct device_node *phy_np; 212 struct device_node *phy_np;
206 213
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 422289c232bc..f46d300bd585 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -719,8 +719,7 @@ static int emac_probe(struct platform_device *pdev)
719err_undo_napi: 719err_undo_napi:
720 netif_napi_del(&adpt->rx_q.napi); 720 netif_napi_del(&adpt->rx_q.napi);
721err_undo_mdiobus: 721err_undo_mdiobus:
722 if (!has_acpi_companion(&pdev->dev)) 722 put_device(&adpt->phydev->mdio.dev);
723 put_device(&adpt->phydev->mdio.dev);
724 mdiobus_unregister(adpt->mii_bus); 723 mdiobus_unregister(adpt->mii_bus);
725err_undo_clocks: 724err_undo_clocks:
726 emac_clks_teardown(adpt); 725 emac_clks_teardown(adpt);
@@ -740,8 +739,7 @@ static int emac_remove(struct platform_device *pdev)
740 739
741 emac_clks_teardown(adpt); 740 emac_clks_teardown(adpt);
742 741
743 if (!has_acpi_companion(&pdev->dev)) 742 put_device(&adpt->phydev->mdio.dev);
744 put_device(&adpt->phydev->mdio.dev);
745 mdiobus_unregister(adpt->mii_bus); 743 mdiobus_unregister(adpt->mii_bus);
746 free_netdev(netdev); 744 free_netdev(netdev);
747 745
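
The emac change makes PHY reference counting symmetric across the two firmware paths: of_phy_find_device() (DT) returns with a device reference held, while mdiobus_get_phy() (ACPI) does not, so probe now takes one explicitly and the remove and error paths can drop it without checking has_acpi_companion(). The balanced idiom, condensed from the two hunks:

/* probe, condensed: both lookup paths end up owning one reference */
if (has_acpi_companion(&pdev->dev)) {
	adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr); /* no ref taken */
	if (adpt->phydev)
		get_device(&adpt->phydev->mdio.dev);       /* take one manually */
} else {
	adpt->phydev = of_phy_find_device(phy_np);         /* ref already held */
}

/* remove and error unwinding, ACPI or DT alike: */
put_device(&adpt->phydev->mdio.dev);
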
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f9b97f5946f8..8f1623bf2134 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -326,6 +326,7 @@ enum cfg_version {
326static const struct pci_device_id rtl8169_pci_tbl[] = { 326static const struct pci_device_id rtl8169_pci_tbl[] = {
327 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, 327 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
328 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, 328 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
329 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
329 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, 330 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
330 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, 331 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
331 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, 332 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
@@ -695,7 +696,7 @@ enum rtl_tx_desc_bit_1 {
695enum rtl_rx_desc_bit { 696enum rtl_rx_desc_bit {
696 /* Rx private */ 697 /* Rx private */
697 PID1 = (1 << 18), /* Protocol ID bit 1/2 */ 698 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
698 PID0 = (1 << 17), /* Protocol ID bit 2/2 */ 699 PID0 = (1 << 17), /* Protocol ID bit 0/2 */
699 700
700#define RxProtoUDP (PID1) 701#define RxProtoUDP (PID1)
701#define RxProtoTCP (PID0) 702#define RxProtoTCP (PID0)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 92d7692c840d..301f48755093 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = {
179 .get_mdio_data = ravb_get_mdio_data, 179 .get_mdio_data = ravb_get_mdio_data,
180}; 180};
181 181
182/* Free TX skb function for AVB-IP */
183static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
184{
185 struct ravb_private *priv = netdev_priv(ndev);
186 struct net_device_stats *stats = &priv->stats[q];
187 struct ravb_tx_desc *desc;
188 int free_num = 0;
189 int entry;
190 u32 size;
191
192 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
193 bool txed;
194
195 entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
196 NUM_TX_DESC);
197 desc = &priv->tx_ring[q][entry];
198 txed = desc->die_dt == DT_FEMPTY;
199 if (free_txed_only && !txed)
200 break;
201 /* Descriptor type must be checked before all other reads */
202 dma_rmb();
203 size = le16_to_cpu(desc->ds_tagl) & TX_DS;
204 /* Free the original skb. */
205 if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
206 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
207 size, DMA_TO_DEVICE);
208 /* Last packet descriptor? */
209 if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
210 entry /= NUM_TX_DESC;
211 dev_kfree_skb_any(priv->tx_skb[q][entry]);
212 priv->tx_skb[q][entry] = NULL;
213 if (txed)
214 stats->tx_packets++;
215 }
216 free_num++;
217 }
218 if (txed)
219 stats->tx_bytes += size;
220 desc->die_dt = DT_EEMPTY;
221 }
222 return free_num;
223}
224
182/* Free skb's and DMA buffers for Ethernet AVB */ 225/* Free skb's and DMA buffers for Ethernet AVB */
183static void ravb_ring_free(struct net_device *ndev, int q) 226static void ravb_ring_free(struct net_device *ndev, int q)
184{ 227{
@@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
194 kfree(priv->rx_skb[q]); 237 kfree(priv->rx_skb[q]);
195 priv->rx_skb[q] = NULL; 238 priv->rx_skb[q] = NULL;
196 239
197 /* Free TX skb ringbuffer */
198 if (priv->tx_skb[q]) {
199 for (i = 0; i < priv->num_tx_ring[q]; i++)
200 dev_kfree_skb(priv->tx_skb[q][i]);
201 }
202 kfree(priv->tx_skb[q]);
203 priv->tx_skb[q] = NULL;
204
205 /* Free aligned TX buffers */ 240 /* Free aligned TX buffers */
206 kfree(priv->tx_align[q]); 241 kfree(priv->tx_align[q]);
207 priv->tx_align[q] = NULL; 242 priv->tx_align[q] = NULL;
208 243
209 if (priv->rx_ring[q]) { 244 if (priv->rx_ring[q]) {
245 for (i = 0; i < priv->num_rx_ring[q]; i++) {
246 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
247
248 if (!dma_mapping_error(ndev->dev.parent,
249 le32_to_cpu(desc->dptr)))
250 dma_unmap_single(ndev->dev.parent,
251 le32_to_cpu(desc->dptr),
252 PKT_BUF_SZ,
253 DMA_FROM_DEVICE);
254 }
210 ring_size = sizeof(struct ravb_ex_rx_desc) * 255 ring_size = sizeof(struct ravb_ex_rx_desc) *
211 (priv->num_rx_ring[q] + 1); 256 (priv->num_rx_ring[q] + 1);
212 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], 257 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
215 } 260 }
216 261
217 if (priv->tx_ring[q]) { 262 if (priv->tx_ring[q]) {
263 ravb_tx_free(ndev, q, false);
264
218 ring_size = sizeof(struct ravb_tx_desc) * 265 ring_size = sizeof(struct ravb_tx_desc) *
219 (priv->num_tx_ring[q] * NUM_TX_DESC + 1); 266 (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
220 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], 267 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
221 priv->tx_desc_dma[q]); 268 priv->tx_desc_dma[q]);
222 priv->tx_ring[q] = NULL; 269 priv->tx_ring[q] = NULL;
223 } 270 }
271
272 /* Free TX skb ringbuffer.
273 * SKBs are freed by ravb_tx_free() call above.
274 */
275 kfree(priv->tx_skb[q]);
276 priv->tx_skb[q] = NULL;
224} 277}
225 278
226/* Format skb and descriptor buffer for Ethernet AVB */ 279/* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_device *ndev)
431 return 0; 484 return 0;
432} 485}
433 486
434/* Free TX skb function for AVB-IP */
435static int ravb_tx_free(struct net_device *ndev, int q)
436{
437 struct ravb_private *priv = netdev_priv(ndev);
438 struct net_device_stats *stats = &priv->stats[q];
439 struct ravb_tx_desc *desc;
440 int free_num = 0;
441 int entry;
442 u32 size;
443
444 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
445 entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
446 NUM_TX_DESC);
447 desc = &priv->tx_ring[q][entry];
448 if (desc->die_dt != DT_FEMPTY)
449 break;
450 /* Descriptor type must be checked before all other reads */
451 dma_rmb();
452 size = le16_to_cpu(desc->ds_tagl) & TX_DS;
453 /* Free the original skb. */
454 if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
455 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
456 size, DMA_TO_DEVICE);
457 /* Last packet descriptor? */
458 if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
459 entry /= NUM_TX_DESC;
460 dev_kfree_skb_any(priv->tx_skb[q][entry]);
461 priv->tx_skb[q][entry] = NULL;
462 stats->tx_packets++;
463 }
464 free_num++;
465 }
466 stats->tx_bytes += size;
467 desc->die_dt = DT_EEMPTY;
468 }
469 return free_num;
470}
471
472static void ravb_get_tx_tstamp(struct net_device *ndev) 487static void ravb_get_tx_tstamp(struct net_device *ndev)
473{ 488{
474 struct ravb_private *priv = netdev_priv(ndev); 489 struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
902 spin_lock_irqsave(&priv->lock, flags); 917 spin_lock_irqsave(&priv->lock, flags);
903 /* Clear TX interrupt */ 918 /* Clear TX interrupt */
904 ravb_write(ndev, ~mask, TIS); 919 ravb_write(ndev, ~mask, TIS);
905 ravb_tx_free(ndev, q); 920 ravb_tx_free(ndev, q, true);
906 netif_wake_subqueue(ndev, q); 921 netif_wake_subqueue(ndev, q);
907 mmiowb(); 922 mmiowb();
908 spin_unlock_irqrestore(&priv->lock, flags); 923 spin_unlock_irqrestore(&priv->lock, flags);
@@ -926,14 +941,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
926 /* Receive error message handling */ 941 /* Receive error message handling */
927 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; 942 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
928 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; 943 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
929 if (priv->rx_over_errors != ndev->stats.rx_over_errors) { 944 if (priv->rx_over_errors != ndev->stats.rx_over_errors)
930 ndev->stats.rx_over_errors = priv->rx_over_errors; 945 ndev->stats.rx_over_errors = priv->rx_over_errors;
931 netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n"); 946 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
932 }
933 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
934 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; 947 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
935 netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
936 }
937out: 948out:
938 return budget - quota; 949 return budget - quota;
939} 950}
@@ -1508,6 +1519,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1508 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + 1519 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
1509 entry / NUM_TX_DESC * DPTR_ALIGN; 1520 entry / NUM_TX_DESC * DPTR_ALIGN;
1510 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; 1521 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
1522 /* Zero length DMA descriptors are problematic as they seem to
1523 * terminate DMA transfers. Avoid them by simply using a length of
1524 * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
1525 *
1526 * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
1527 * data by the call to skb_put_padto() above, this is safe with
1528 * respect to both the length of the first DMA descriptor (len)
1529 * overflowing the available data and the length of the second DMA
1530 * descriptor (skb->len - len) being negative.
1531 */
1532 if (len == 0)
1533 len = DPTR_ALIGN;
1534
1511 memcpy(buffer, skb->data, len); 1535 memcpy(buffer, skb->data, len);
1512 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); 1536 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
1513 if (dma_mapping_error(ndev->dev.parent, dma_addr)) 1537 if (dma_mapping_error(ndev->dev.parent, dma_addr))
@@ -1558,7 +1582,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1558 1582
1559 priv->cur_tx[q] += NUM_TX_DESC; 1583 priv->cur_tx[q] += NUM_TX_DESC;
1560 if (priv->cur_tx[q] - priv->dirty_tx[q] > 1584 if (priv->cur_tx[q] - priv->dirty_tx[q] >
1561 (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q)) 1585 (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
1586 !ravb_tx_free(ndev, q, true))
1562 netif_stop_subqueue(ndev, q); 1587 netif_stop_subqueue(ndev, q);
1563 1588
1564exit: 1589exit:
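
Three related ravb fixes land here: ravb_tx_free() gains a free_txed_only flag so ravb_ring_free() can also reclaim descriptors the hardware never transmitted (and ravb_ring_free() now unmaps RX buffers it previously leaked), TX statistics are bumped only for frames that were actually sent, and ravb_start_xmit() avoids emitting a zero-length first descriptor, which the in-line comment explains would terminate the DMA transfer. The descriptor split is simple pointer arithmetic; a runnable sketch:

#include <stdio.h>
#include <stdint.h>

#define DPTR_ALIGN 4

/* Sketch of the ravb two-descriptor split: the first descriptor holds the
 * bytes copied into an aligned bounce buffer up to the next DPTR_ALIGN
 * boundary, the second holds the rest in place. The fix rounds a 0-byte
 * first descriptor up to DPTR_ALIGN, since zero-length descriptors seem
 * to terminate the DMA transfer.
 */
static unsigned int first_desc_len(uintptr_t data)
{
	unsigned int len = (unsigned int)(-data & (DPTR_ALIGN - 1));

	return len ? len : DPTR_ALIGN;
}

int main(void)
{
	printf("%u\n", first_desc_len(0x1001)); /* 3: pad up to 0x1004 */
	printf("%u\n", first_desc_len(0x1000)); /* 4, not 0: already aligned */
	return 0;
}
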
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index f341c1bc7001..f729a6b43958 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -574,6 +574,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
574 .rpadir_value = 2 << 16, 574 .rpadir_value = 2 << 16,
575 .no_trimd = 1, 575 .no_trimd = 1,
576 .no_ade = 1, 576 .no_ade = 1,
577 .hw_crc = 1,
577 .tsu = 1, 578 .tsu = 1,
578 .select_mii = 1, 579 .select_mii = 1,
579 .shift_rd0 = 1, 580 .shift_rd0 = 1,
@@ -802,7 +803,7 @@ static struct sh_eth_cpu_data sh7734_data = {
802 803
803 .ecsr_value = ECSR_ICD | ECSR_MPD, 804 .ecsr_value = ECSR_ICD | ECSR_MPD,
804 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 805 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
805 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 806 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
806 807
807 .tx_check = EESR_TC1 | EESR_FTC, 808 .tx_check = EESR_TC1 | EESR_FTC,
808 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 809 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -819,6 +820,7 @@ static struct sh_eth_cpu_data sh7734_data = {
819 .tsu = 1, 820 .tsu = 1,
820 .hw_crc = 1, 821 .hw_crc = 1,
821 .select_mii = 1, 822 .select_mii = 1,
823 .shift_rd0 = 1,
822}; 824};
823 825
824/* SH7763 */ 826/* SH7763 */
@@ -831,7 +833,7 @@ static struct sh_eth_cpu_data sh7763_data = {
831 833
832 .ecsr_value = ECSR_ICD | ECSR_MPD, 834 .ecsr_value = ECSR_ICD | ECSR_MPD,
833 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, 835 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
834 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, 836 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
835 837
836 .tx_check = EESR_TC1 | EESR_FTC, 838 .tx_check = EESR_TC1 | EESR_FTC,
837 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 839 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -1656,7 +1658,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1656 else 1658 else
1657 goto out; 1659 goto out;
1658 1660
1659 if (!likely(mdp->irq_enabled)) { 1661 if (unlikely(!mdp->irq_enabled)) {
1660 sh_eth_write(ndev, 0, EESIPR); 1662 sh_eth_write(ndev, 0, EESIPR);
1661 goto out; 1663 goto out;
1662 } 1664 }
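
The sh_eth hunks correct the EESIPR values for SH7734/SH7763, add the missing hw_crc and shift_rd0 flags for the affected SoCs, and replace !likely(mdp->irq_enabled) with unlikely(!mdp->irq_enabled). The two conditions compute the same value, but the annotation lands on different expressions: the conventional form tells the compiler that the whole error condition is rare, instead of hinting about irq_enabled and then negating the result. For reference, the kernel's definitions (include/linux/compiler.h, simplified):

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* !likely(p):   hint attaches to p, the branch tests the negation   */
/* unlikely(!p): hint attaches to !p, the expression branched on     */
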
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index de2947ccc5ad..5eb0e684fd76 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1323,7 +1323,8 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
1323 } 1323 }
1324 1324
1325 /* don't fail init if RSS setup doesn't work */ 1325 /* don't fail init if RSS setup doesn't work */
1326 efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table); 1326 rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
1327 efx->rss_active = (rc == 0);
1327 1328
1328 return 0; 1329 return 0;
1329} 1330}
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 87bdc56b4e3a..18ebaea44e82 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -975,6 +975,8 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
975 975
976 case ETHTOOL_GRXFH: { 976 case ETHTOOL_GRXFH: {
977 info->data = 0; 977 info->data = 0;
978 if (!efx->rss_active) /* No RSS */
979 return 0;
978 switch (info->flow_type) { 980 switch (info->flow_type) {
979 case UDP_V4_FLOW: 981 case UDP_V4_FLOW:
980 if (efx->rx_hash_udp_4tuple) 982 if (efx->rx_hash_udp_4tuple)
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 1a635ced62d0..1c62c1a00fca 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -860,6 +860,7 @@ struct vfdi_status;
860 * @rx_hash_key: Toeplitz hash key for RSS 860 * @rx_hash_key: Toeplitz hash key for RSS
861 * @rx_indir_table: Indirection table for RSS 861 * @rx_indir_table: Indirection table for RSS
862 * @rx_scatter: Scatter mode enabled for receives 862 * @rx_scatter: Scatter mode enabled for receives
863 * @rss_active: RSS enabled on hardware
863 * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled 864 * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
864 * @int_error_count: Number of internal errors seen recently 865 * @int_error_count: Number of internal errors seen recently
865 * @int_error_expire: Time at which error count will be expired 866 * @int_error_expire: Time at which error count will be expired
@@ -998,6 +999,7 @@ struct efx_nic {
998 u8 rx_hash_key[40]; 999 u8 rx_hash_key[40];
999 u32 rx_indir_table[128]; 1000 u32 rx_indir_table[128];
1000 bool rx_scatter; 1001 bool rx_scatter;
1002 bool rss_active;
1001 bool rx_hash_udp_4tuple; 1003 bool rx_hash_udp_4tuple;
1002 1004
1003 unsigned int_error_count; 1005 unsigned int_error_count;
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index a3901bc96586..4e54e5dc9fcb 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -403,6 +403,7 @@ static int siena_init_nic(struct efx_nic *efx)
403 efx_writeo(efx, &temp, FR_AZ_RX_CFG); 403 efx_writeo(efx, &temp, FR_AZ_RX_CFG);
404 404
405 siena_rx_push_rss_config(efx, false, efx->rx_indir_table); 405 siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
406 efx->rss_active = true;
406 407
407 /* Enable event logging */ 408 /* Enable event logging */
408 rc = efx_mcdi_log_ctrl(efx, true, false, 0); 409 rc = efx_mcdi_log_ctrl(efx, true, false, 0);
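
The four sfc hunks cooperate: EF10 records whether the RSS push actually took effect (it is allowed to fail at init), Siena always succeeds and sets the new rss_active flag unconditionally, and ethtool's ETHTOOL_GRXFH path now reports zero hashed fields when RSS is inactive instead of advertising hashing the hardware is not doing. Condensed from the hunks:

/* init (ef10.c): RSS setup may fail without failing the NIC init */
rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
efx->rss_active = (rc == 0);

/* ethtool.c, ETHTOOL_GRXFH: no RSS means no flow-hash fields to report */
info->data = 0;
if (!efx->rss_active)
	return 0;
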
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
index c35597586121..3dc7d279f805 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
@@ -60,8 +60,9 @@ struct oxnas_dwmac {
60 struct regmap *regmap; 60 struct regmap *regmap;
61}; 61};
62 62
63static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac) 63static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
64{ 64{
65 struct oxnas_dwmac *dwmac = priv;
65 unsigned int value; 66 unsigned int value;
66 int ret; 67 int ret;
67 68
@@ -105,20 +106,20 @@ static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
105 return 0; 106 return 0;
106} 107}
107 108
109static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
110{
111 struct oxnas_dwmac *dwmac = priv;
112
113 clk_disable_unprepare(dwmac->clk);
114}
115
108static int oxnas_dwmac_probe(struct platform_device *pdev) 116static int oxnas_dwmac_probe(struct platform_device *pdev)
109{ 117{
110 struct plat_stmmacenet_data *plat_dat; 118 struct plat_stmmacenet_data *plat_dat;
111 struct stmmac_resources stmmac_res; 119 struct stmmac_resources stmmac_res;
112 struct device_node *sysctrl;
113 struct oxnas_dwmac *dwmac; 120 struct oxnas_dwmac *dwmac;
114 int ret; 121 int ret;
115 122
116 sysctrl = of_parse_phandle(pdev->dev.of_node, "oxsemi,sys-ctrl", 0);
117 if (!sysctrl) {
118 dev_err(&pdev->dev, "failed to get sys-ctrl node\n");
119 return -EINVAL;
120 }
121
122 ret = stmmac_get_platform_resources(pdev, &stmmac_res); 123 ret = stmmac_get_platform_resources(pdev, &stmmac_res);
123 if (ret) 124 if (ret)
124 return ret; 125 return ret;
@@ -128,72 +129,48 @@ static int oxnas_dwmac_probe(struct platform_device *pdev)
128 return PTR_ERR(plat_dat); 129 return PTR_ERR(plat_dat);
129 130
130 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); 131 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
131 if (!dwmac) 132 if (!dwmac) {
132 return -ENOMEM; 133 ret = -ENOMEM;
134 goto err_remove_config_dt;
135 }
133 136
134 dwmac->dev = &pdev->dev; 137 dwmac->dev = &pdev->dev;
135 plat_dat->bsp_priv = dwmac; 138 plat_dat->bsp_priv = dwmac;
139 plat_dat->init = oxnas_dwmac_init;
140 plat_dat->exit = oxnas_dwmac_exit;
136 141
137 dwmac->regmap = syscon_node_to_regmap(sysctrl); 142 dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
143 "oxsemi,sys-ctrl");
138 if (IS_ERR(dwmac->regmap)) { 144 if (IS_ERR(dwmac->regmap)) {
139 dev_err(&pdev->dev, "failed to have sysctrl regmap\n"); 145 dev_err(&pdev->dev, "failed to have sysctrl regmap\n");
140 return PTR_ERR(dwmac->regmap); 146 ret = PTR_ERR(dwmac->regmap);
147 goto err_remove_config_dt;
141 } 148 }
142 149
143 dwmac->clk = devm_clk_get(&pdev->dev, "gmac"); 150 dwmac->clk = devm_clk_get(&pdev->dev, "gmac");
144 if (IS_ERR(dwmac->clk)) 151 if (IS_ERR(dwmac->clk)) {
145 return PTR_ERR(dwmac->clk); 152 ret = PTR_ERR(dwmac->clk);
153 goto err_remove_config_dt;
154 }
146 155
147 ret = oxnas_dwmac_init(dwmac); 156 ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv);
148 if (ret) 157 if (ret)
149 return ret; 158 goto err_remove_config_dt;
150 159
151 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 160 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
152 if (ret) 161 if (ret)
153 clk_disable_unprepare(dwmac->clk); 162 goto err_dwmac_exit;
154 163
155 return ret;
156}
157 164
158static int oxnas_dwmac_remove(struct platform_device *pdev) 165 return 0;
159{
160 struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
161 int ret = stmmac_dvr_remove(&pdev->dev);
162
163 clk_disable_unprepare(dwmac->clk);
164
165 return ret;
166}
167
168#ifdef CONFIG_PM_SLEEP
169static int oxnas_dwmac_suspend(struct device *dev)
170{
171 struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
172 int ret;
173
174 ret = stmmac_suspend(dev);
175 clk_disable_unprepare(dwmac->clk);
176
177 return ret;
178}
179
180static int oxnas_dwmac_resume(struct device *dev)
181{
182 struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
183 int ret;
184
185 ret = oxnas_dwmac_init(dwmac);
186 if (ret)
187 return ret;
188 166
189 ret = stmmac_resume(dev); 167err_dwmac_exit:
168 oxnas_dwmac_exit(pdev, plat_dat->bsp_priv);
169err_remove_config_dt:
170 stmmac_remove_config_dt(pdev, plat_dat);
190 171
191 return ret; 172 return ret;
192} 173}
193#endif /* CONFIG_PM_SLEEP */
194
195static SIMPLE_DEV_PM_OPS(oxnas_dwmac_pm_ops,
196 oxnas_dwmac_suspend, oxnas_dwmac_resume);
197 174
198static const struct of_device_id oxnas_dwmac_match[] = { 175static const struct of_device_id oxnas_dwmac_match[] = {
199 { .compatible = "oxsemi,ox820-dwmac" }, 176 { .compatible = "oxsemi,ox820-dwmac" },
@@ -203,10 +180,10 @@ MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
203 180
204static struct platform_driver oxnas_dwmac_driver = { 181static struct platform_driver oxnas_dwmac_driver = {
205 .probe = oxnas_dwmac_probe, 182 .probe = oxnas_dwmac_probe,
206 .remove = oxnas_dwmac_remove, 183 .remove = stmmac_pltfr_remove,
207 .driver = { 184 .driver = {
208 .name = "oxnas-dwmac", 185 .name = "oxnas-dwmac",
209 .pm = &oxnas_dwmac_pm_ops, 186 .pm = &stmmac_pltfr_pm_ops,
210 .of_match_table = oxnas_dwmac_match, 187 .of_match_table = oxnas_dwmac_match,
211 }, 188 },
212}; 189};
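
The oxnas glue above stops carrying its own remove and PM callbacks: clock
handling moves into the plat_dat->init/exit hooks, and the driver switches to
the generic stmmac_pltfr_remove and stmmac_pltfr_pm_ops, which invoke those
hooks around stmmac_suspend()/stmmac_resume(). A simplified sketch of the
contract the callbacks plug into (loosely mirroring the stmmac_pltfr_*
helpers; stmmac_priv and plat come from the stmmac headers, error handling
trimmed):

    static int example_pltfr_suspend(struct device *dev)
    {
            struct net_device *ndev = dev_get_drvdata(dev);
            struct stmmac_priv *priv = netdev_priv(ndev);
            struct platform_device *pdev = to_platform_device(dev);
            int ret = stmmac_suspend(dev);

            if (priv->plat->exit)           /* e.g. oxnas_dwmac_exit() */
                    priv->plat->exit(pdev, priv->plat->bsp_priv);
            return ret;
    }

    static int example_pltfr_resume(struct device *dev)
    {
            struct net_device *ndev = dev_get_drvdata(dev);
            struct stmmac_priv *priv = netdev_priv(ndev);
            struct platform_device *pdev = to_platform_device(dev);

            if (priv->plat->init)           /* e.g. oxnas_dwmac_init() */
                    priv->plat->init(pdev, priv->plat->bsp_priv);
            return stmmac_resume(dev);
    }
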
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index be3c91c7f211..5484fd726d5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
305{ 305{
306 void __iomem *ioaddr = hw->pcsr; 306 void __iomem *ioaddr = hw->pcsr;
307 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); 307 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
308 u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
308 int ret = 0; 309 int ret = 0;
309 310
311 /* Discard masked bits */
312 intr_status &= ~intr_mask;
313
310 /* Not used events (e.g. MMC interrupts) are not handled. */ 314 /* Not used events (e.g. MMC interrupts) are not handled. */
311 if ((intr_status & GMAC_INT_STATUS_MMCTIS)) 315 if ((intr_status & GMAC_INT_STATUS_MMCTIS))
312 x->mmc_tx_irq_n++; 316 x->mmc_tx_irq_n++;
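
The dwmac1000 hunk above is an instance of a general MMIO interrupt idiom:
read the raw status, read the mask, and discard masked bits before acting, so
that events the driver deliberately masked can never be mistaken for pending
work. In isolation, with hypothetical register offsets:

    #include <linux/io.h>

    /* Offsets are illustrative stand-ins, not the GMAC ones. */
    #define EX_INT_STATUS   0x38
    #define EX_INT_MASK     0x3c

    static u32 example_pending_irqs(void __iomem *ioaddr)
    {
            u32 status = readl(ioaddr + EX_INT_STATUS);
            u32 mask = readl(ioaddr + EX_INT_MASK);

            /* Discard masked bits */
            return status & ~mask;
    }
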
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index bb40382e205d..e3f6389e1b01 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3319,8 +3319,16 @@ int stmmac_dvr_probe(struct device *device,
3319 ndev->max_mtu = JUMBO_LEN; 3319 ndev->max_mtu = JUMBO_LEN;
3320 else 3320 else
3321 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 3321 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
3322 if (priv->plat->maxmtu < ndev->max_mtu) 3322 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
 3323 * as well as when plat->maxmtu < ndev->min_mtu, which is an invalid range.
3324 */
3325 if ((priv->plat->maxmtu < ndev->max_mtu) &&
3326 (priv->plat->maxmtu >= ndev->min_mtu))
3323 ndev->max_mtu = priv->plat->maxmtu; 3327 ndev->max_mtu = priv->plat->maxmtu;
3328 else if (priv->plat->maxmtu < ndev->min_mtu)
3329 dev_warn(priv->device,
3330 "%s: warning: maxmtu having invalid value (%d)\n",
3331 __func__, priv->plat->maxmtu);
3324 3332
3325 if (flow_ctrl) 3333 if (flow_ctrl)
3326 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 3334 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
@@ -3332,20 +3340,14 @@ int stmmac_dvr_probe(struct device *device,
3332 */ 3340 */
3333 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) { 3341 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
3334 priv->use_riwt = 1; 3342 priv->use_riwt = 1;
3335 netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n"); 3343 dev_info(priv->device,
3344 "Enable RX Mitigation via HW Watchdog Timer\n");
3336 } 3345 }
3337 3346
3338 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); 3347 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
3339 3348
3340 spin_lock_init(&priv->lock); 3349 spin_lock_init(&priv->lock);
3341 3350
3342 ret = register_netdev(ndev);
3343 if (ret) {
3344 netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
3345 __func__, ret);
3346 goto error_netdev_register;
3347 }
3348
3349 /* If a specific clk_csr value is passed from the platform 3351 /* If a specific clk_csr value is passed from the platform
3350 * this means that the CSR Clock Range selection cannot be 3352 * this means that the CSR Clock Range selection cannot be
3351 * changed at run-time and it is fixed. Viceversa the driver'll try to 3353 * changed at run-time and it is fixed. Viceversa the driver'll try to
@@ -3365,18 +3367,28 @@ int stmmac_dvr_probe(struct device *device,
3365 /* MDIO bus Registration */ 3367 /* MDIO bus Registration */
3366 ret = stmmac_mdio_register(ndev); 3368 ret = stmmac_mdio_register(ndev);
3367 if (ret < 0) { 3369 if (ret < 0) {
3368 netdev_err(priv->dev, 3370 dev_err(priv->device,
3369 "%s: MDIO bus (id: %d) registration failed", 3371 "%s: MDIO bus (id: %d) registration failed",
3370 __func__, priv->plat->bus_id); 3372 __func__, priv->plat->bus_id);
3371 goto error_mdio_register; 3373 goto error_mdio_register;
3372 } 3374 }
3373 } 3375 }
3374 3376
3375 return 0; 3377 ret = register_netdev(ndev);
3378 if (ret) {
3379 dev_err(priv->device, "%s: ERROR %i registering the device\n",
3380 __func__, ret);
3381 goto error_netdev_register;
3382 }
3383
3384 return ret;
3376 3385
3377error_mdio_register:
3378 unregister_netdev(ndev);
3379error_netdev_register: 3386error_netdev_register:
3387 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3388 priv->hw->pcs != STMMAC_PCS_TBI &&
3389 priv->hw->pcs != STMMAC_PCS_RTBI)
3390 stmmac_mdio_unregister(ndev);
3391error_mdio_register:
3380 netif_napi_del(&priv->napi); 3392 netif_napi_del(&priv->napi);
3381error_hw_init: 3393error_hw_init:
3382 clk_disable_unprepare(priv->pclk); 3394 clk_disable_unprepare(priv->pclk);
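
Moving register_netdev() to the end of stmmac_dvr_probe() matters because a
registered netdev is immediately visible to userspace; everything it depends
on (MDIO bus included) must already exist. The reshuffled error labels then
unwind in the exact reverse order of setup. The idiom in general form, with
hypothetical helpers standing in for the probe steps:

    #include <linux/platform_device.h>

    /* Hypothetical probe steps, stubbed for illustration. */
    static int example_setup_hw(struct platform_device *pdev) { return 0; }
    static void example_teardown_hw(struct platform_device *pdev) { }
    static int example_register_mdio(struct platform_device *pdev) { return 0; }
    static void example_unregister_mdio(struct platform_device *pdev) { }
    static int example_register_netdev(struct platform_device *pdev) { return 0; }

    static int example_probe(struct platform_device *pdev)
    {
            int ret;

            ret = example_setup_hw(pdev);
            if (ret)
                    return ret;

            ret = example_register_mdio(pdev);
            if (ret)
                    goto err_teardown_hw;

            /* Userspace can open the device right after this, so last. */
            ret = example_register_netdev(pdev);
            if (ret)
                    goto err_unregister_mdio;

            return 0;

    /* Unwind mirrors setup, in reverse. */
    err_unregister_mdio:
            example_unregister_mdio(pdev);
    err_teardown_hw:
            example_teardown_hw(pdev);
            return ret;
    }
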
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index fda01f770eff..b0344c213752 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -116,7 +116,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
116 unsigned int mii_address = priv->hw->mii.addr; 116 unsigned int mii_address = priv->hw->mii.addr;
117 unsigned int mii_data = priv->hw->mii.data; 117 unsigned int mii_data = priv->hw->mii.data;
118 118
119 u32 value = MII_WRITE | MII_BUSY; 119 u32 value = MII_BUSY;
120 120
121 value |= (phyaddr << priv->hw->mii.addr_shift) 121 value |= (phyaddr << priv->hw->mii.addr_shift)
122 & priv->hw->mii.addr_mask; 122 & priv->hw->mii.addr_mask;
@@ -126,6 +126,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
126 & priv->hw->mii.clk_csr_mask; 126 & priv->hw->mii.clk_csr_mask;
127 if (priv->plat->has_gmac4) 127 if (priv->plat->has_gmac4)
128 value |= MII_GMAC4_WRITE; 128 value |= MII_GMAC4_WRITE;
129 else
130 value |= MII_WRITE;
129 131
130 /* Wait until any existing MII operation is complete */ 132 /* Wait until any existing MII operation is complete */
131 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) 133 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index a2831773431a..3da4737620cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -89,6 +89,9 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
89 89
90 /* Set default value for unicast filter entries */ 90 /* Set default value for unicast filter entries */
91 plat->unicast_filter_entries = 1; 91 plat->unicast_filter_entries = 1;
92
93 /* Set the maxmtu to a default of JUMBO_LEN */
94 plat->maxmtu = JUMBO_LEN;
92} 95}
93 96
94static int quark_default_data(struct plat_stmmacenet_data *plat, 97static int quark_default_data(struct plat_stmmacenet_data *plat,
@@ -126,6 +129,9 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
126 /* Set default value for unicast filter entries */ 129 /* Set default value for unicast filter entries */
127 plat->unicast_filter_entries = 1; 130 plat->unicast_filter_entries = 1;
128 131
132 /* Set the maxmtu to a default of JUMBO_LEN */
133 plat->maxmtu = JUMBO_LEN;
134
129 return 0; 135 return 0;
130} 136}
131 137
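
Defaulting plat->maxmtu to JUMBO_LEN here completes the policy introduced in
the stmmac_main.c hunk above: maxmtu always holds a sane value, and probe
honours it only when it falls inside [ndev->min_mtu, ndev->max_mtu]. The
clamp as a standalone sketch (helper name hypothetical):

    #include <linux/netdevice.h>
    #include <linux/printk.h>

    static void example_apply_maxmtu(struct net_device *ndev, int maxmtu)
    {
            if (maxmtu < ndev->max_mtu && maxmtu >= ndev->min_mtu)
                    ndev->max_mtu = maxmtu;
            else if (maxmtu < ndev->min_mtu)
                    pr_warn("maxmtu %d below min_mtu %u, ignored\n",
                            maxmtu, ndev->min_mtu);
    }
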
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 082cd48db6a7..36942f5a6a53 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -351,6 +351,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev,
351 if (of_phy_is_fixed_link(np)) 351 if (of_phy_is_fixed_link(np))
352 of_phy_deregister_fixed_link(np); 352 of_phy_deregister_fixed_link(np);
353 of_node_put(plat->phy_node); 353 of_node_put(plat->phy_node);
354 of_node_put(plat->mdio_node);
354} 355}
355#else 356#else
356struct plat_stmmacenet_data * 357struct plat_stmmacenet_data *
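
The added of_node_put() fixes a device-tree refcount leak: plat->mdio_node
was looked up from the DT at probe time, and such lookups return the node
with a reference held that must eventually be dropped. The general pairing,
using of_parse_phandle() as one example of a reference-taking lookup
(property name hypothetical):

    #include <linux/of.h>

    static int example_use_phandle(struct device_node *np)
    {
            /* Returned with a reference held on success. */
            struct device_node *child =
                    of_parse_phandle(np, "example,phandle", 0);

            if (!child)
                    return -ENODEV;

            /* ... use child ... */

            of_node_put(child);     /* release the reference taken above */
            return 0;
    }
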
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 77c88fcf2b86..9b8a30bf939b 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1210,7 +1210,7 @@ int cpmac_init(void)
1210 goto fail_alloc; 1210 goto fail_alloc;
1211 } 1211 }
1212 1212
1213#warning FIXME: unhardcode gpio&reset bits 1213 /* FIXME: unhardcode gpio&reset bits */
1214 ar7_gpio_disable(26); 1214 ar7_gpio_disable(26);
1215 ar7_gpio_disable(27); 1215 ar7_gpio_disable(27);
1216 ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); 1216 ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b203143647e6..65088224c207 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -3160,7 +3160,7 @@ static int cpsw_resume(struct device *dev)
3160{ 3160{
3161 struct platform_device *pdev = to_platform_device(dev); 3161 struct platform_device *pdev = to_platform_device(dev);
3162 struct net_device *ndev = platform_get_drvdata(pdev); 3162 struct net_device *ndev = platform_get_drvdata(pdev);
3163 struct cpsw_common *cpsw = netdev_priv(ndev); 3163 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
3164 3164
3165 /* Select default pin state */ 3165 /* Select default pin state */
3166 pinctrl_pm_select_default_state(dev); 3166 pinctrl_pm_select_default_state(dev);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 93dc10b10c09..aa02a03a6d8d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -100,6 +100,14 @@
100/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */ 100/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
101#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT) 101#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
102 102
103#ifdef __BIG_ENDIAN
104#define xemaclite_readl ioread32be
105#define xemaclite_writel iowrite32be
106#else
107#define xemaclite_readl ioread32
108#define xemaclite_writel iowrite32
109#endif
110
103/** 111/**
104 * struct net_local - Our private per device data 112 * struct net_local - Our private per device data
105 * @ndev: instance of the network device 113 * @ndev: instance of the network device
@@ -156,15 +164,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
156 u32 reg_data; 164 u32 reg_data;
157 165
158 /* Enable the Tx interrupts for the first Buffer */ 166 /* Enable the Tx interrupts for the first Buffer */
159 reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); 167 reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
160 __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, 168 xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
161 drvdata->base_addr + XEL_TSR_OFFSET); 169 drvdata->base_addr + XEL_TSR_OFFSET);
162 170
163 /* Enable the Rx interrupts for the first buffer */ 171 /* Enable the Rx interrupts for the first buffer */
164 __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); 172 xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
165 173
166 /* Enable the Global Interrupt Enable */ 174 /* Enable the Global Interrupt Enable */
167 __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); 175 xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
168} 176}
169 177
170/** 178/**
@@ -179,17 +187,17 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
179 u32 reg_data; 187 u32 reg_data;
180 188
181 /* Disable the Global Interrupt Enable */ 189 /* Disable the Global Interrupt Enable */
182 __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); 190 xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
183 191
184 /* Disable the Tx interrupts for the first buffer */ 192 /* Disable the Tx interrupts for the first buffer */
185 reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); 193 reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
186 __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), 194 xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
187 drvdata->base_addr + XEL_TSR_OFFSET); 195 drvdata->base_addr + XEL_TSR_OFFSET);
188 196
189 /* Disable the Rx interrupts for the first buffer */ 197 /* Disable the Rx interrupts for the first buffer */
190 reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); 198 reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
191 __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), 199 xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
192 drvdata->base_addr + XEL_RSR_OFFSET); 200 drvdata->base_addr + XEL_RSR_OFFSET);
193} 201}
194 202
195/** 203/**
@@ -321,7 +329,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
321 byte_count = ETH_FRAME_LEN; 329 byte_count = ETH_FRAME_LEN;
322 330
323 /* Check if the expected buffer is available */ 331 /* Check if the expected buffer is available */
324 reg_data = __raw_readl(addr + XEL_TSR_OFFSET); 332 reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
325 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | 333 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
326 XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { 334 XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
327 335
@@ -334,7 +342,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
334 342
335 addr = (void __iomem __force *)((u32 __force)addr ^ 343 addr = (void __iomem __force *)((u32 __force)addr ^
336 XEL_BUFFER_OFFSET); 344 XEL_BUFFER_OFFSET);
337 reg_data = __raw_readl(addr + XEL_TSR_OFFSET); 345 reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
338 346
339 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | 347 if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
340 XEL_TSR_XMIT_ACTIVE_MASK)) != 0) 348 XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@@ -345,16 +353,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
345 /* Write the frame to the buffer */ 353 /* Write the frame to the buffer */
346 xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); 354 xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
347 355
348 __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK), 356 xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
349 addr + XEL_TPLR_OFFSET); 357 addr + XEL_TPLR_OFFSET);
350 358
351 /* Update the Tx Status Register to indicate that there is a 359 /* Update the Tx Status Register to indicate that there is a
352 * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which 360 * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
353 * is used by the interrupt handler to check whether a frame 361 * is used by the interrupt handler to check whether a frame
354 * has been transmitted */ 362 * has been transmitted */
355 reg_data = __raw_readl(addr + XEL_TSR_OFFSET); 363 reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
356 reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); 364 reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
357 __raw_writel(reg_data, addr + XEL_TSR_OFFSET); 365 xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
358 366
359 return 0; 367 return 0;
360} 368}
@@ -369,7 +377,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
369 * 377 *
370 * Return: Total number of bytes received 378 * Return: Total number of bytes received
371 */ 379 */
372static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) 380static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
373{ 381{
374 void __iomem *addr; 382 void __iomem *addr;
375 u16 length, proto_type; 383 u16 length, proto_type;
@@ -379,7 +387,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
379 addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use); 387 addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
380 388
381 /* Verify which buffer has valid data */ 389 /* Verify which buffer has valid data */
382 reg_data = __raw_readl(addr + XEL_RSR_OFFSET); 390 reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
383 391
384 if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) { 392 if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
385 if (drvdata->rx_ping_pong != 0) 393 if (drvdata->rx_ping_pong != 0)
@@ -396,27 +404,28 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
396 return 0; /* No data was available */ 404 return 0; /* No data was available */
397 405
398 /* Verify that buffer has valid data */ 406 /* Verify that buffer has valid data */
399 reg_data = __raw_readl(addr + XEL_RSR_OFFSET); 407 reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
400 if ((reg_data & XEL_RSR_RECV_DONE_MASK) != 408 if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
401 XEL_RSR_RECV_DONE_MASK) 409 XEL_RSR_RECV_DONE_MASK)
402 return 0; /* No data was available */ 410 return 0; /* No data was available */
403 } 411 }
404 412
405 /* Get the protocol type of the ethernet frame that arrived */ 413 /* Get the protocol type of the ethernet frame that arrived */
406 proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET + 414 proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
407 XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & 415 XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
408 XEL_RPLR_LENGTH_MASK); 416 XEL_RPLR_LENGTH_MASK);
409 417
410 /* Check if received ethernet frame is a raw ethernet frame 418 /* Check if received ethernet frame is a raw ethernet frame
411 * or an IP packet or an ARP packet */ 419 * or an IP packet or an ARP packet */
412 if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 420 if (proto_type > ETH_DATA_LEN) {
413 421
414 if (proto_type == ETH_P_IP) { 422 if (proto_type == ETH_P_IP) {
415 length = ((ntohl(__raw_readl(addr + 423 length = ((ntohl(xemaclite_readl(addr +
416 XEL_HEADER_IP_LENGTH_OFFSET + 424 XEL_HEADER_IP_LENGTH_OFFSET +
417 XEL_RXBUFF_OFFSET)) >> 425 XEL_RXBUFF_OFFSET)) >>
418 XEL_HEADER_SHIFT) & 426 XEL_HEADER_SHIFT) &
419 XEL_RPLR_LENGTH_MASK); 427 XEL_RPLR_LENGTH_MASK);
428 length = min_t(u16, length, ETH_DATA_LEN);
420 length += ETH_HLEN + ETH_FCS_LEN; 429 length += ETH_HLEN + ETH_FCS_LEN;
421 430
422 } else if (proto_type == ETH_P_ARP) 431 } else if (proto_type == ETH_P_ARP)
@@ -429,14 +438,17 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
429 /* Use the length in the frame, plus the header and trailer */ 438 /* Use the length in the frame, plus the header and trailer */
430 length = proto_type + ETH_HLEN + ETH_FCS_LEN; 439 length = proto_type + ETH_HLEN + ETH_FCS_LEN;
431 440
441 if (WARN_ON(length > maxlen))
442 length = maxlen;
443
432 /* Read from the EmacLite device */ 444 /* Read from the EmacLite device */
433 xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET), 445 xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
434 data, length); 446 data, length);
435 447
436 /* Acknowledge the frame */ 448 /* Acknowledge the frame */
437 reg_data = __raw_readl(addr + XEL_RSR_OFFSET); 449 reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
438 reg_data &= ~XEL_RSR_RECV_DONE_MASK; 450 reg_data &= ~XEL_RSR_RECV_DONE_MASK;
439 __raw_writel(reg_data, addr + XEL_RSR_OFFSET); 451 xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
440 452
441 return length; 453 return length;
442} 454}
@@ -463,14 +475,14 @@ static void xemaclite_update_address(struct net_local *drvdata,
463 475
464 xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); 476 xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
465 477
466 __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); 478 xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
467 479
468 /* Update the MAC address in the EmacLite */ 480 /* Update the MAC address in the EmacLite */
469 reg_data = __raw_readl(addr + XEL_TSR_OFFSET); 481 reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
470 __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); 482 xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
471 483
472 /* Wait for EmacLite to finish with the MAC address update */ 484 /* Wait for EmacLite to finish with the MAC address update */
473 while ((__raw_readl(addr + XEL_TSR_OFFSET) & 485 while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
474 XEL_TSR_PROG_MAC_ADDR) != 0) 486 XEL_TSR_PROG_MAC_ADDR) != 0)
475 ; 487 ;
476} 488}
@@ -603,7 +615,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
603 615
604 skb_reserve(skb, 2); 616 skb_reserve(skb, 2);
605 617
606 len = xemaclite_recv_data(lp, (u8 *) skb->data); 618 len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
607 619
608 if (!len) { 620 if (!len) {
609 dev->stats.rx_errors++; 621 dev->stats.rx_errors++;
@@ -640,32 +652,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
640 u32 tx_status; 652 u32 tx_status;
641 653
642 /* Check if there is Rx Data available */ 654 /* Check if there is Rx Data available */
643 if ((__raw_readl(base_addr + XEL_RSR_OFFSET) & 655 if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
644 XEL_RSR_RECV_DONE_MASK) || 656 XEL_RSR_RECV_DONE_MASK) ||
645 (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) 657 (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
646 & XEL_RSR_RECV_DONE_MASK)) 658 & XEL_RSR_RECV_DONE_MASK))
647 659
648 xemaclite_rx_handler(dev); 660 xemaclite_rx_handler(dev);
649 661
650 /* Check if the Transmission for the first buffer is completed */ 662 /* Check if the Transmission for the first buffer is completed */
651 tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET); 663 tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
652 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && 664 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
653 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { 665 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
654 666
655 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; 667 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
656 __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET); 668 xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
657 669
658 tx_complete = true; 670 tx_complete = true;
659 } 671 }
660 672
661 /* Check if the Transmission for the second buffer is completed */ 673 /* Check if the Transmission for the second buffer is completed */
662 tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); 674 tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
663 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && 675 if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
664 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { 676 (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
665 677
666 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; 678 tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
667 __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + 679 xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
668 XEL_TSR_OFFSET); 680 XEL_TSR_OFFSET);
669 681
670 tx_complete = true; 682 tx_complete = true;
671 } 683 }
@@ -698,7 +710,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
698 /* wait for the MDIO interface to not be busy or timeout 710 /* wait for the MDIO interface to not be busy or timeout
699 after some time. 711 after some time.
700 */ 712 */
701 while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & 713 while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
702 XEL_MDIOCTRL_MDIOSTS_MASK) { 714 XEL_MDIOCTRL_MDIOSTS_MASK) {
703 if (time_before_eq(end, jiffies)) { 715 if (time_before_eq(end, jiffies)) {
704 WARN_ON(1); 716 WARN_ON(1);
@@ -734,17 +746,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
734 * MDIO Address register. Set the Status bit in the MDIO Control 746 * MDIO Address register. Set the Status bit in the MDIO Control
735 * register to start a MDIO read transaction. 747 * register to start a MDIO read transaction.
736 */ 748 */
737 ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); 749 ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
738 __raw_writel(XEL_MDIOADDR_OP_MASK | 750 xemaclite_writel(XEL_MDIOADDR_OP_MASK |
739 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), 751 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
740 lp->base_addr + XEL_MDIOADDR_OFFSET); 752 lp->base_addr + XEL_MDIOADDR_OFFSET);
741 __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, 753 xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
742 lp->base_addr + XEL_MDIOCTRL_OFFSET); 754 lp->base_addr + XEL_MDIOCTRL_OFFSET);
743 755
744 if (xemaclite_mdio_wait(lp)) 756 if (xemaclite_mdio_wait(lp))
745 return -ETIMEDOUT; 757 return -ETIMEDOUT;
746 758
747 rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET); 759 rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
748 760
749 dev_dbg(&lp->ndev->dev, 761 dev_dbg(&lp->ndev->dev,
750 "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", 762 "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@@ -781,13 +793,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
781 * Data register. Finally, set the Status bit in the MDIO Control 793 * Data register. Finally, set the Status bit in the MDIO Control
782 * register to start a MDIO write transaction. 794 * register to start a MDIO write transaction.
783 */ 795 */
784 ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); 796 ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
785 __raw_writel(~XEL_MDIOADDR_OP_MASK & 797 xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
786 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), 798 ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
787 lp->base_addr + XEL_MDIOADDR_OFFSET); 799 lp->base_addr + XEL_MDIOADDR_OFFSET);
788 __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); 800 xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
789 __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, 801 xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
790 lp->base_addr + XEL_MDIOCTRL_OFFSET); 802 lp->base_addr + XEL_MDIOCTRL_OFFSET);
791 803
792 return 0; 804 return 0;
793} 805}
@@ -834,8 +846,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
834 /* Enable the MDIO bus by asserting the enable bit in MDIO Control 846 /* Enable the MDIO bus by asserting the enable bit in MDIO Control
835 * register. 847 * register.
836 */ 848 */
837 __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK, 849 xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
838 lp->base_addr + XEL_MDIOCTRL_OFFSET); 850 lp->base_addr + XEL_MDIOCTRL_OFFSET);
839 851
840 bus = mdiobus_alloc(); 852 bus = mdiobus_alloc();
841 if (!bus) { 853 if (!bus) {
@@ -1140,8 +1152,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
1140 } 1152 }
1141 1153
1142 /* Clear the Tx CSR's in case this is a restart */ 1154 /* Clear the Tx CSR's in case this is a restart */
1143 __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); 1155 xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
1144 __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); 1156 xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
1145 1157
1146 /* Set the MAC address in the EmacLite device */ 1158 /* Set the MAC address in the EmacLite device */
1147 xemaclite_update_address(lp, ndev->dev_addr); 1159 xemaclite_update_address(lp, ndev->dev_addr);
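
Beyond swapping in the endian-aware xemaclite_readl/writel accessors, the
emaclite receive path is now bounded: the IP-derived length is clamped with
min_t() against ETH_DATA_LEN, and the final frame length is clamped against
the maxlen the caller passes in, so a corrupt on-wire length field can no
longer overrun the skb. The clamp reduced to its essentials (helper name
hypothetical):

    #include <linux/bug.h>
    #include <linux/if_ether.h>
    #include <linux/kernel.h>

    static u16 example_bounded_len(u16 ip_len, int maxlen)
    {
            u16 len;

            len = min_t(u16, ip_len, ETH_DATA_LEN) + ETH_HLEN + ETH_FCS_LEN;
            if (WARN_ON(len > maxlen))      /* device lied about the length */
                    len = maxlen;
            return len;
    }
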
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 8b6810bad54b..99d3df788ce8 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -69,7 +69,6 @@ struct gtp_dev {
69 struct socket *sock0; 69 struct socket *sock0;
70 struct socket *sock1u; 70 struct socket *sock1u;
71 71
72 struct net *net;
73 struct net_device *dev; 72 struct net_device *dev;
74 73
75 unsigned int hash_size; 74 unsigned int hash_size;
@@ -316,7 +315,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
316 315
317 netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); 316 netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
318 317
319 xnet = !net_eq(gtp->net, dev_net(gtp->dev)); 318 xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
320 319
321 switch (udp_sk(sk)->encap_type) { 320 switch (udp_sk(sk)->encap_type) {
322 case UDP_ENCAP_GTP0: 321 case UDP_ENCAP_GTP0:
@@ -612,7 +611,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
612 pktinfo.fl4.saddr, pktinfo.fl4.daddr, 611 pktinfo.fl4.saddr, pktinfo.fl4.daddr,
613 pktinfo.iph->tos, 612 pktinfo.iph->tos,
614 ip4_dst_hoplimit(&pktinfo.rt->dst), 613 ip4_dst_hoplimit(&pktinfo.rt->dst),
615 htons(IP_DF), 614 0,
616 pktinfo.gtph_port, pktinfo.gtph_port, 615 pktinfo.gtph_port, pktinfo.gtph_port,
617 true, false); 616 true, false);
618 break; 617 break;
@@ -658,7 +657,7 @@ static void gtp_link_setup(struct net_device *dev)
658static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); 657static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
659static void gtp_hashtable_free(struct gtp_dev *gtp); 658static void gtp_hashtable_free(struct gtp_dev *gtp);
660static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, 659static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
661 int fd_gtp0, int fd_gtp1, struct net *src_net); 660 int fd_gtp0, int fd_gtp1);
662 661
663static int gtp_newlink(struct net *src_net, struct net_device *dev, 662static int gtp_newlink(struct net *src_net, struct net_device *dev,
664 struct nlattr *tb[], struct nlattr *data[]) 663 struct nlattr *tb[], struct nlattr *data[])
@@ -675,7 +674,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
675 fd0 = nla_get_u32(data[IFLA_GTP_FD0]); 674 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
676 fd1 = nla_get_u32(data[IFLA_GTP_FD1]); 675 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
677 676
678 err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net); 677 err = gtp_encap_enable(dev, gtp, fd0, fd1);
679 if (err < 0) 678 if (err < 0)
680 goto out_err; 679 goto out_err;
681 680
@@ -821,7 +820,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
821} 820}
822 821
823static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, 822static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
824 int fd_gtp0, int fd_gtp1, struct net *src_net) 823 int fd_gtp0, int fd_gtp1)
825{ 824{
826 struct udp_tunnel_sock_cfg tuncfg = {NULL}; 825 struct udp_tunnel_sock_cfg tuncfg = {NULL};
827 struct socket *sock0, *sock1u; 826 struct socket *sock0, *sock1u;
@@ -858,7 +857,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
858 857
859 gtp->sock0 = sock0; 858 gtp->sock0 = sock0;
860 gtp->sock1u = sock1u; 859 gtp->sock1u = sock1u;
861 gtp->net = src_net;
862 860
863 tuncfg.sk_user_data = gtp; 861 tuncfg.sk_user_data = gtp;
864 tuncfg.encap_rcv = gtp_encap_recv; 862 tuncfg.encap_rcv = gtp_encap_recv;
@@ -1376,3 +1374,4 @@ MODULE_LICENSE("GPL");
1376MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>"); 1374MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
1377MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic"); 1375MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
1378MODULE_ALIAS_RTNL_LINK("gtp"); 1376MODULE_ALIAS_RTNL_LINK("gtp");
1377MODULE_ALIAS_GENL_FAMILY("gtp");
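
Dropping the cached gtp->net in favour of sock_net(sk) removes a stale
pointer and is also the correct source of truth: the namespace that matters
at decapsulation time is the one the UDP socket lives in, read when the
packet actually arrives. The comparison on its own:

    #include <linux/netdevice.h>
    #include <net/net_namespace.h>
    #include <net/sock.h>

    /* Sketch: does delivery from this socket to this device cross netns? */
    static bool example_is_cross_netns(struct sock *sk,
                                       struct net_device *dev)
    {
            return !net_eq(sock_net(sk), dev_net(dev));
    }
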
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index ece59c54a653..4a40a3d825b4 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -648,8 +648,8 @@ static void ax_setup(struct net_device *dev)
648{ 648{
649 /* Finish setting up the DEVICE info. */ 649 /* Finish setting up the DEVICE info. */
650 dev->mtu = AX_MTU; 650 dev->mtu = AX_MTU;
651 dev->hard_header_len = 0; 651 dev->hard_header_len = AX25_MAX_HEADER_LEN;
652 dev->addr_len = 0; 652 dev->addr_len = AX25_ADDR_LEN;
653 dev->type = ARPHRD_AX25; 653 dev->type = ARPHRD_AX25;
654 dev->tx_queue_len = 10; 654 dev->tx_queue_len = 10;
655 dev->header_ops = &ax25_header_ops; 655 dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5a1cc089acb7..86e5749226ef 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1295,6 +1295,9 @@ void netvsc_channel_cb(void *context)
1295 ndev = hv_get_drvdata(device); 1295 ndev = hv_get_drvdata(device);
1296 buffer = get_per_channel_state(channel); 1296 buffer = get_per_channel_state(channel);
1297 1297
1298 /* commit_rd_index() -> hv_signal_on_read() needs this. */
1299 init_cached_read_index(channel);
1300
1298 do { 1301 do {
1299 desc = get_next_pkt_raw(channel); 1302 desc = get_next_pkt_raw(channel);
1300 if (desc != NULL) { 1303 if (desc != NULL) {
@@ -1347,6 +1350,9 @@ void netvsc_channel_cb(void *context)
1347 1350
1348 bufferlen = bytes_recvd; 1351 bufferlen = bytes_recvd;
1349 } 1352 }
1353
1354 init_cached_read_index(channel);
1355
1350 } while (1); 1356 } while (1);
1351 1357
1352 if (bufferlen > NETVSC_PACKET_SIZE) 1358 if (bufferlen > NETVSC_PACKET_SIZE)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c9414c054852..fcab8019dda0 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -659,6 +659,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
659 * policy filters on the host). Deliver these via the VF 659 * policy filters on the host). Deliver these via the VF
660 * interface in the guest. 660 * interface in the guest.
661 */ 661 */
662 rcu_read_lock();
662 vf_netdev = rcu_dereference(net_device_ctx->vf_netdev); 663 vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
663 if (vf_netdev && (vf_netdev->flags & IFF_UP)) 664 if (vf_netdev && (vf_netdev->flags & IFF_UP))
664 net = vf_netdev; 665 net = vf_netdev;
@@ -667,6 +668,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
667 skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci); 668 skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
668 if (unlikely(!skb)) { 669 if (unlikely(!skb)) {
669 ++net->stats.rx_dropped; 670 ++net->stats.rx_dropped;
671 rcu_read_unlock();
670 return NVSP_STAT_FAIL; 672 return NVSP_STAT_FAIL;
671 } 673 }
672 674
@@ -696,6 +698,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
696 * TODO - use NAPI? 698 * TODO - use NAPI?
697 */ 699 */
698 netif_rx(skb); 700 netif_rx(skb);
701 rcu_read_unlock();
699 702
700 return 0; 703 return 0;
701} 704}
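
The netvsc change brackets the rcu_dereference() of vf_netdev with
rcu_read_lock()/rcu_read_unlock(), including the early-return path, since an
RCU-protected pointer may only be dereferenced inside a read-side critical
section. The essentials, with an illustrative context struct standing in for
the netvsc one:

    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>

    struct example_ctx {
            struct net_device __rcu *vf_netdev;
    };

    static bool example_vf_is_up(struct example_ctx *ctx)
    {
            struct net_device *vf;
            bool up;

            rcu_read_lock();
            vf = rcu_dereference(ctx->vf_netdev);
            up = vf && (vf->flags & IFF_UP);
            rcu_read_unlock();      /* must also run on every early return */

            return up;
    }
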
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 46d53a6c8cf8..76ba7ecfe142 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1715,9 +1715,9 @@ static int at86rf230_probe(struct spi_device *spi)
1715 /* Reset */ 1715 /* Reset */
1716 if (gpio_is_valid(rstn)) { 1716 if (gpio_is_valid(rstn)) {
1717 udelay(1); 1717 udelay(1);
1718 gpio_set_value(rstn, 0); 1718 gpio_set_value_cansleep(rstn, 0);
1719 udelay(1); 1719 udelay(1);
1720 gpio_set_value(rstn, 1); 1720 gpio_set_value_cansleep(rstn, 1);
1721 usleep_range(120, 240); 1721 usleep_range(120, 240);
1722 } 1722 }
1723 1723
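
at86rf230_probe() runs in process context, so it may use the _cansleep GPIO
accessors; these also work when the reset line sits behind a sleeping
controller such as an I2C GPIO expander, where the non-sleeping variants
would trigger a warning. The reset pulse in isolation (timings copied from
the hunk above):

    #include <linux/delay.h>
    #include <linux/gpio.h>

    static void example_reset_pulse(int rstn)
    {
            udelay(1);
            gpio_set_value_cansleep(rstn, 0);
            udelay(1);
            gpio_set_value_cansleep(rstn, 1);
            usleep_range(120, 240);
    }
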
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 1253f864737a..ef688518ad77 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -117,13 +117,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
117{ 117{
118 struct usb_device *usb_dev = atusb->usb_dev; 118 struct usb_device *usb_dev = atusb->usb_dev;
119 int ret; 119 int ret;
120 uint8_t *buffer;
120 uint8_t value; 121 uint8_t value;
121 122
123 buffer = kmalloc(1, GFP_KERNEL);
124 if (!buffer)
125 return -ENOMEM;
126
122 dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg); 127 dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
123 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), 128 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
124 ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, 129 ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
125 0, reg, &value, 1, 1000); 130 0, reg, buffer, 1, 1000);
126 return ret >= 0 ? value : ret; 131
132 if (ret >= 0) {
133 value = buffer[0];
134 kfree(buffer);
135 return value;
136 } else {
137 kfree(buffer);
138 return ret;
139 }
127} 140}
128 141
129static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask, 142static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
@@ -549,13 +562,6 @@ static int
549atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries) 562atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
550{ 563{
551 struct atusb *atusb = hw->priv; 564 struct atusb *atusb = hw->priv;
552 struct device *dev = &atusb->usb_dev->dev;
553
554 if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) {
555 dev_info(dev, "Automatic frame retransmission is only available from "
556 "firmware version 0.3. Please update if you want this feature.");
557 return -EINVAL;
558 }
559 565
560 return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries); 566 return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries);
561} 567}
@@ -608,9 +614,13 @@ static const struct ieee802154_ops atusb_ops = {
608static int atusb_get_and_show_revision(struct atusb *atusb) 614static int atusb_get_and_show_revision(struct atusb *atusb)
609{ 615{
610 struct usb_device *usb_dev = atusb->usb_dev; 616 struct usb_device *usb_dev = atusb->usb_dev;
611 unsigned char buffer[3]; 617 unsigned char *buffer;
612 int ret; 618 int ret;
613 619
620 buffer = kmalloc(3, GFP_KERNEL);
621 if (!buffer)
622 return -ENOMEM;
623
614 /* Get a couple of the ATMega Firmware values */ 624 /* Get a couple of the ATMega Firmware values */
615 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), 625 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
616 ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0, 626 ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
@@ -631,15 +641,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
631 dev_info(&usb_dev->dev, "Please update to version 0.2 or newer"); 641 dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
632 } 642 }
633 643
644 kfree(buffer);
634 return ret; 645 return ret;
635} 646}
636 647
637static int atusb_get_and_show_build(struct atusb *atusb) 648static int atusb_get_and_show_build(struct atusb *atusb)
638{ 649{
639 struct usb_device *usb_dev = atusb->usb_dev; 650 struct usb_device *usb_dev = atusb->usb_dev;
640 char build[ATUSB_BUILD_SIZE + 1]; 651 char *build;
641 int ret; 652 int ret;
642 653
654 build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL);
655 if (!build)
656 return -ENOMEM;
657
643 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), 658 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
644 ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0, 659 ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
645 build, ATUSB_BUILD_SIZE, 1000); 660 build, ATUSB_BUILD_SIZE, 1000);
@@ -648,6 +663,7 @@ static int atusb_get_and_show_build(struct atusb *atusb)
648 dev_info(&usb_dev->dev, "Firmware: build %s\n", build); 663 dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
649 } 664 }
650 665
666 kfree(build);
651 return ret; 667 return ret;
652} 668}
653 669
@@ -698,7 +714,7 @@ fail:
698static int atusb_set_extended_addr(struct atusb *atusb) 714static int atusb_set_extended_addr(struct atusb *atusb)
699{ 715{
700 struct usb_device *usb_dev = atusb->usb_dev; 716 struct usb_device *usb_dev = atusb->usb_dev;
701 unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN]; 717 unsigned char *buffer;
702 __le64 extended_addr; 718 __le64 extended_addr;
703 u64 addr; 719 u64 addr;
704 int ret; 720 int ret;
@@ -710,12 +726,20 @@ static int atusb_set_extended_addr(struct atusb *atusb)
710 return 0; 726 return 0;
711 } 727 }
712 728
729 buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL);
730 if (!buffer)
731 return -ENOMEM;
732
713 /* Firmware is new enough so we fetch the address from EEPROM */ 733 /* Firmware is new enough so we fetch the address from EEPROM */
714 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), 734 ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
715 ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0, 735 ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
716 buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000); 736 buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000);
717 if (ret < 0) 737 if (ret < 0) {
718 dev_err(&usb_dev->dev, "failed to fetch extended address\n"); 738 dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n");
739 ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
740 kfree(buffer);
741 return ret;
742 }
719 743
720 memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN); 744 memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN);
721 /* Check if read address is not empty and the unicast bit is set correctly */ 745 /* Check if read address is not empty and the unicast bit is set correctly */
@@ -729,6 +753,7 @@ static int atusb_set_extended_addr(struct atusb *atusb)
729 &addr); 753 &addr);
730 } 754 }
731 755
756 kfree(buffer);
732 return ret; 757 return ret;
733} 758}
734 759
@@ -770,8 +795,7 @@ static int atusb_probe(struct usb_interface *interface,
770 795
771 hw->parent = &usb_dev->dev; 796 hw->parent = &usb_dev->dev;
772 hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | 797 hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
773 IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS | 798 IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
774 IEEE802154_HW_FRAME_RETRIES;
775 799
776 hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL | 800 hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
777 WPAN_PHY_FLAG_CCA_MODE; 801 WPAN_PHY_FLAG_CCA_MODE;
@@ -800,6 +824,9 @@ static int atusb_probe(struct usb_interface *interface,
800 atusb_get_and_show_build(atusb); 824 atusb_get_and_show_build(atusb);
801 atusb_set_extended_addr(atusb); 825 atusb_set_extended_addr(atusb);
802 826
827 if (atusb->fw_ver_maj >= 0 && atusb->fw_ver_min >= 3)
828 hw->flags |= IEEE802154_HW_FRAME_RETRIES;
829
803 ret = atusb_get_and_clear_error(atusb); 830 ret = atusb_get_and_clear_error(atusb);
804 if (ret) { 831 if (ret) {
805 dev_err(&atusb->usb_dev->dev, 832 dev_err(&atusb->usb_dev->dev,
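
Every atusb conversion above follows one rule: buffers handed to USB control
transfers must be heap memory, not stack memory, because the USB core may
DMA-map them and the stack is not guaranteed to be DMA-able. The recurring
shape, with a hypothetical vendor request rather than a real atusb one:

    #include <linux/slab.h>
    #include <linux/usb.h>

    static int example_usb_read_byte(struct usb_device *udev, u8 *out)
    {
            u8 *buf;
            int ret;

            buf = kmalloc(1, GFP_KERNEL);   /* DMA-able, unlike the stack */
            if (!buf)
                    return -ENOMEM;

            ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                  0x01 /* hypothetical bRequest */,
                                  USB_DIR_IN | USB_TYPE_VENDOR,
                                  0, 0, buf, 1, 1000);
            if (ret >= 0)
                    *out = buf[0];

            kfree(buf);
            return ret < 0 ? ret : 0;
    }
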
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 031093e1c25f..dbfbb33ac66c 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -99,6 +99,11 @@ struct ipvl_port {
99 int count; 99 int count;
100}; 100};
101 101
102struct ipvl_skb_cb {
103 bool tx_pkt;
104};
105#define IPVL_SKB_CB(_skb) ((struct ipvl_skb_cb *)&((_skb)->cb[0]))
106
102static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d) 107static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
103{ 108{
104 return rcu_dereference(d->rx_handler_data); 109 return rcu_dereference(d->rx_handler_data);
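
IPVL_SKB_CB() is the standard pattern for stashing per-packet state in
sk_buff::cb, a scratch area each layer may reuse while it owns the skb; here
it records whether the deferred packet originated on the TX path. A minimal
version of the idiom (names hypothetical):

    #include <linux/bug.h>
    #include <linux/skbuff.h>

    struct example_skb_cb {
            bool tx_pkt;
    };
    #define EXAMPLE_SKB_CB(skb) ((struct example_skb_cb *)&((skb)->cb[0]))

    static void example_mark_tx(struct sk_buff *skb, bool tx)
    {
            /* The private control block must fit inside skb->cb. */
            BUILD_BUG_ON(sizeof(struct example_skb_cb) > sizeof(skb->cb));
            EXAMPLE_SKB_CB(skb)->tx_pkt = tx;
    }
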
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index b4e990743e1d..83ce74acf82d 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -198,7 +198,7 @@ void ipvlan_process_multicast(struct work_struct *work)
198 unsigned int mac_hash; 198 unsigned int mac_hash;
199 int ret; 199 int ret;
200 u8 pkt_type; 200 u8 pkt_type;
201 bool hlocal, dlocal; 201 bool tx_pkt;
202 202
203 __skb_queue_head_init(&list); 203 __skb_queue_head_init(&list);
204 204
@@ -207,8 +207,11 @@ void ipvlan_process_multicast(struct work_struct *work)
207 spin_unlock_bh(&port->backlog.lock); 207 spin_unlock_bh(&port->backlog.lock);
208 208
209 while ((skb = __skb_dequeue(&list)) != NULL) { 209 while ((skb = __skb_dequeue(&list)) != NULL) {
210 struct net_device *dev = skb->dev;
211 bool consumed = false;
212
210 ethh = eth_hdr(skb); 213 ethh = eth_hdr(skb);
211 hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr); 214 tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
212 mac_hash = ipvlan_mac_hash(ethh->h_dest); 215 mac_hash = ipvlan_mac_hash(ethh->h_dest);
213 216
214 if (ether_addr_equal(ethh->h_dest, port->dev->broadcast)) 217 if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
@@ -216,41 +219,45 @@ void ipvlan_process_multicast(struct work_struct *work)
216 else 219 else
217 pkt_type = PACKET_MULTICAST; 220 pkt_type = PACKET_MULTICAST;
218 221
219 dlocal = false;
220 rcu_read_lock(); 222 rcu_read_lock();
221 list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) { 223 list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
222 if (hlocal && (ipvlan->dev == skb->dev)) { 224 if (tx_pkt && (ipvlan->dev == skb->dev))
223 dlocal = true;
224 continue; 225 continue;
225 }
226 if (!test_bit(mac_hash, ipvlan->mac_filters)) 226 if (!test_bit(mac_hash, ipvlan->mac_filters))
227 continue; 227 continue;
228 228 if (!(ipvlan->dev->flags & IFF_UP))
229 continue;
229 ret = NET_RX_DROP; 230 ret = NET_RX_DROP;
230 len = skb->len + ETH_HLEN; 231 len = skb->len + ETH_HLEN;
231 nskb = skb_clone(skb, GFP_ATOMIC); 232 nskb = skb_clone(skb, GFP_ATOMIC);
232 if (!nskb) 233 local_bh_disable();
233 goto acct; 234 if (nskb) {
234 235 consumed = true;
235 nskb->pkt_type = pkt_type; 236 nskb->pkt_type = pkt_type;
236 nskb->dev = ipvlan->dev; 237 nskb->dev = ipvlan->dev;
237 if (hlocal) 238 if (tx_pkt)
238 ret = dev_forward_skb(ipvlan->dev, nskb); 239 ret = dev_forward_skb(ipvlan->dev, nskb);
239 else 240 else
240 ret = netif_rx(nskb); 241 ret = netif_rx(nskb);
241acct: 242 }
242 ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); 243 ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
244 local_bh_enable();
243 } 245 }
244 rcu_read_unlock(); 246 rcu_read_unlock();
245 247
246 if (dlocal) { 248 if (tx_pkt) {
247 /* If the packet originated here, send it out. */ 249 /* If the packet originated here, send it out. */
248 skb->dev = port->dev; 250 skb->dev = port->dev;
249 skb->pkt_type = pkt_type; 251 skb->pkt_type = pkt_type;
250 dev_queue_xmit(skb); 252 dev_queue_xmit(skb);
251 } else { 253 } else {
252 kfree_skb(skb); 254 if (consumed)
255 consume_skb(skb);
256 else
257 kfree_skb(skb);
253 } 258 }
259 if (dev)
260 dev_put(dev);
254 } 261 }
255} 262}
256 263
@@ -470,15 +477,24 @@ out:
470} 477}
471 478
472static void ipvlan_multicast_enqueue(struct ipvl_port *port, 479static void ipvlan_multicast_enqueue(struct ipvl_port *port,
473 struct sk_buff *skb) 480 struct sk_buff *skb, bool tx_pkt)
474{ 481{
475 if (skb->protocol == htons(ETH_P_PAUSE)) { 482 if (skb->protocol == htons(ETH_P_PAUSE)) {
476 kfree_skb(skb); 483 kfree_skb(skb);
477 return; 484 return;
478 } 485 }
479 486
 487 /* Record whether the deferred packet came from the TX or RX path.
 488 * Deciding this from the packet's MAC addresses would lead to
 489 * erroneous decisions. (This would be the case for loopback mode
 490 * on the master device or hair-pin mode on the switch.)
 491 */
492 IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;
493
480 spin_lock(&port->backlog.lock); 494 spin_lock(&port->backlog.lock);
481 if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) { 495 if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
496 if (skb->dev)
497 dev_hold(skb->dev);
482 __skb_queue_tail(&port->backlog, skb); 498 __skb_queue_tail(&port->backlog, skb);
483 spin_unlock(&port->backlog.lock); 499 spin_unlock(&port->backlog.lock);
484 schedule_work(&port->wq); 500 schedule_work(&port->wq);
@@ -537,7 +553,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
537 553
538 } else if (is_multicast_ether_addr(eth->h_dest)) { 554 } else if (is_multicast_ether_addr(eth->h_dest)) {
539 ipvlan_skb_crossing_ns(skb, NULL); 555 ipvlan_skb_crossing_ns(skb, NULL);
540 ipvlan_multicast_enqueue(ipvlan->port, skb); 556 ipvlan_multicast_enqueue(ipvlan->port, skb, true);
541 return NET_XMIT_SUCCESS; 557 return NET_XMIT_SUCCESS;
542 } 558 }
543 559
@@ -634,7 +650,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
634 */ 650 */
635 if (nskb) { 651 if (nskb) {
636 ipvlan_skb_crossing_ns(nskb, NULL); 652 ipvlan_skb_crossing_ns(nskb, NULL);
637 ipvlan_multicast_enqueue(port, nskb); 653 ipvlan_multicast_enqueue(port, nskb, false);
638 } 654 }
639 } 655 }
640 } else { 656 } else {
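
The multicast backlog now also pins skb->dev: dev_hold() when the packet is
queued, dev_put() once the worker (or the purge loop added to
ipvlan_port_destroy() below) is done with it, so the device cannot be
unregistered while packets referencing it sit on the queue. The enqueue side
as a sketch, queue locking simplified:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void example_defer_skb(struct sk_buff_head *q, struct sk_buff *skb)
    {
            if (skb->dev)
                    dev_hold(skb->dev);     /* paired with dev_put() later */
            skb_queue_tail(q, skb);
    }
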
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 693ec5b66222..8b0f99300cbc 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -135,6 +135,7 @@ err:
135static void ipvlan_port_destroy(struct net_device *dev) 135static void ipvlan_port_destroy(struct net_device *dev)
136{ 136{
137 struct ipvl_port *port = ipvlan_port_get_rtnl(dev); 137 struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
138 struct sk_buff *skb;
138 139
139 dev->priv_flags &= ~IFF_IPVLAN_MASTER; 140 dev->priv_flags &= ~IFF_IPVLAN_MASTER;
140 if (port->mode == IPVLAN_MODE_L3S) { 141 if (port->mode == IPVLAN_MODE_L3S) {
@@ -144,7 +145,11 @@ static void ipvlan_port_destroy(struct net_device *dev)
144 } 145 }
145 netdev_rx_handler_unregister(dev); 146 netdev_rx_handler_unregister(dev);
146 cancel_work_sync(&port->wq); 147 cancel_work_sync(&port->wq);
147 __skb_queue_purge(&port->backlog); 148 while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
149 if (skb->dev)
150 dev_put(skb->dev);
151 kfree_skb(skb);
152 }
148 kfree(port); 153 kfree(port);
149} 154}
150 155
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 1e05b7c2d157..0844f8496413 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -164,6 +164,7 @@ static void loopback_setup(struct net_device *dev)
164{ 164{
165 dev->mtu = 64 * 1024; 165 dev->mtu = 64 * 1024;
166 dev->hard_header_len = ETH_HLEN; /* 14 */ 166 dev->hard_header_len = ETH_HLEN; /* 14 */
167 dev->min_header_len = ETH_HLEN; /* 14 */
167 dev->addr_len = ETH_ALEN; /* 6 */ 168 dev->addr_len = ETH_ALEN; /* 6 */
168 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ 169 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
169 dev->flags = IFF_LOOPBACK; 170 dev->flags = IFF_LOOPBACK;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5c26653eceb5..c27011bbe30c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -681,7 +681,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
681 size_t linear; 681 size_t linear;
682 682
683 if (q->flags & IFF_VNET_HDR) { 683 if (q->flags & IFF_VNET_HDR) {
684 vnet_hdr_len = q->vnet_hdr_sz; 684 vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
685 685
686 err = -EINVAL; 686 err = -EINVAL;
687 if (len < vnet_hdr_len) 687 if (len < vnet_hdr_len)
@@ -820,12 +820,12 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
820 820
821 if (q->flags & IFF_VNET_HDR) { 821 if (q->flags & IFF_VNET_HDR) {
822 struct virtio_net_hdr vnet_hdr; 822 struct virtio_net_hdr vnet_hdr;
823 vnet_hdr_len = q->vnet_hdr_sz; 823 vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
824 if (iov_iter_count(iter) < vnet_hdr_len) 824 if (iov_iter_count(iter) < vnet_hdr_len)
825 return -EINVAL; 825 return -EINVAL;
826 826
827 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, 827 if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
828 macvtap_is_little_endian(q))) 828 macvtap_is_little_endian(q), true))
829 BUG(); 829 BUG();
830 830
831 if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != 831 if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
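
macvtap reads q->vnet_hdr_sz while an ioctl on another thread may rewrite it;
READ_ONCE() forces a single untorn load so the length checks and the copy
that follow all see the same value. The pattern, with an illustrative struct
layout:

    #include <linux/compiler.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct example_queue {
            int vnet_hdr_sz;
    };

    static int example_hdr_len(const struct example_queue *q, size_t count)
    {
            int len = READ_ONCE(q->vnet_hdr_sz);    /* one untorn load */

            /* Later checks must use the snapshot, never reread the field. */
            if (count < (size_t)len)
                    return -EINVAL;
            return len;
    }
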
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index d361835b315d..8dbd59baa34d 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -279,6 +279,7 @@ config MARVELL_PHY
279 279
280config MESON_GXL_PHY 280config MESON_GXL_PHY
281 tristate "Amlogic Meson GXL Internal PHY" 281 tristate "Amlogic Meson GXL Internal PHY"
282 depends on ARCH_MESON || COMPILE_TEST
282 ---help--- 283 ---help---
283 Currently has a driver for the Amlogic Meson GXL Internal PHY 284 Currently has a driver for the Amlogic Meson GXL Internal PHY
284 285
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index e741bf614c4e..b0492ef2cdaa 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver");
21MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); 21MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
22MODULE_LICENSE("GPL"); 22MODULE_LICENSE("GPL");
23 23
24static int bcm63xx_config_intr(struct phy_device *phydev)
25{
26 int reg, err;
27
28 reg = phy_read(phydev, MII_BCM63XX_IR);
29 if (reg < 0)
30 return reg;
31
32 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
33 reg &= ~MII_BCM63XX_IR_GMASK;
34 else
35 reg |= MII_BCM63XX_IR_GMASK;
36
37 err = phy_write(phydev, MII_BCM63XX_IR, reg);
38 return err;
39}
40
24static int bcm63xx_config_init(struct phy_device *phydev) 41static int bcm63xx_config_init(struct phy_device *phydev)
25{ 42{
26 int reg, err; 43 int reg, err;
@@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = {
55 .config_aneg = genphy_config_aneg, 72 .config_aneg = genphy_config_aneg,
56 .read_status = genphy_read_status, 73 .read_status = genphy_read_status,
57 .ack_interrupt = bcm_phy_ack_intr, 74 .ack_interrupt = bcm_phy_ack_intr,
58 .config_intr = bcm_phy_config_intr, 75 .config_intr = bcm63xx_config_intr,
59}, { 76}, {
60 /* same phy as above, with just a different OUI */ 77 /* same phy as above, with just a different OUI */
61 .phy_id = 0x002bdc00, 78 .phy_id = 0x002bdc00,
@@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = {
67 .config_aneg = genphy_config_aneg, 84 .config_aneg = genphy_config_aneg,
68 .read_status = genphy_read_status, 85 .read_status = genphy_read_status,
69 .ack_interrupt = bcm_phy_ack_intr, 86 .ack_interrupt = bcm_phy_ack_intr,
70 .config_intr = bcm_phy_config_intr, 87 .config_intr = bcm63xx_config_intr,
71} }; 88} };
72 89
73module_phy_driver(bcm63xx_driver); 90module_phy_driver(bcm63xx_driver);
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 800b39f06279..a10d0e7fc5f7 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -17,6 +17,7 @@
17#include <linux/phy.h> 17#include <linux/phy.h>
18 18
19#define TI_DP83848C_PHY_ID 0x20005ca0 19#define TI_DP83848C_PHY_ID 0x20005ca0
20#define TI_DP83620_PHY_ID 0x20005ce0
20#define NS_DP83848C_PHY_ID 0x20005c90 21#define NS_DP83848C_PHY_ID 0x20005c90
21#define TLK10X_PHY_ID 0x2000a210 22#define TLK10X_PHY_ID 0x2000a210
22#define TI_DP83822_PHY_ID 0x2000a240 23#define TI_DP83822_PHY_ID 0x2000a240
@@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev)
77static struct mdio_device_id __maybe_unused dp83848_tbl[] = { 78static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
78 { TI_DP83848C_PHY_ID, 0xfffffff0 }, 79 { TI_DP83848C_PHY_ID, 0xfffffff0 },
79 { NS_DP83848C_PHY_ID, 0xfffffff0 }, 80 { NS_DP83848C_PHY_ID, 0xfffffff0 },
81 { TI_DP83620_PHY_ID, 0xfffffff0 },
80 { TLK10X_PHY_ID, 0xfffffff0 }, 82 { TLK10X_PHY_ID, 0xfffffff0 },
81 { TI_DP83822_PHY_ID, 0xfffffff0 }, 83 { TI_DP83822_PHY_ID, 0xfffffff0 },
82 { } 84 { }
@@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
106static struct phy_driver dp83848_driver[] = { 108static struct phy_driver dp83848_driver[] = {
107 DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), 109 DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
108 DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"), 110 DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
111 DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
109 DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), 112 DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
110 DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"), 113 DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
111}; 114};
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 1b639242f9e2..ca1b462bf7b2 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -29,6 +29,7 @@
29#define MII_DP83867_MICR 0x12 29#define MII_DP83867_MICR 0x12
30#define MII_DP83867_ISR 0x13 30#define MII_DP83867_ISR 0x13
31#define DP83867_CTRL 0x1f 31#define DP83867_CTRL 0x1f
32#define DP83867_CFG3 0x1e
32 33
33/* Extended Registers */ 34/* Extended Registers */
34#define DP83867_RGMIICTL 0x0032 35#define DP83867_RGMIICTL 0x0032
@@ -98,6 +99,8 @@ static int dp83867_config_intr(struct phy_device *phydev)
98 micr_status |= 99 micr_status |=
99 (MII_DP83867_MICR_AN_ERR_INT_EN | 100 (MII_DP83867_MICR_AN_ERR_INT_EN |
100 MII_DP83867_MICR_SPEED_CHNG_INT_EN | 101 MII_DP83867_MICR_SPEED_CHNG_INT_EN |
102 MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
103 MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
101 MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN | 104 MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
102 MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN); 105 MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
103 106
@@ -129,12 +132,16 @@ static int dp83867_of_init(struct phy_device *phydev)
129 132
130 ret = of_property_read_u32(of_node, "ti,rx-internal-delay", 133 ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
131 &dp83867->rx_id_delay); 134 &dp83867->rx_id_delay);
132 if (ret) 135 if (ret &&
136 (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
137 phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID))
133 return ret; 138 return ret;
134 139
135 ret = of_property_read_u32(of_node, "ti,tx-internal-delay", 140 ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
136 &dp83867->tx_id_delay); 141 &dp83867->tx_id_delay);
137 if (ret) 142 if (ret &&
143 (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
144 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
138 return ret; 145 return ret;
139 146
140 return of_property_read_u32(of_node, "ti,fifo-depth", 147 return of_property_read_u32(of_node, "ti,fifo-depth",
@@ -214,6 +221,13 @@ static int dp83867_config_init(struct phy_device *phydev)
214 } 221 }
215 } 222 }
216 223
224 /* Enable Interrupt output INT_OE in CFG3 register */
225 if (phy_interrupt_is_valid(phydev)) {
226 val = phy_read(phydev, DP83867_CFG3);
227 val |= BIT(7);
228 phy_write(phydev, DP83867_CFG3, val);
229 }
230
217 return 0; 231 return 0;
218} 232}
219 233
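The INT_OE enable above writes a bare BIT(7) into DP83867_CFG3 and ignores a possible negative return from phy_read(). A hedged sketch of the same step with a named bit and error propagation; DP83867_CFG3_INT_OE is our label, not one this patch defines:

    #include <linux/bitops.h>
    #include <linux/phy.h>

    #define DP83867_CFG3            0x1e    /* from the hunk above */
    #define DP83867_CFG3_INT_OE     BIT(7)  /* our name for the bit being set */

    static int dp83867_enable_int_oe(struct phy_device *phydev)
    {
            int val = phy_read(phydev, DP83867_CFG3);

            if (val < 0)
                    return val;

            return phy_write(phydev, DP83867_CFG3, val | DP83867_CFG3_INT_OE);
    }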
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e269262471a4..ed0d235cf850 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1192,7 +1192,8 @@ static int marvell_read_status(struct phy_device *phydev)
1192 int err; 1192 int err;
1193 1193
1194 /* Check the fiber mode first */ 1194 /* Check the fiber mode first */
1195 if (phydev->supported & SUPPORTED_FIBRE) { 1195 if (phydev->supported & SUPPORTED_FIBRE &&
1196 phydev->interface != PHY_INTERFACE_MODE_SGMII) {
1196 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER); 1197 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
1197 if (err < 0) 1198 if (err < 0)
1198 goto error; 1199 goto error;
@@ -1678,6 +1679,8 @@ static struct phy_driver marvell_drivers[] = {
1678 .ack_interrupt = &marvell_ack_interrupt, 1679 .ack_interrupt = &marvell_ack_interrupt,
1679 .config_intr = &marvell_config_intr, 1680 .config_intr = &marvell_config_intr,
1680 .did_interrupt = &m88e1121_did_interrupt, 1681 .did_interrupt = &m88e1121_did_interrupt,
1682 .get_wol = &m88e1318_get_wol,
1683 .set_wol = &m88e1318_set_wol,
1681 .resume = &marvell_resume, 1684 .resume = &marvell_resume,
1682 .suspend = &marvell_suspend, 1685 .suspend = &marvell_suspend,
1683 .get_sset_count = marvell_get_sset_count, 1686 .get_sset_count = marvell_get_sset_count,
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index c0b4e65267af..46fe1ae919a3 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -81,8 +81,6 @@ static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg)
81 if (rc) 81 if (rc)
82 return rc; 82 return rc;
83 83
84 iproc_mdio_config_clk(priv->base);
85
86 /* Prepare the read operation */ 84 /* Prepare the read operation */
87 cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | 85 cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
88 (reg << MII_DATA_RA_SHIFT) | 86 (reg << MII_DATA_RA_SHIFT) |
@@ -112,8 +110,6 @@ static int iproc_mdio_write(struct mii_bus *bus, int phy_id,
112 if (rc) 110 if (rc)
113 return rc; 111 return rc;
114 112
115 iproc_mdio_config_clk(priv->base);
116
117 /* Prepare the write operation */ 113 /* Prepare the write operation */
118 cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | 114 cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
119 (reg << MII_DATA_RA_SHIFT) | 115 (reg << MII_DATA_RA_SHIFT) |
@@ -163,6 +159,8 @@ static int iproc_mdio_probe(struct platform_device *pdev)
163 bus->read = iproc_mdio_read; 159 bus->read = iproc_mdio_read;
164 bus->write = iproc_mdio_write; 160 bus->write = iproc_mdio_write;
165 161
162 iproc_mdio_config_clk(priv->base);
163
166 rc = of_mdiobus_register(bus, pdev->dev.of_node); 164 rc = of_mdiobus_register(bus, pdev->dev.of_node);
167 if (rc) { 165 if (rc) {
168 dev_err(&pdev->dev, "MDIO bus registration failed\n"); 166 dev_err(&pdev->dev, "MDIO bus registration failed\n");
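The clock setup moves out of both transfer paths and into probe: configure-once state does not belong in the MDIO hot path. A schematic sketch of the shape of the change, with stand-in demo_* names since the real priv layout lives in the driver:

    #include <linux/errno.h>
    #include <linux/io.h>

    struct demo_priv {
            void __iomem *base;
    };

    static void demo_config_clk(void __iomem *base)
    {
            /* rate-divider programming elided in this sketch */
    }

    static int demo_read(struct demo_priv *priv, int reg)
    {
            /* hot path: no per-transfer clock reconfiguration any more */
            return readl(priv->base + reg);
    }

    static int demo_probe(struct demo_priv *priv)
    {
            demo_config_clk(priv->base);    /* moved here: runs exactly once */
            return 0;
    }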
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9a77289109b7..6742070ca676 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1008,6 +1008,20 @@ static struct phy_driver ksphy_driver[] = {
1008 .get_stats = kszphy_get_stats, 1008 .get_stats = kszphy_get_stats,
1009 .suspend = genphy_suspend, 1009 .suspend = genphy_suspend,
1010 .resume = genphy_resume, 1010 .resume = genphy_resume,
1011}, {
1012 .phy_id = PHY_ID_KSZ8795,
1013 .phy_id_mask = MICREL_PHY_ID_MASK,
1014 .name = "Micrel KSZ8795",
1015 .features = PHY_BASIC_FEATURES,
1016 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
1017 .config_init = kszphy_config_init,
1018 .config_aneg = ksz8873mll_config_aneg,
1019 .read_status = ksz8873mll_read_status,
1020 .get_sset_count = kszphy_get_sset_count,
1021 .get_strings = kszphy_get_strings,
1022 .get_stats = kszphy_get_stats,
1023 .suspend = genphy_suspend,
1024 .resume = genphy_resume,
1011} }; 1025} };
1012 1026
1013module_phy_driver(ksphy_driver); 1027module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 25f93a98863b..7cc1b7dcfe05 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -29,6 +29,7 @@
29#include <linux/mii.h> 29#include <linux/mii.h>
30#include <linux/ethtool.h> 30#include <linux/ethtool.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32#include <linux/phy_led_triggers.h>
32#include <linux/timer.h> 33#include <linux/timer.h>
33#include <linux/workqueue.h> 34#include <linux/workqueue.h>
34#include <linux/mdio.h> 35#include <linux/mdio.h>
@@ -649,14 +650,18 @@ void phy_start_machine(struct phy_device *phydev)
649 * phy_trigger_machine - trigger the state machine to run 650 * phy_trigger_machine - trigger the state machine to run
650 * 651 *
651 * @phydev: the phy_device struct 652 * @phydev: the phy_device struct
653 * @sync: indicate whether we should wait for the workqueue cancellation
652 * 654 *
653 * Description: There has been a change in state which requires that the 655 * Description: There has been a change in state which requires that the
654 * state machine runs. 656 * state machine runs.
655 */ 657 */
656 658
657static void phy_trigger_machine(struct phy_device *phydev) 659static void phy_trigger_machine(struct phy_device *phydev, bool sync)
658{ 660{
659 cancel_delayed_work_sync(&phydev->state_queue); 661 if (sync)
662 cancel_delayed_work_sync(&phydev->state_queue);
663 else
664 cancel_delayed_work(&phydev->state_queue);
660 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0); 665 queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
661} 666}
662 667
@@ -693,7 +698,7 @@ static void phy_error(struct phy_device *phydev)
693 phydev->state = PHY_HALTED; 698 phydev->state = PHY_HALTED;
694 mutex_unlock(&phydev->lock); 699 mutex_unlock(&phydev->lock);
695 700
696 phy_trigger_machine(phydev); 701 phy_trigger_machine(phydev, false);
697} 702}
698 703
699/** 704/**
@@ -840,7 +845,7 @@ void phy_change(struct phy_device *phydev)
840 } 845 }
841 846
842 /* reschedule state queue work to run as soon as possible */ 847 /* reschedule state queue work to run as soon as possible */
843 phy_trigger_machine(phydev); 848 phy_trigger_machine(phydev, true);
844 return; 849 return;
845 850
846ignore: 851ignore:
@@ -942,7 +947,7 @@ void phy_start(struct phy_device *phydev)
942 if (do_resume) 947 if (do_resume)
943 phy_resume(phydev); 948 phy_resume(phydev);
944 949
945 phy_trigger_machine(phydev); 950 phy_trigger_machine(phydev, true);
946} 951}
947EXPORT_SYMBOL(phy_start); 952EXPORT_SYMBOL(phy_start);
948 953
@@ -1065,6 +1070,15 @@ void phy_state_machine(struct work_struct *work)
1065 if (old_link != phydev->link) 1070 if (old_link != phydev->link)
1066 phydev->state = PHY_CHANGELINK; 1071 phydev->state = PHY_CHANGELINK;
1067 } 1072 }
1073 /*
1074 * Failsafe: check that nobody set phydev->link=0 between two
1075 * poll cycles, otherwise we won't leave RUNNING state as long
1076 * as link remains down.
1077 */
1078 if (!phydev->link && phydev->state == PHY_RUNNING) {
1079 phydev->state = PHY_CHANGELINK;
1080 phydev_err(phydev, "no link in PHY_RUNNING\n");
1081 }
1068 break; 1082 break;
1069 case PHY_CHANGELINK: 1083 case PHY_CHANGELINK:
1070 err = phy_read_status(phydev); 1084 err = phy_read_status(phydev);
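The new sync flag exists presumably because phy_trigger_machine() is now also reached from phy_error(), which can run inside the state machine work item itself; a work handler that calls cancel_delayed_work_sync() on its own work waits for itself and deadlocks, so that path cancels asynchronously. A minimal sketch of the rule, with our own names:

    #include <linux/workqueue.h>

    struct demo {
            struct delayed_work work;
    };

    static void demo_requeue(struct demo *d, bool sync)
    {
            if (sync)                       /* caller is outside the handler */
                    cancel_delayed_work_sync(&d->work);
            else                            /* caller may be the handler itself */
                    cancel_delayed_work(&d->work);
            queue_delayed_work(system_power_efficient_wq, &d->work, 0);
    }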
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 92b08383cafa..8c8e15b8739d 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -908,6 +908,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
908 struct module *ndev_owner = dev->dev.parent->driver->owner; 908 struct module *ndev_owner = dev->dev.parent->driver->owner;
909 struct mii_bus *bus = phydev->mdio.bus; 909 struct mii_bus *bus = phydev->mdio.bus;
910 struct device *d = &phydev->mdio.dev; 910 struct device *d = &phydev->mdio.dev;
911 bool using_genphy = false;
911 int err; 912 int err;
912 913
913 /* For Ethernet device drivers that register their own MDIO bus, we 914 /* For Ethernet device drivers that register their own MDIO bus, we
@@ -933,12 +934,22 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
933 d->driver = 934 d->driver =
934 &genphy_driver[GENPHY_DRV_1G].mdiodrv.driver; 935 &genphy_driver[GENPHY_DRV_1G].mdiodrv.driver;
935 936
937 using_genphy = true;
938 }
939
940 if (!try_module_get(d->driver->owner)) {
941 dev_err(&dev->dev, "failed to get the device driver module\n");
942 err = -EIO;
943 goto error_put_device;
944 }
945
946 if (using_genphy) {
936 err = d->driver->probe(d); 947 err = d->driver->probe(d);
937 if (err >= 0) 948 if (err >= 0)
938 err = device_bind_driver(d); 949 err = device_bind_driver(d);
939 950
940 if (err) 951 if (err)
941 goto error; 952 goto error_module_put;
942 } 953 }
943 954
944 if (phydev->attached_dev) { 955 if (phydev->attached_dev) {
@@ -975,7 +986,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
975 return err; 986 return err;
976 987
977error: 988error:
989 /* phy_detach() does all of the cleanup below */
978 phy_detach(phydev); 990 phy_detach(phydev);
991 return err;
992
993error_module_put:
994 module_put(d->driver->owner);
995error_put_device:
979 put_device(d); 996 put_device(d);
980 if (ndev_owner != bus->owner) 997 if (ndev_owner != bus->owner)
981 module_put(bus->owner); 998 module_put(bus->owner);
@@ -1039,6 +1056,8 @@ void phy_detach(struct phy_device *phydev)
1039 1056
1040 phy_led_triggers_unregister(phydev); 1057 phy_led_triggers_unregister(phydev);
1041 1058
1059 module_put(phydev->mdio.dev.driver->owner);
1060
1042 /* If the device had no specific driver before (i.e. - it 1061 /* If the device had no specific driver before (i.e. - it
1043 * was using the generic driver), we unbind the device 1062 * was using the generic driver), we unbind the device
1044 * from the generic driver so that there's a chance a 1063 * from the generic driver so that there's a chance a
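The attach path now takes a reference on the PHY driver module before binding, and phy_detach() releases it, closing the window where the driver module could be unloaded while a netdev still had the PHY attached. A sketch of the balanced pair, assuming only <linux/device.h> and <linux/module.h>:

    #include <linux/device.h>
    #include <linux/module.h>

    static int pin_phy_driver(struct device *d)
    {
            /* fails if the module is already on its way out */
            if (!try_module_get(d->driver->owner))
                    return -EIO;
            return 0;
    }

    static void unpin_phy_driver(struct device *d)
    {
            module_put(d->driver->owner);   /* the phy_detach() side of the pair */
    }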
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index fa62bdf2f526..94ca42e630bb 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -12,6 +12,7 @@
12 */ 12 */
13#include <linux/leds.h> 13#include <linux/leds.h>
14#include <linux/phy.h> 14#include <linux/phy.h>
15#include <linux/phy_led_triggers.h>
15#include <linux/netdevice.h> 16#include <linux/netdevice.h>
16 17
17static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy, 18static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
@@ -102,8 +103,10 @@ int phy_led_triggers_register(struct phy_device *phy)
102 sizeof(struct phy_led_trigger) * 103 sizeof(struct phy_led_trigger) *
103 phy->phy_num_led_triggers, 104 phy->phy_num_led_triggers,
104 GFP_KERNEL); 105 GFP_KERNEL);
105 if (!phy->phy_led_triggers) 106 if (!phy->phy_led_triggers) {
106 return -ENOMEM; 107 err = -ENOMEM;
108 goto out_clear;
109 }
107 110
108 for (i = 0; i < phy->phy_num_led_triggers; i++) { 111 for (i = 0; i < phy->phy_num_led_triggers; i++) {
109 err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i], 112 err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i],
@@ -120,6 +123,8 @@ out_unreg:
120 while (i--) 123 while (i--)
121 phy_led_trigger_unregister(&phy->phy_led_triggers[i]); 124 phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
122 devm_kfree(&phy->mdio.dev, phy->phy_led_triggers); 125 devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
126out_clear:
127 phy->phy_num_led_triggers = 0;
123 return err; 128 return err;
124} 129}
125EXPORT_SYMBOL_GPL(phy_led_triggers_register); 130EXPORT_SYMBOL_GPL(phy_led_triggers_register);
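The out_clear label restores the invariant the rest of the code relies on: after a failed registration, phy_num_led_triggers goes back to zero, so the unregister path never walks trigger slots that were never created. A reduced sketch of the same unwind, with our own names:

    #include <linux/slab.h>

    struct demo_triggers {
            unsigned int n;
            int *items;
    };

    static int demo_register_all(struct demo_triggers *t, unsigned int n)
    {
            unsigned int i;

            t->n = n;
            t->items = kcalloc(n, sizeof(*t->items), GFP_KERNEL);
            if (!t->items)
                    goto out_clear;

            for (i = 0; i < n; i++)
                    t->items[i] = i;        /* stand-in for per-item registration */
            return 0;

    out_clear:
            t->n = 0;       /* don't advertise items that were never created */
            return -ENOMEM;
    }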
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index cd8e02c94be0..bfabe180053e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1170,9 +1170,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1170 } 1170 }
1171 1171
1172 if (tun->flags & IFF_VNET_HDR) { 1172 if (tun->flags & IFF_VNET_HDR) {
1173 if (len < tun->vnet_hdr_sz) 1173 int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1174
1175 if (len < vnet_hdr_sz)
1174 return -EINVAL; 1176 return -EINVAL;
1175 len -= tun->vnet_hdr_sz; 1177 len -= vnet_hdr_sz;
1176 1178
1177 if (!copy_from_iter_full(&gso, sizeof(gso), from)) 1179 if (!copy_from_iter_full(&gso, sizeof(gso), from))
1178 return -EFAULT; 1180 return -EFAULT;
@@ -1183,7 +1185,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1183 1185
1184 if (tun16_to_cpu(tun, gso.hdr_len) > len) 1186 if (tun16_to_cpu(tun, gso.hdr_len) > len)
1185 return -EINVAL; 1187 return -EINVAL;
1186 iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso)); 1188 iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1187 } 1189 }
1188 1190
1189 if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { 1191 if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
@@ -1335,7 +1337,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1335 vlan_hlen = VLAN_HLEN; 1337 vlan_hlen = VLAN_HLEN;
1336 1338
1337 if (tun->flags & IFF_VNET_HDR) 1339 if (tun->flags & IFF_VNET_HDR)
1338 vnet_hdr_sz = tun->vnet_hdr_sz; 1340 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1339 1341
1340 total = skb->len + vlan_hlen + vnet_hdr_sz; 1342 total = skb->len + vlan_hlen + vnet_hdr_sz;
1341 1343
@@ -1360,7 +1362,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1360 return -EINVAL; 1362 return -EINVAL;
1361 1363
1362 if (virtio_net_hdr_from_skb(skb, &gso, 1364 if (virtio_net_hdr_from_skb(skb, &gso,
1363 tun_is_little_endian(tun))) { 1365 tun_is_little_endian(tun), true)) {
1364 struct skb_shared_info *sinfo = skb_shinfo(skb); 1366 struct skb_shared_info *sinfo = skb_shinfo(skb);
1365 pr_err("unexpected GSO type: " 1367 pr_err("unexpected GSO type: "
1366 "0x%x, gso_size %d, hdr_len %d\n", 1368 "0x%x, gso_size %d, hdr_len %d\n",
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 6c646e228833..6e98ede997d3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -1367,6 +1367,7 @@ static struct usb_driver asix_driver = {
1367 .probe = usbnet_probe, 1367 .probe = usbnet_probe,
1368 .suspend = asix_suspend, 1368 .suspend = asix_suspend,
1369 .resume = asix_resume, 1369 .resume = asix_resume,
1370 .reset_resume = asix_resume,
1370 .disconnect = usbnet_disconnect, 1371 .disconnect = usbnet_disconnect,
1371 .supports_autosuspend = 1, 1372 .supports_autosuspend = 1,
1372 .disable_hub_initiated_lpm = 1, 1373 .disable_hub_initiated_lpm = 1,
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 3daa41bdd4ea..0acc9b640419 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -776,7 +776,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
776 struct net_device *netdev; 776 struct net_device *netdev;
777 struct catc *catc; 777 struct catc *catc;
778 u8 broadcast[ETH_ALEN]; 778 u8 broadcast[ETH_ALEN];
779 int i, pktsz; 779 int pktsz, ret;
780 780
781 if (usb_set_interface(usbdev, 781 if (usb_set_interface(usbdev,
782 intf->altsetting->desc.bInterfaceNumber, 1)) { 782 intf->altsetting->desc.bInterfaceNumber, 1)) {
@@ -811,12 +811,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
811 if ((!catc->ctrl_urb) || (!catc->tx_urb) || 811 if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
812 (!catc->rx_urb) || (!catc->irq_urb)) { 812 (!catc->rx_urb) || (!catc->irq_urb)) {
813 dev_err(&intf->dev, "No free urbs available.\n"); 813 dev_err(&intf->dev, "No free urbs available.\n");
814 usb_free_urb(catc->ctrl_urb); 814 ret = -ENOMEM;
815 usb_free_urb(catc->tx_urb); 815 goto fail_free;
816 usb_free_urb(catc->rx_urb);
817 usb_free_urb(catc->irq_urb);
818 free_netdev(netdev);
819 return -ENOMEM;
820 } 816 }
821 817
822 /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ 818 /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
@@ -844,15 +840,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
844 catc->irq_buf, 2, catc_irq_done, catc, 1); 840 catc->irq_buf, 2, catc_irq_done, catc, 1);
845 841
846 if (!catc->is_f5u011) { 842 if (!catc->is_f5u011) {
843 u32 *buf;
844 int i;
845
847 dev_dbg(dev, "Checking memory size\n"); 846 dev_dbg(dev, "Checking memory size\n");
848 847
849 i = 0x12345678; 848 buf = kmalloc(4, GFP_KERNEL);
850 catc_write_mem(catc, 0x7a80, &i, 4); 849 if (!buf) {
851 i = 0x87654321; 850 ret = -ENOMEM;
852 catc_write_mem(catc, 0xfa80, &i, 4); 851 goto fail_free;
853 catc_read_mem(catc, 0x7a80, &i, 4); 852 }
853
854 *buf = 0x12345678;
855 catc_write_mem(catc, 0x7a80, buf, 4);
856 *buf = 0x87654321;
857 catc_write_mem(catc, 0xfa80, buf, 4);
858 catc_read_mem(catc, 0x7a80, buf, 4);
854 859
855 switch (i) { 860 switch (*buf) {
856 case 0x12345678: 861 case 0x12345678:
857 catc_set_reg(catc, TxBufCount, 8); 862 catc_set_reg(catc, TxBufCount, 8);
858 catc_set_reg(catc, RxBufCount, 32); 863 catc_set_reg(catc, RxBufCount, 32);
@@ -867,6 +872,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
867 dev_dbg(dev, "32k Memory\n"); 872 dev_dbg(dev, "32k Memory\n");
868 break; 873 break;
869 } 874 }
875
876 kfree(buf);
870 877
871 dev_dbg(dev, "Getting MAC from SEEROM.\n"); 878 dev_dbg(dev, "Getting MAC from SEEROM.\n");
872 879
@@ -913,16 +920,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
913 usb_set_intfdata(intf, catc); 920 usb_set_intfdata(intf, catc);
914 921
915 SET_NETDEV_DEV(netdev, &intf->dev); 922 SET_NETDEV_DEV(netdev, &intf->dev);
916 if (register_netdev(netdev) != 0) { 923 ret = register_netdev(netdev);
917 usb_set_intfdata(intf, NULL); 924 if (ret)
918 usb_free_urb(catc->ctrl_urb); 925 goto fail_clear_intfdata;
919 usb_free_urb(catc->tx_urb); 926
920 usb_free_urb(catc->rx_urb);
921 usb_free_urb(catc->irq_urb);
922 free_netdev(netdev);
923 return -EIO;
924 }
925 return 0; 927 return 0;
928
929fail_clear_intfdata:
930 usb_set_intfdata(intf, NULL);
931fail_free:
932 usb_free_urb(catc->ctrl_urb);
933 usb_free_urb(catc->tx_urb);
934 usb_free_urb(catc->rx_urb);
935 usb_free_urb(catc->irq_urb);
936 free_netdev(netdev);
937 return ret;
926} 938}
927 939
928static void catc_disconnect(struct usb_interface *intf) 940static void catc_disconnect(struct usb_interface *intf)
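The common thread in this and the pegasus/rtl8150 conversions below is that usb_control_msg() requires DMA-able memory: a stack variable (or the local int catc was passing) is not, so each transfer bounces through a kmalloc'd buffer. A sketch of the read side; the request and pipe values here are placeholders, not any real device's protocol:

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/usb.h>

    static int demo_read_reg(struct usb_device *udev, u16 indx,
                             void *data, u16 size)
    {
            void *buf = kmalloc(size, GFP_NOIO);    /* heap, hence DMA-able */
            int ret;

            if (!buf)
                    return -ENOMEM;

            ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                  0x00 /* placeholder request */,
                                  USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                                  indx, 0, buf, size, 500);
            if (ret > 0 && ret <= size)
                    memcpy(data, buf, ret); /* copy back only what actually arrived */
            kfree(buf);
            return ret;
    }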
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index fe7b2886cb6b..86144f9a80ee 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -531,6 +531,7 @@ static const struct driver_info wwan_info = {
531#define SAMSUNG_VENDOR_ID 0x04e8 531#define SAMSUNG_VENDOR_ID 0x04e8
532#define LENOVO_VENDOR_ID 0x17ef 532#define LENOVO_VENDOR_ID 0x17ef
533#define NVIDIA_VENDOR_ID 0x0955 533#define NVIDIA_VENDOR_ID 0x0955
534#define HP_VENDOR_ID 0x03f0
534 535
535static const struct usb_device_id products[] = { 536static const struct usb_device_id products[] = {
536/* BLACKLIST !! 537/* BLACKLIST !!
@@ -677,6 +678,13 @@ static const struct usb_device_id products[] = {
677 .driver_info = 0, 678 .driver_info = 0,
678}, 679},
679 680
681/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
682{
683 USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
684 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
685 .driver_info = 0,
686},
687
680/* AnyDATA ADU960S - handled by qmi_wwan */ 688/* AnyDATA ADU960S - handled by qmi_wwan */
681{ 689{
682 USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, 690 USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 24e803fe9a53..36674484c6fb 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb)
126 126
127static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) 127static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
128{ 128{
129 u8 *buf;
129 int ret; 130 int ret;
130 131
132 buf = kmalloc(size, GFP_NOIO);
133 if (!buf)
134 return -ENOMEM;
135
131 ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0), 136 ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
132 PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0, 137 PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
133 indx, data, size, 1000); 138 indx, buf, size, 1000);
134 if (ret < 0) 139 if (ret < 0)
135 netif_dbg(pegasus, drv, pegasus->net, 140 netif_dbg(pegasus, drv, pegasus->net,
136 "%s returned %d\n", __func__, ret); 141 "%s returned %d\n", __func__, ret);
142 else if (ret <= size)
143 memcpy(data, buf, ret);
144 kfree(buf);
137 return ret; 145 return ret;
138} 146}
139 147
140static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) 148static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
149 const void *data)
141{ 150{
151 u8 *buf;
142 int ret; 152 int ret;
143 153
154 buf = kmemdup(data, size, GFP_NOIO);
155 if (!buf)
156 return -ENOMEM;
157
144 ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), 158 ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
145 PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0, 159 PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
146 indx, data, size, 100); 160 indx, buf, size, 100);
147 if (ret < 0) 161 if (ret < 0)
148 netif_dbg(pegasus, drv, pegasus->net, 162 netif_dbg(pegasus, drv, pegasus->net,
149 "%s returned %d\n", __func__, ret); 163 "%s returned %d\n", __func__, ret);
164 kfree(buf);
150 return ret; 165 return ret;
151} 166}
152 167
153static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) 168static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
154{ 169{
170 u8 *buf;
155 int ret; 171 int ret;
156 172
173 buf = kmemdup(&data, 1, GFP_NOIO);
174 if (!buf)
175 return -ENOMEM;
176
157 ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), 177 ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
158 PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data, 178 PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
159 indx, &data, 1, 1000); 179 indx, buf, 1, 1000);
160 if (ret < 0) 180 if (ret < 0)
161 netif_dbg(pegasus, drv, pegasus->net, 181 netif_dbg(pegasus, drv, pegasus->net,
162 "%s returned %d\n", __func__, ret); 182 "%s returned %d\n", __func__, ret);
183 kfree(buf);
163 return ret; 184 return ret;
164} 185}
165 186
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6fe1cdb0174f..24d5272cdce5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -654,6 +654,13 @@ static const struct usb_device_id products[] = {
654 USB_CDC_PROTO_NONE), 654 USB_CDC_PROTO_NONE),
655 .driver_info = (unsigned long)&qmi_wwan_info, 655 .driver_info = (unsigned long)&qmi_wwan_info,
656 }, 656 },
657 { /* HP lt2523 (Novatel E371) */
658 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
659 USB_CLASS_COMM,
660 USB_CDC_SUBCLASS_ETHERNET,
661 USB_CDC_PROTO_NONE),
662 .driver_info = (unsigned long)&qmi_wwan_info,
663 },
657 { /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ 664 { /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
658 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), 665 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
659 .driver_info = (unsigned long)&qmi_wwan_info, 666 .driver_info = (unsigned long)&qmi_wwan_info,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7dc61228c55b..ad42295356dd 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
32#define NETNEXT_VERSION "08" 32#define NETNEXT_VERSION "08"
33 33
34/* Information for net */ 34/* Information for net */
35#define NET_VERSION "6" 35#define NET_VERSION "8"
36 36
37#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION 37#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
38#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 38#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
1730 u8 checksum = CHECKSUM_NONE; 1730 u8 checksum = CHECKSUM_NONE;
1731 u32 opts2, opts3; 1731 u32 opts2, opts3;
1732 1732
1733 if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02) 1733 if (!(tp->netdev->features & NETIF_F_RXCSUM))
1734 goto return_result; 1734 goto return_result;
1735 1735
1736 opts2 = le32_to_cpu(rx_desc->opts2); 1736 opts2 = le32_to_cpu(rx_desc->opts2);
@@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
1936 napi_complete(napi); 1936 napi_complete(napi);
1937 if (!list_empty(&tp->rx_done)) 1937 if (!list_empty(&tp->rx_done))
1938 napi_schedule(napi); 1938 napi_schedule(napi);
1939 else if (!skb_queue_empty(&tp->tx_queue) &&
1940 !list_empty(&tp->tx_free))
1941 napi_schedule(napi);
1939 } 1942 }
1940 1943
1941 return work_done; 1944 return work_done;
@@ -3155,10 +3158,13 @@ static void set_carrier(struct r8152 *tp)
3155 if (!netif_carrier_ok(netdev)) { 3158 if (!netif_carrier_ok(netdev)) {
3156 tp->rtl_ops.enable(tp); 3159 tp->rtl_ops.enable(tp);
3157 set_bit(RTL8152_SET_RX_MODE, &tp->flags); 3160 set_bit(RTL8152_SET_RX_MODE, &tp->flags);
3161 netif_stop_queue(netdev);
3158 napi_disable(&tp->napi); 3162 napi_disable(&tp->napi);
3159 netif_carrier_on(netdev); 3163 netif_carrier_on(netdev);
3160 rtl_start_rx(tp); 3164 rtl_start_rx(tp);
3161 napi_enable(&tp->napi); 3165 napi_enable(&tp->napi);
3166 netif_wake_queue(netdev);
3167 netif_info(tp, link, netdev, "carrier on\n");
3162 } 3168 }
3163 } else { 3169 } else {
3164 if (netif_carrier_ok(netdev)) { 3170 if (netif_carrier_ok(netdev)) {
@@ -3166,6 +3172,7 @@ static void set_carrier(struct r8152 *tp)
3166 napi_disable(&tp->napi); 3172 napi_disable(&tp->napi);
3167 tp->rtl_ops.disable(tp); 3173 tp->rtl_ops.disable(tp);
3168 napi_enable(&tp->napi); 3174 napi_enable(&tp->napi);
3175 netif_info(tp, link, netdev, "carrier off\n");
3169 } 3176 }
3170 } 3177 }
3171} 3178}
@@ -3515,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
3515 if (!netif_running(netdev)) 3522 if (!netif_running(netdev))
3516 return 0; 3523 return 0;
3517 3524
3525 netif_stop_queue(netdev);
3518 napi_disable(&tp->napi); 3526 napi_disable(&tp->napi);
3519 clear_bit(WORK_ENABLE, &tp->flags); 3527 clear_bit(WORK_ENABLE, &tp->flags);
3520 usb_kill_urb(tp->intr_urb); 3528 usb_kill_urb(tp->intr_urb);
3521 cancel_delayed_work_sync(&tp->schedule); 3529 cancel_delayed_work_sync(&tp->schedule);
3522 if (netif_carrier_ok(netdev)) { 3530 if (netif_carrier_ok(netdev)) {
3523 netif_stop_queue(netdev);
3524 mutex_lock(&tp->control); 3531 mutex_lock(&tp->control);
3525 tp->rtl_ops.disable(tp); 3532 tp->rtl_ops.disable(tp);
3526 mutex_unlock(&tp->control); 3533 mutex_unlock(&tp->control);
@@ -3545,12 +3552,17 @@ static int rtl8152_post_reset(struct usb_interface *intf)
3545 if (netif_carrier_ok(netdev)) { 3552 if (netif_carrier_ok(netdev)) {
3546 mutex_lock(&tp->control); 3553 mutex_lock(&tp->control);
3547 tp->rtl_ops.enable(tp); 3554 tp->rtl_ops.enable(tp);
3555 rtl_start_rx(tp);
3548 rtl8152_set_rx_mode(netdev); 3556 rtl8152_set_rx_mode(netdev);
3549 mutex_unlock(&tp->control); 3557 mutex_unlock(&tp->control);
3550 netif_wake_queue(netdev);
3551 } 3558 }
3552 3559
3553 napi_enable(&tp->napi); 3560 napi_enable(&tp->napi);
3561 netif_wake_queue(netdev);
3562 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
3563
3564 if (!list_empty(&tp->rx_done))
3565 napi_schedule(&tp->napi);
3554 3566
3555 return 0; 3567 return 0;
3556} 3568}
@@ -3572,43 +3584,98 @@ static bool delay_autosuspend(struct r8152 *tp)
3572 */ 3584 */
3573 if (!sw_linking && tp->rtl_ops.in_nway(tp)) 3585 if (!sw_linking && tp->rtl_ops.in_nway(tp))
3574 return true; 3586 return true;
3587 else if (!skb_queue_empty(&tp->tx_queue))
3588 return true;
3575 else 3589 else
3576 return false; 3590 return false;
3577} 3591}
3578 3592
3579static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) 3593static int rtl8152_runtime_suspend(struct r8152 *tp)
3580{ 3594{
3581 struct r8152 *tp = usb_get_intfdata(intf);
3582 struct net_device *netdev = tp->netdev; 3595 struct net_device *netdev = tp->netdev;
3583 int ret = 0; 3596 int ret = 0;
3584 3597
3585 mutex_lock(&tp->control); 3598 set_bit(SELECTIVE_SUSPEND, &tp->flags);
3599 smp_mb__after_atomic();
3600
3601 if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
3602 u32 rcr = 0;
3586 3603
3587 if (PMSG_IS_AUTO(message)) { 3604 if (delay_autosuspend(tp)) {
3588 if (netif_running(netdev) && delay_autosuspend(tp)) { 3605 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3606 smp_mb__after_atomic();
3589 ret = -EBUSY; 3607 ret = -EBUSY;
3590 goto out1; 3608 goto out1;
3591 } 3609 }
3592 3610
3593 set_bit(SELECTIVE_SUSPEND, &tp->flags); 3611 if (netif_carrier_ok(netdev)) {
3594 } else { 3612 u32 ocp_data;
3595 netif_device_detach(netdev); 3613
3614 rcr = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
3615 ocp_data = rcr & ~RCR_ACPT_ALL;
3616 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
3617 rxdy_gated_en(tp, true);
3618 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA,
3619 PLA_OOB_CTRL);
3620 if (!(ocp_data & RXFIFO_EMPTY)) {
3621 rxdy_gated_en(tp, false);
3622 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
3623 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3624 smp_mb__after_atomic();
3625 ret = -EBUSY;
3626 goto out1;
3627 }
3628 }
3629
3630 clear_bit(WORK_ENABLE, &tp->flags);
3631 usb_kill_urb(tp->intr_urb);
3632
3633 tp->rtl_ops.autosuspend_en(tp, true);
3634
3635 if (netif_carrier_ok(netdev)) {
3636 napi_disable(&tp->napi);
3637 rtl_stop_rx(tp);
3638 rxdy_gated_en(tp, false);
3639 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
3640 napi_enable(&tp->napi);
3641 }
3596 } 3642 }
3597 3643
3644out1:
3645 return ret;
3646}
3647
3648static int rtl8152_system_suspend(struct r8152 *tp)
3649{
3650 struct net_device *netdev = tp->netdev;
3651 int ret = 0;
3652
3653 netif_device_detach(netdev);
3654
3598 if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { 3655 if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
3599 clear_bit(WORK_ENABLE, &tp->flags); 3656 clear_bit(WORK_ENABLE, &tp->flags);
3600 usb_kill_urb(tp->intr_urb); 3657 usb_kill_urb(tp->intr_urb);
3601 napi_disable(&tp->napi); 3658 napi_disable(&tp->napi);
3602 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3659 cancel_delayed_work_sync(&tp->schedule);
3603 rtl_stop_rx(tp); 3660 tp->rtl_ops.down(tp);
3604 tp->rtl_ops.autosuspend_en(tp, true);
3605 } else {
3606 cancel_delayed_work_sync(&tp->schedule);
3607 tp->rtl_ops.down(tp);
3608 }
3609 napi_enable(&tp->napi); 3661 napi_enable(&tp->napi);
3610 } 3662 }
3611out1: 3663
3664 return ret;
3665}
3666
3667static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
3668{
3669 struct r8152 *tp = usb_get_intfdata(intf);
3670 int ret;
3671
3672 mutex_lock(&tp->control);
3673
3674 if (PMSG_IS_AUTO(message))
3675 ret = rtl8152_runtime_suspend(tp);
3676 else
3677 ret = rtl8152_system_suspend(tp);
3678
3612 mutex_unlock(&tp->control); 3679 mutex_unlock(&tp->control);
3613 3680
3614 return ret; 3681 return ret;
@@ -3629,12 +3696,15 @@ static int rtl8152_resume(struct usb_interface *intf)
3629 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { 3696 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
3630 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3697 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3631 tp->rtl_ops.autosuspend_en(tp, false); 3698 tp->rtl_ops.autosuspend_en(tp, false);
3632 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3633 napi_disable(&tp->napi); 3699 napi_disable(&tp->napi);
3634 set_bit(WORK_ENABLE, &tp->flags); 3700 set_bit(WORK_ENABLE, &tp->flags);
3635 if (netif_carrier_ok(tp->netdev)) 3701 if (netif_carrier_ok(tp->netdev))
3636 rtl_start_rx(tp); 3702 rtl_start_rx(tp);
3637 napi_enable(&tp->napi); 3703 napi_enable(&tp->napi);
3704 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3705 smp_mb__after_atomic();
3706 if (!list_empty(&tp->rx_done))
3707 napi_schedule(&tp->napi);
3638 } else { 3708 } else {
3639 tp->rtl_ops.up(tp); 3709 tp->rtl_ops.up(tp);
3640 netif_carrier_off(tp->netdev); 3710 netif_carrier_off(tp->netdev);
@@ -4308,6 +4378,11 @@ static int rtl8152_probe(struct usb_interface *intf,
4308 NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | 4378 NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
4309 NETIF_F_IPV6_CSUM | NETIF_F_TSO6; 4379 NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
4310 4380
4381 if (tp->version == RTL_VER_01) {
4382 netdev->features &= ~NETIF_F_RXCSUM;
4383 netdev->hw_features &= ~NETIF_F_RXCSUM;
4384 }
4385
4311 netdev->ethtool_ops = &ops; 4386 netdev->ethtool_ops = &ops;
4312 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); 4387 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
4313 4388
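Throughout the suspend rework, set_bit()/clear_bit() on SELECTIVE_SUSPEND is followed by smp_mb__after_atomic(): the bit operations are atomic but impose no ordering on their own, and the full barrier makes the new flag value visible before any code that acts on it can run elsewhere. A sketch of the publishing idiom:

    #include <linux/atomic.h>
    #include <linux/bitops.h>

    static void demo_publish_flag(unsigned long *flags, int bit)
    {
            set_bit(bit, flags);
            smp_mb__after_atomic(); /* order the flag ahead of later loads/stores */
    }

    static void demo_retract_flag(unsigned long *flags, int bit)
    {
            clear_bit(bit, flags);
            smp_mb__after_atomic();
    }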
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 95b7bd0d7abc..c81c79110cef 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -155,16 +155,36 @@ static const char driver_name [] = "rtl8150";
155*/ 155*/
156static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) 156static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
157{ 157{
158 return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 158 void *buf;
159 RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, 159 int ret;
160 indx, 0, data, size, 500); 160
161 buf = kmalloc(size, GFP_NOIO);
162 if (!buf)
163 return -ENOMEM;
164
165 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
166 RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
167 indx, 0, buf, size, 500);
168 if (ret > 0 && ret <= size)
169 memcpy(data, buf, ret);
170 kfree(buf);
171 return ret;
161} 172}
162 173
163static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) 174static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
164{ 175{
165 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 176 void *buf;
166 RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, 177 int ret;
167 indx, 0, data, size, 500); 178
179 buf = kmemdup(data, size, GFP_NOIO);
180 if (!buf)
181 return -ENOMEM;
182
183 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
184 RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
185 indx, 0, buf, size, 500);
186 kfree(buf);
187 return ret;
168} 188}
169 189
170static void async_set_reg_cb(struct urb *urb) 190static void async_set_reg_cb(struct urb *urb)
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 12071f1582df..d9440bc022f2 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -73,8 +73,6 @@ static atomic_t iface_counter = ATOMIC_INIT(0);
73/* Private data structure */ 73/* Private data structure */
74struct sierra_net_data { 74struct sierra_net_data {
75 75
76 u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
77
78 u16 link_up; /* air link up or down */ 76 u16 link_up; /* air link up or down */
79 u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */ 77 u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
80 78
@@ -122,6 +120,7 @@ struct param {
122 120
123/* LSI Protocol types */ 121/* LSI Protocol types */
124#define SIERRA_NET_PROTOCOL_UMTS 0x01 122#define SIERRA_NET_PROTOCOL_UMTS 0x01
123#define SIERRA_NET_PROTOCOL_UMTS_DS 0x04
125/* LSI Coverage */ 124/* LSI Coverage */
126#define SIERRA_NET_COVERAGE_NONE 0x00 125#define SIERRA_NET_COVERAGE_NONE 0x00
127#define SIERRA_NET_COVERAGE_NOPACKET 0x01 126#define SIERRA_NET_COVERAGE_NOPACKET 0x01
@@ -129,7 +128,8 @@ struct param {
129/* LSI Session */ 128/* LSI Session */
130#define SIERRA_NET_SESSION_IDLE 0x00 129#define SIERRA_NET_SESSION_IDLE 0x00
131/* LSI Link types */ 130/* LSI Link types */
132#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00 131#define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00
132#define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02
133 133
134struct lsi_umts { 134struct lsi_umts {
135 u8 protocol; 135 u8 protocol;
@@ -137,9 +137,14 @@ struct lsi_umts {
137 __be16 length; 137 __be16 length;
138 /* eventually use a union for the rest - assume umts for now */ 138 /* eventually use a union for the rest - assume umts for now */
139 u8 coverage; 139 u8 coverage;
140 u8 unused2[41]; 140 u8 network_len; /* network name len */
141 u8 network[40]; /* network name (UCS2, bigendian) */
141 u8 session_state; 142 u8 session_state;
142 u8 unused3[33]; 143 u8 unused3[33];
144} __packed;
145
146struct lsi_umts_single {
147 struct lsi_umts lsi;
143 u8 link_type; 148 u8 link_type;
144 u8 pdp_addr_len; /* NW-supplied PDP address len */ 149 u8 pdp_addr_len; /* NW-supplied PDP address len */
145 u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian) */ 150 u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian) */
@@ -158,10 +163,31 @@ struct lsi_umts {
158 u8 reserved[8]; 163 u8 reserved[8];
159} __packed; 164} __packed;
160 165
166struct lsi_umts_dual {
167 struct lsi_umts lsi;
168 u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */
169 u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian) */
170 u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */
171 u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian) */
172 u8 unused4[23];
173 u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len */
174 u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address (bigendian) */
175 u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */
176 u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian) */
177 u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len */
178 u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address (bigendian) */
179 u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */
180 u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian) */
181 u8 unused5[68];
182} __packed;
183
161#define SIERRA_NET_LSI_COMMON_LEN 4 184#define SIERRA_NET_LSI_COMMON_LEN 4
162#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) 185#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single))
163#define SIERRA_NET_LSI_UMTS_STATUS_LEN \ 186#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
164 (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN) 187 (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
188#define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual))
189#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
190 (SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)
165 191
166/* Forward definitions */ 192/* Forward definitions */
167static void sierra_sync_timer(unsigned long syncdata); 193static void sierra_sync_timer(unsigned long syncdata);
@@ -190,10 +216,11 @@ static inline void sierra_net_set_private(struct usbnet *dev,
190 dev->data[0] = (unsigned long)priv; 216 dev->data[0] = (unsigned long)priv;
191} 217}
192 218
193/* is packet IPv4 */ 219/* is packet IPv4/IPv6 */
194static inline int is_ip(struct sk_buff *skb) 220static inline int is_ip(struct sk_buff *skb)
195{ 221{
196 return skb->protocol == cpu_to_be16(ETH_P_IP); 222 return skb->protocol == cpu_to_be16(ETH_P_IP) ||
223 skb->protocol == cpu_to_be16(ETH_P_IPV6);
197} 224}
198 225
199/* 226/*
@@ -349,49 +376,54 @@ static inline int sierra_net_is_valid_addrlen(u8 len)
349static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) 376static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
350{ 377{
351 struct lsi_umts *lsi = (struct lsi_umts *)data; 378 struct lsi_umts *lsi = (struct lsi_umts *)data;
379 u32 expected_length;
352 380
353 if (datalen < sizeof(struct lsi_umts)) { 381 if (datalen < sizeof(struct lsi_umts_single)) {
354 netdev_err(dev->net, "%s: Data length %d, exp %Zu\n", 382 netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n",
355 __func__, datalen, 383 __func__, datalen, sizeof(struct lsi_umts_single));
356 sizeof(struct lsi_umts));
357 return -1; 384 return -1;
358 } 385 }
359 386
360 if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) { 387 /* Validate the session state */
361 netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", 388 if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
362 __func__, be16_to_cpu(lsi->length), 389 netdev_err(dev->net, "Session idle, 0x%02x\n",
363 (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN); 390 lsi->session_state);
364 return -1; 391 return 0;
365 } 392 }
366 393
367 /* Validate the protocol - only support UMTS for now */ 394 /* Validate the protocol - only support UMTS for now */
368 if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) { 395 if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) {
396 struct lsi_umts_single *single = (struct lsi_umts_single *)lsi;
397
398 /* Validate the link type */
399 if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 &&
400 single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) {
401 netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
402 single->link_type);
403 return -1;
404 }
405 expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN;
406 } else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
407 expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
408 } else {
369 netdev_err(dev->net, "Protocol unsupported, 0x%02x\n", 409 netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
370 lsi->protocol); 410 lsi->protocol);
371 return -1; 411 return -1;
372 } 412 }
373 413
374 /* Validate the link type */ 414 if (be16_to_cpu(lsi->length) != expected_length) {
375 if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) { 415 netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
376 netdev_err(dev->net, "Link type unsupported: 0x%02x\n", 416 __func__, be16_to_cpu(lsi->length), expected_length);
377 lsi->link_type);
378 return -1; 417 return -1;
379 } 418 }
380 419
381 /* Validate the coverage */ 420 /* Validate the coverage */
382 if (lsi->coverage == SIERRA_NET_COVERAGE_NONE 421 if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
383 || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { 422 lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
384 netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage); 423 netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
385 return 0; 424 return 0;
386 } 425 }
387 426
388 /* Validate the session state */
389 if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
390 netdev_err(dev->net, "Session idle, 0x%02x\n",
391 lsi->session_state);
392 return 0;
393 }
394
395 /* Set link_sense true */ 427 /* Set link_sense true */
396 return 1; 428 return 1;
397} 429}
@@ -652,7 +684,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
652 u8 numendpoints; 684 u8 numendpoints;
653 u16 fwattr = 0; 685 u16 fwattr = 0;
654 int status; 686 int status;
655 struct ethhdr *eth;
656 struct sierra_net_data *priv; 687 struct sierra_net_data *priv;
657 static const u8 sync_tmplate[sizeof(priv->sync_msg)] = { 688 static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
658 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; 689 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
@@ -690,11 +721,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
690 dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); 721 dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
691 dev->net->dev_addr[ETH_ALEN-1] = ifacenum; 722 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
692 723
693 /* we will have to manufacture ethernet headers, prepare template */
694 eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
695 memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
696 eth->h_proto = cpu_to_be16(ETH_P_IP);
697
698 /* prepare shutdown message template */ 724 /* prepare shutdown message template */
699 memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); 725 memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
700 /* set context index initially to 0 - prepares tx hdr template */ 726 /* set context index initially to 0 - prepares tx hdr template */
@@ -824,9 +850,14 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
824 850
825 skb_pull(skb, hh.hdrlen); 851 skb_pull(skb, hh.hdrlen);
826 852
827 /* We are going to accept this packet, prepare it */ 853 /* We are going to accept this packet, prepare it.
828 memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl, 854 * In case protocol is IPv6, keep it, otherwise force IPv4.
829 ETH_HLEN); 855 */
856 skb_reset_mac_header(skb);
857 if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
858 eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);
859 eth_zero_addr(eth_hdr(skb)->h_source);
860 memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
830 861
831 /* Last packet in batch handled by usbnet */ 862 /* Last packet in batch handled by usbnet */
832 if (hh.payload_len.word == skb->len) 863 if (hh.payload_len.word == skb->len)
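The reworked parser overlays __packed structs on the raw LSI buffer: lsi_umts is the common header, lsi_umts_single and lsi_umts_dual embed it, and lsi->length is checked against the expected size for the advertised protocol before any variant field is touched. A reduced sketch of that overlay-then-validate pattern, with abbreviated field names of our own:

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct demo_hdr {
            u8      protocol;
            u8      reserved;
            __be16  length;
    } __packed;

    static int demo_parse(const u8 *data, size_t datalen, u8 proto, u16 expected)
    {
            const struct demo_hdr *h = (const struct demo_hdr *)data;

            if (datalen < sizeof(*h))
                    return -EINVAL;
            if (h->protocol != proto)
                    return -EINVAL;
            if (be16_to_cpu(h->length) != expected) /* size gated by protocol */
                    return -EINVAL;
            return 0;       /* safe to read the protocol-specific tail now */
    }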
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4a105006ca63..765c2d6358da 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -48,8 +48,16 @@ module_param(gso, bool, 0444);
48 */ 48 */
49DECLARE_EWMA(pkt_len, 1, 64) 49DECLARE_EWMA(pkt_len, 1, 64)
50 50
51/* With mergeable buffers we align buffer address and use the low bits to
52 * encode its true size. Buffer size is up to 1 page so we need to align to
53 * square root of page size to ensure we reserve enough bits to encode the true
54 * size.
55 */
56#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
57
51/* Minimum alignment for mergeable packet buffers. */ 58/* Minimum alignment for mergeable packet buffers. */
52#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) 59#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
60 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
53 61
54#define VIRTNET_DRIVER_VERSION "1.0.0" 62#define VIRTNET_DRIVER_VERSION "1.0.0"
55 63
@@ -1104,7 +1112,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
1104 hdr = skb_vnet_hdr(skb); 1112 hdr = skb_vnet_hdr(skb);
1105 1113
1106 if (virtio_net_hdr_from_skb(skb, &hdr->hdr, 1114 if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
1107 virtio_is_little_endian(vi->vdev))) 1115 virtio_is_little_endian(vi->vdev), false))
1108 BUG(); 1116 BUG();
1109 1117
1110 if (vi->mergeable_rx_bufs) 1118 if (vi->mergeable_rx_bufs)
@@ -1707,6 +1715,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
1707 u16 xdp_qp = 0, curr_qp; 1715 u16 xdp_qp = 0, curr_qp;
1708 int i, err; 1716 int i, err;
1709 1717
1718 if (prog && prog->xdp_adjust_head) {
1719 netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n");
1720 return -EOPNOTSUPP;
1721 }
1722
1710 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || 1723 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1711 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || 1724 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1712 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || 1725 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
@@ -1890,8 +1903,12 @@ static void free_receive_page_frags(struct virtnet_info *vi)
1890 put_page(vi->rq[i].alloc_frag.page); 1903 put_page(vi->rq[i].alloc_frag.page);
1891} 1904}
1892 1905
1893static bool is_xdp_queue(struct virtnet_info *vi, int q) 1906static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1894{ 1907{
1908 /* For small receive mode always use kfree_skb variants */
1909 if (!vi->mergeable_rx_bufs)
1910 return false;
1911
1895 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) 1912 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1896 return false; 1913 return false;
1897 else if (q < vi->curr_queue_pairs) 1914 else if (q < vi->curr_queue_pairs)
@@ -1908,7 +1925,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
1908 for (i = 0; i < vi->max_queue_pairs; i++) { 1925 for (i = 0; i < vi->max_queue_pairs; i++) {
1909 struct virtqueue *vq = vi->sq[i].vq; 1926 struct virtqueue *vq = vi->sq[i].vq;
1910 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 1927 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1911 if (!is_xdp_queue(vi, i)) 1928 if (!is_xdp_raw_buffer_queue(vi, i))
1912 dev_kfree_skb(buf); 1929 dev_kfree_skb(buf);
1913 else 1930 else
1914 put_page(virt_to_head_page(buf)); 1931 put_page(virt_to_head_page(buf));
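The new comment's "square root of page size" can be made concrete: with 4 KiB pages PAGE_SHIFT is 12, so MERGEABLE_BUFFER_MIN_ALIGN_SHIFT = (12 + 1) / 2 = 6 and buffers are aligned to max(L1_CACHE_BYTES, 64). A 64-byte-aligned pointer has 6 free low bits, and PAGE_SIZE / 64 = 64 = 2^6 size steps fit in exactly those bits. A self-contained sketch of such an encoding (ours, mirroring the comment rather than copying the driver's helpers):

    #define DEMO_PAGE_SHIFT   12
    #define DEMO_ALIGN_SHIFT  ((DEMO_PAGE_SHIFT + 1) / 2)   /* = 6 */
    #define DEMO_ALIGN        (1UL << DEMO_ALIGN_SHIFT)     /* = 64 */

    /* addr must be DEMO_ALIGN-aligned; size a multiple of DEMO_ALIGN,
     * at most 1 << DEMO_PAGE_SHIFT.
     */
    static unsigned long demo_encode(unsigned long addr, unsigned long size)
    {
            return addr | ((size >> DEMO_ALIGN_SHIFT) - 1);
    }

    static unsigned long demo_addr(unsigned long ctx)
    {
            return ctx & ~(DEMO_ALIGN - 1);
    }

    static unsigned long demo_size(unsigned long ctx)
    {
            return ((ctx & (DEMO_ALIGN - 1)) + 1) << DEMO_ALIGN_SHIFT;
    }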
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7532646c3b7b..454f907d419a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -263,7 +263,9 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
263 .flowi4_iif = LOOPBACK_IFINDEX, 263 .flowi4_iif = LOOPBACK_IFINDEX,
264 .flowi4_tos = RT_TOS(ip4h->tos), 264 .flowi4_tos = RT_TOS(ip4h->tos),
265 .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF, 265 .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
266 .flowi4_proto = ip4h->protocol,
266 .daddr = ip4h->daddr, 267 .daddr = ip4h->daddr,
268 .saddr = ip4h->saddr,
267 }; 269 };
268 struct net *net = dev_net(vrf_dev); 270 struct net *net = dev_net(vrf_dev);
269 struct rtable *rt; 271 struct rtable *rt;
@@ -967,6 +969,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
967 */ 969 */
968 need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); 970 need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
969 if (!ipv6_ndisc_frame(skb) && !need_strict) { 971 if (!ipv6_ndisc_frame(skb) && !need_strict) {
972 vrf_rx_stats(vrf_dev, skb->len);
970 skb->dev = vrf_dev; 973 skb->dev = vrf_dev;
971 skb->skb_iif = vrf_dev->ifindex; 974 skb->skb_iif = vrf_dev->ifindex;
972 975
@@ -1011,6 +1014,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
1011 goto out; 1014 goto out;
1012 } 1015 }
1013 1016
1017 vrf_rx_stats(vrf_dev, skb->len);
1018
1014 skb_push(skb, skb->mac_len); 1019 skb_push(skb, skb->mac_len);
1015 dev_queue_xmit_nit(skb, vrf_dev); 1020 dev_queue_xmit_nit(skb, vrf_dev);
1016 skb_pull(skb, skb->mac_len); 1021 skb_pull(skb, skb->mac_len);
@@ -1247,6 +1252,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
1247 return -EINVAL; 1252 return -EINVAL;
1248 1253
1249 vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]); 1254 vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
1255 if (vrf->tb_id == RT_TABLE_UNSPEC)
1256 return -EINVAL;
1250 1257
1251 dev->priv_flags |= IFF_L3MDEV_MASTER; 1258 dev->priv_flags |= IFF_L3MDEV_MASTER;
1252 1259
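Filling in flowi4_proto and saddr matters once FIB rules or multipath hashing look at those key fields: an incomplete key can select a different route than the one the emitted packet would actually match. A sketch of building the full v4 key from the IP header, assuming the usual routing includes:

    #include <linux/ip.h>
    #include <net/route.h>

    static struct rtable *demo_route_output(struct net *net,
                                            const struct iphdr *ip4h, int oif)
    {
            struct flowi4 fl4 = {
                    .flowi4_oif   = oif,
                    .flowi4_tos   = RT_TOS(ip4h->tos),
                    .flowi4_proto = ip4h->protocol, /* part of the key for rules/ECMP */
                    .daddr        = ip4h->daddr,
                    .saddr        = ip4h->saddr,
            };

            return ip_route_output_key(net, &fl4);
    }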
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bb70dd5723b5..30b04cf2bb1e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1798,7 +1798,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
1798static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev, 1798static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
1799 struct vxlan_sock *sock4, 1799 struct vxlan_sock *sock4,
1800 struct sk_buff *skb, int oif, u8 tos, 1800 struct sk_buff *skb, int oif, u8 tos,
1801 __be32 daddr, __be32 *saddr, 1801 __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
1802 struct dst_cache *dst_cache, 1802 struct dst_cache *dst_cache,
1803 const struct ip_tunnel_info *info) 1803 const struct ip_tunnel_info *info)
1804{ 1804{
@@ -1824,6 +1824,8 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
1824 fl4.flowi4_proto = IPPROTO_UDP; 1824 fl4.flowi4_proto = IPPROTO_UDP;
1825 fl4.daddr = daddr; 1825 fl4.daddr = daddr;
1826 fl4.saddr = *saddr; 1826 fl4.saddr = *saddr;
1827 fl4.fl4_dport = dport;
1828 fl4.fl4_sport = sport;
1827 1829
1828 rt = ip_route_output_key(vxlan->net, &fl4); 1830 rt = ip_route_output_key(vxlan->net, &fl4);
1829 if (likely(!IS_ERR(rt))) { 1831 if (likely(!IS_ERR(rt))) {
@@ -1851,6 +1853,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1851 __be32 label, 1853 __be32 label,
1852 const struct in6_addr *daddr, 1854 const struct in6_addr *daddr,
1853 struct in6_addr *saddr, 1855 struct in6_addr *saddr,
1856 __be16 dport, __be16 sport,
1854 struct dst_cache *dst_cache, 1857 struct dst_cache *dst_cache,
1855 const struct ip_tunnel_info *info) 1858 const struct ip_tunnel_info *info)
1856{ 1859{
@@ -1877,6 +1880,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1877 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); 1880 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
1878 fl6.flowi6_mark = skb->mark; 1881 fl6.flowi6_mark = skb->mark;
1879 fl6.flowi6_proto = IPPROTO_UDP; 1882 fl6.flowi6_proto = IPPROTO_UDP;
1883 fl6.fl6_dport = dport;
1884 fl6.fl6_sport = sport;
1880 1885
1881 err = ipv6_stub->ipv6_dst_lookup(vxlan->net, 1886 err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
1882 sock6->sock->sk, 1887 sock6->sock->sk,
@@ -2068,6 +2073,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2068 rdst ? rdst->remote_ifindex : 0, tos, 2073 rdst ? rdst->remote_ifindex : 0, tos,
2069 dst->sin.sin_addr.s_addr, 2074 dst->sin.sin_addr.s_addr,
2070 &src->sin.sin_addr.s_addr, 2075 &src->sin.sin_addr.s_addr,
2076 dst_port, src_port,
2071 dst_cache, info); 2077 dst_cache, info);
2072 if (IS_ERR(rt)) { 2078 if (IS_ERR(rt)) {
2073 err = PTR_ERR(rt); 2079 err = PTR_ERR(rt);
@@ -2104,6 +2110,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2104 rdst ? rdst->remote_ifindex : 0, tos, 2110 rdst ? rdst->remote_ifindex : 0, tos,
2105 label, &dst->sin6.sin6_addr, 2111 label, &dst->sin6.sin6_addr,
2106 &src->sin6.sin6_addr, 2112 &src->sin6.sin6_addr,
2113 dst_port, src_port,
2107 dst_cache, info); 2114 dst_cache, info);
2108 if (IS_ERR(ndst)) { 2115 if (IS_ERR(ndst)) {
2109 err = PTR_ERR(ndst); 2116 err = PTR_ERR(ndst);
@@ -2261,7 +2268,7 @@ static void vxlan_cleanup(unsigned long arg)
2261 = container_of(p, struct vxlan_fdb, hlist); 2268 = container_of(p, struct vxlan_fdb, hlist);
2262 unsigned long timeout; 2269 unsigned long timeout;
2263 2270
2264 if (f->state & NUD_PERMANENT) 2271 if (f->state & (NUD_PERMANENT | NUD_NOARP))
2265 continue; 2272 continue;
2266 2273
2267 timeout = f->used + vxlan->cfg.age_interval * HZ; 2274 timeout = f->used + vxlan->cfg.age_interval * HZ;
@@ -2347,7 +2354,7 @@ static int vxlan_open(struct net_device *dev)
2347} 2354}
2348 2355
2349/* Purge the forwarding table */ 2356/* Purge the forwarding table */
2350static void vxlan_flush(struct vxlan_dev *vxlan) 2357static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
2351{ 2358{
2352 unsigned int h; 2359 unsigned int h;
2353 2360
@@ -2357,6 +2364,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
2357 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { 2364 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2358 struct vxlan_fdb *f 2365 struct vxlan_fdb *f
2359 = container_of(p, struct vxlan_fdb, hlist); 2366 = container_of(p, struct vxlan_fdb, hlist);
2367 if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
2368 continue;
2360 /* the all_zeros_mac entry is deleted at vxlan_uninit */ 2369 /* the all_zeros_mac entry is deleted at vxlan_uninit */
2361 if (!is_zero_ether_addr(f->eth_addr)) 2370 if (!is_zero_ether_addr(f->eth_addr))
2362 vxlan_fdb_destroy(vxlan, f); 2371 vxlan_fdb_destroy(vxlan, f);
@@ -2378,7 +2387,7 @@ static int vxlan_stop(struct net_device *dev)
2378 2387
2379 del_timer_sync(&vxlan->age_timer); 2388 del_timer_sync(&vxlan->age_timer);
2380 2389
2381 vxlan_flush(vxlan); 2390 vxlan_flush(vxlan, false);
2382 vxlan_sock_release(vxlan); 2391 vxlan_sock_release(vxlan);
2383 2392
2384 return ret; 2393 return ret;
@@ -2430,7 +2439,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2430 2439
2431 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos, 2440 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
2432 info->key.u.ipv4.dst, 2441 info->key.u.ipv4.dst,
2433 &info->key.u.ipv4.src, NULL, info); 2442 &info->key.u.ipv4.src, dport, sport,
2443 &info->dst_cache, info);
2434 if (IS_ERR(rt)) 2444 if (IS_ERR(rt))
2435 return PTR_ERR(rt); 2445 return PTR_ERR(rt);
2436 ip_rt_put(rt); 2446 ip_rt_put(rt);
@@ -2441,7 +2451,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2441 2451
2442 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos, 2452 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
2443 info->key.label, &info->key.u.ipv6.dst, 2453 info->key.label, &info->key.u.ipv6.dst,
2444 &info->key.u.ipv6.src, NULL, info); 2454 &info->key.u.ipv6.src, dport, sport,
2455 &info->dst_cache, info);
2445 if (IS_ERR(ndst)) 2456 if (IS_ERR(ndst))
2446 return PTR_ERR(ndst); 2457 return PTR_ERR(ndst);
2447 dst_release(ndst); 2458 dst_release(ndst);
@@ -2883,7 +2894,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2883 memcpy(&vxlan->cfg, conf, sizeof(*conf)); 2894 memcpy(&vxlan->cfg, conf, sizeof(*conf));
2884 if (!vxlan->cfg.dst_port) { 2895 if (!vxlan->cfg.dst_port) {
2885 if (conf->flags & VXLAN_F_GPE) 2896 if (conf->flags & VXLAN_F_GPE)
2886 vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */ 2897 vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
2887 else 2898 else
2888 vxlan->cfg.dst_port = default_port; 2899 vxlan->cfg.dst_port = default_port;
2889 } 2900 }
@@ -3051,6 +3062,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
3051 struct vxlan_dev *vxlan = netdev_priv(dev); 3062 struct vxlan_dev *vxlan = netdev_priv(dev);
3052 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 3063 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
3053 3064
3065 vxlan_flush(vxlan, true);
3066
3054 spin_lock(&vn->sock_lock); 3067 spin_lock(&vn->sock_lock);
3055 if (!hlist_unhashed(&vxlan->hlist)) 3068 if (!hlist_unhashed(&vxlan->hlist))
3056 hlist_del_rcu(&vxlan->hlist); 3069 hlist_del_rcu(&vxlan->hlist);
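
Note on the vxlan hunks above: they thread the UDP source and destination ports into the IPv4/IPv6 flow keys so the route lookup (and any policy attached to it) can match on them, exempt NUD_NOARP entries from FDB aging alongside NUD_PERMANENT ones, and make vxlan_flush() spare those static entries on vxlan_stop() while still purging everything on vxlan_dellink(). A minimal sketch of the port-aware lookup, using only the upstream flowi4 fields shown in the diff (the wrapper function itself is illustrative, not the driver's code):

    static struct rtable *udp_tunnel_route(struct net *net, __be32 daddr,
                                           __be32 saddr, __be16 dport,
                                           __be16 sport)
    {
            struct flowi4 fl4;

            memset(&fl4, 0, sizeof(fl4));
            fl4.flowi4_proto = IPPROTO_UDP;
            fl4.daddr = daddr;
            fl4.saddr = saddr;
            fl4.fl4_dport = dport;  /* ports are now part of the key */
            fl4.fl4_sport = sport;

            return ip_route_output_key(net, &fl4);
    }
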
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index b776a0ab106c..9d9b4e0def2a 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -218,7 +218,7 @@ static int slic_ds26522_probe(struct spi_device *spi)
218 218
219 ret = slic_ds26522_init_configure(spi); 219 ret = slic_ds26522_init_configure(spi);
220 if (ret == 0) 220 if (ret == 0)
221 pr_info("DS26522 cs%d configurated\n", spi->chip_select); 221 pr_info("DS26522 cs%d configured\n", spi->chip_select);
222 222
223 return ret; 223 return ret;
224} 224}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index d02ca1491d16..8d3e53fac1da 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -91,7 +91,7 @@
91 91
92#define IWL8000_FW_PRE "iwlwifi-8000C-" 92#define IWL8000_FW_PRE "iwlwifi-8000C-"
93#define IWL8000_MODULE_FIRMWARE(api) \ 93#define IWL8000_MODULE_FIRMWARE(api) \
94 IWL8000_FW_PRE "-" __stringify(api) ".ucode" 94 IWL8000_FW_PRE __stringify(api) ".ucode"
95 95
96#define IWL8265_FW_PRE "iwlwifi-8265-" 96#define IWL8265_FW_PRE "iwlwifi-8265-"
97#define IWL8265_MODULE_FIRMWARE(api) \ 97#define IWL8265_MODULE_FIRMWARE(api) \
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 636c8b03e318..09e9e2e3ed04 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1164,9 +1164,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1164 .frame_limit = IWL_FRAME_LIMIT, 1164 .frame_limit = IWL_FRAME_LIMIT,
1165 }; 1165 };
1166 1166
1167 /* Make sure reserved queue is still marked as such (or allocated) */ 1167 /* Make sure reserved queue is still marked as such (if allocated) */
1168 mvm->queue_info[mvm_sta->reserved_queue].status = 1168 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1169 IWL_MVM_QUEUE_RESERVED; 1169 mvm->queue_info[mvm_sta->reserved_queue].status =
1170 IWL_MVM_QUEUE_RESERVED;
1170 1171
1171 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { 1172 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1172 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; 1173 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 63a051be832e..bec7d9c46087 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
843 return; 843 return;
844 844
845 IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n"); 845 IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
846 thermal_zone_device_unregister(mvm->tz_device.tzone); 846 if (mvm->tz_device.tzone) {
847 mvm->tz_device.tzone = NULL; 847 thermal_zone_device_unregister(mvm->tz_device.tzone);
848 mvm->tz_device.tzone = NULL;
849 }
848} 850}
849 851
850static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) 852static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
853 return; 855 return;
854 856
855 IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n"); 857 IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
856 thermal_cooling_device_unregister(mvm->cooling_dev.cdev); 858 if (mvm->cooling_dev.cdev) {
857 mvm->cooling_dev.cdev = NULL; 859 thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
860 mvm->cooling_dev.cdev = NULL;
861 }
858} 862}
859#endif /* CONFIG_THERMAL */ 863#endif /* CONFIG_THERMAL */
860 864
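
Both thermal hunks apply the same guard-and-clear pattern: unregister only when the handle was actually registered, then NULL it so a second teardown call is a no-op. As a generic sketch (identifiers illustrative):

    if (dev->handle) {
            subsystem_unregister(dev->handle);
            dev->handle = NULL;     /* make repeated teardown harmless */
    }
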
diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c
index bc7397d709d3..08bc7822f820 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.c
+++ b/drivers/net/wireless/intersil/orinoco/mic.c
@@ -16,7 +16,7 @@
16/********************************************************************/ 16/********************************************************************/
17int orinoco_mic_init(struct orinoco_private *priv) 17int orinoco_mic_init(struct orinoco_private *priv)
18{ 18{
19 priv->tx_tfm_mic = crypto_alloc_ahash("michael_mic", 0, 19 priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
20 CRYPTO_ALG_ASYNC); 20 CRYPTO_ALG_ASYNC);
21 if (IS_ERR(priv->tx_tfm_mic)) { 21 if (IS_ERR(priv->tx_tfm_mic)) {
22 printk(KERN_DEBUG "orinoco_mic_init: could not allocate " 22 printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
@@ -25,7 +25,7 @@ int orinoco_mic_init(struct orinoco_private *priv)
25 return -ENOMEM; 25 return -ENOMEM;
26 } 26 }
27 27
28 priv->rx_tfm_mic = crypto_alloc_ahash("michael_mic", 0, 28 priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
29 CRYPTO_ALG_ASYNC); 29 CRYPTO_ALG_ASYNC);
30 if (IS_ERR(priv->rx_tfm_mic)) { 30 if (IS_ERR(priv->rx_tfm_mic)) {
31 printk(KERN_DEBUG "orinoco_mic_init: could not allocate " 31 printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
@@ -40,17 +40,16 @@ int orinoco_mic_init(struct orinoco_private *priv)
40void orinoco_mic_free(struct orinoco_private *priv) 40void orinoco_mic_free(struct orinoco_private *priv)
41{ 41{
42 if (priv->tx_tfm_mic) 42 if (priv->tx_tfm_mic)
43 crypto_free_ahash(priv->tx_tfm_mic); 43 crypto_free_shash(priv->tx_tfm_mic);
44 if (priv->rx_tfm_mic) 44 if (priv->rx_tfm_mic)
45 crypto_free_ahash(priv->rx_tfm_mic); 45 crypto_free_shash(priv->rx_tfm_mic);
46} 46}
47 47
48int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key, 48int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
49 u8 *da, u8 *sa, u8 priority, 49 u8 *da, u8 *sa, u8 priority,
50 u8 *data, size_t data_len, u8 *mic) 50 u8 *data, size_t data_len, u8 *mic)
51{ 51{
52 AHASH_REQUEST_ON_STACK(req, tfm_michael); 52 SHASH_DESC_ON_STACK(desc, tfm_michael);
53 struct scatterlist sg[2];
54 u8 hdr[ETH_HLEN + 2]; /* size of header + padding */ 53 u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
55 int err; 54 int err;
56 55
@@ -67,18 +66,27 @@ int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
67 hdr[ETH_ALEN * 2 + 2] = 0; 66 hdr[ETH_ALEN * 2 + 2] = 0;
68 hdr[ETH_ALEN * 2 + 3] = 0; 67 hdr[ETH_ALEN * 2 + 3] = 0;
69 68
70 /* Use scatter gather to MIC header and data in one go */ 69 desc->tfm = tfm_michael;
71 sg_init_table(sg, 2); 70 desc->flags = 0;
72 sg_set_buf(&sg[0], hdr, sizeof(hdr));
73 sg_set_buf(&sg[1], data, data_len);
74 71
75 if (crypto_ahash_setkey(tfm_michael, key, MIC_KEYLEN)) 72 err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
76 return -1; 73 if (err)
74 return err;
75
76 err = crypto_shash_init(desc);
77 if (err)
78 return err;
79
80 err = crypto_shash_update(desc, hdr, sizeof(hdr));
81 if (err)
82 return err;
83
84 err = crypto_shash_update(desc, data, data_len);
85 if (err)
86 return err;
87
88 err = crypto_shash_final(desc, mic);
89 shash_desc_zero(desc);
77 90
78 ahash_request_set_tfm(req, tfm_michael);
79 ahash_request_set_callback(req, 0, NULL, NULL);
80 ahash_request_set_crypt(req, sg, mic, data_len + sizeof(hdr));
81 err = crypto_ahash_digest(req);
82 ahash_request_zero(req);
83 return err; 91 return err;
84} 92}
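
michael_mic is a synchronous hash, so the orinoco conversion above swaps the ahash API (scatterlists plus a request object) for the simpler shash API and feeds the pseudo-header and payload through init/update/final. The shape of the sequence, sketched with illustrative buffer names and the error handling trimmed:

    SHASH_DESC_ON_STACK(desc, tfm);

    desc->tfm = tfm;
    desc->flags = 0;

    err = crypto_shash_setkey(tfm, key, keylen);
    if (!err)
            err = crypto_shash_init(desc);
    if (!err)
            err = crypto_shash_update(desc, hdr, hdr_len);
    if (!err)
            err = crypto_shash_update(desc, data, data_len);
    if (!err)
            err = crypto_shash_final(desc, mic);
    shash_desc_zero(desc);          /* scrub key material off the stack */
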
diff --git a/drivers/net/wireless/intersil/orinoco/mic.h b/drivers/net/wireless/intersil/orinoco/mic.h
index ce731d05cc98..e8724e889219 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.h
+++ b/drivers/net/wireless/intersil/orinoco/mic.h
@@ -6,6 +6,7 @@
6#define _ORINOCO_MIC_H_ 6#define _ORINOCO_MIC_H_
7 7
8#include <linux/types.h> 8#include <linux/types.h>
9#include <crypto/hash.h>
9 10
10#define MICHAEL_MIC_LEN 8 11#define MICHAEL_MIC_LEN 8
11 12
@@ -15,7 +16,7 @@ struct crypto_ahash;
15 16
16int orinoco_mic_init(struct orinoco_private *priv); 17int orinoco_mic_init(struct orinoco_private *priv);
17void orinoco_mic_free(struct orinoco_private *priv); 18void orinoco_mic_free(struct orinoco_private *priv);
18int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key, 19int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
19 u8 *da, u8 *sa, u8 priority, 20 u8 *da, u8 *sa, u8 priority,
20 u8 *data, size_t data_len, u8 *mic); 21 u8 *data, size_t data_len, u8 *mic);
21 22
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h
index 2f0c84b1c440..5fa1c3e3713f 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco.h
+++ b/drivers/net/wireless/intersil/orinoco/orinoco.h
@@ -152,8 +152,8 @@ struct orinoco_private {
152 u8 *wpa_ie; 152 u8 *wpa_ie;
153 int wpa_ie_len; 153 int wpa_ie_len;
154 154
155 struct crypto_ahash *rx_tfm_mic; 155 struct crypto_shash *rx_tfm_mic;
156 struct crypto_ahash *tx_tfm_mic; 156 struct crypto_shash *tx_tfm_mic;
157 157
158 unsigned int wpa_enabled:1; 158 unsigned int wpa_enabled:1;
159 unsigned int tkip_cm_active:1; 159 unsigned int tkip_cm_active:1;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 691ddef1ae28..a33a06d58a9a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -92,7 +92,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
92 struct rtl_priv *rtlpriv = rtl_priv(hw); 92 struct rtl_priv *rtlpriv = rtl_priv(hw);
93 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 93 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
94 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 94 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
95 char *fw_name = "rtlwifi/rtl8192cfwU.bin"; 95 char *fw_name;
96 96
97 rtl8192ce_bt_reg_init(hw); 97 rtl8192ce_bt_reg_init(hw);
98 98
@@ -164,8 +164,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
164 } 164 }
165 165
166 /* request fw */ 166 /* request fw */
167 if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) 167 if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
168 !IS_92C_SERIAL(rtlhal->version))
169 fw_name = "rtlwifi/rtl8192cfwU.bin";
170 else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
168 fw_name = "rtlwifi/rtl8192cfwU_B.bin"; 171 fw_name = "rtlwifi/rtl8192cfwU_B.bin";
172 else
173 fw_name = "rtlwifi/rtl8192cfw.bin";
169 174
170 rtlpriv->max_fw_size = 0x4000; 175 rtlpriv->max_fw_size = 0x4000;
171 pr_info("Using firmware %s\n", fw_name); 176 pr_info("Using firmware %s\n", fw_name);
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 0a508649903d..49015b05f3d1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1063,6 +1063,7 @@ int rtl_usb_probe(struct usb_interface *intf,
1063 return -ENOMEM; 1063 return -ENOMEM;
1064 } 1064 }
1065 rtlpriv = hw->priv; 1065 rtlpriv = hw->priv;
1066 rtlpriv->hw = hw;
1066 rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32), 1067 rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
1067 GFP_KERNEL); 1068 GFP_KERNEL);
1068 if (!rtlpriv->usb_data) 1069 if (!rtlpriv->usb_data)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 3ce1f7da8647..530586be05b4 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -113,10 +113,10 @@ struct xenvif_stats {
113 * A subset of struct net_device_stats that contains only the 113 * A subset of struct net_device_stats that contains only the
114 * fields that are updated in netback.c for each queue. 114 * fields that are updated in netback.c for each queue.
115 */ 115 */
116 unsigned int rx_bytes; 116 u64 rx_bytes;
117 unsigned int rx_packets; 117 u64 rx_packets;
118 unsigned int tx_bytes; 118 u64 tx_bytes;
119 unsigned int tx_packets; 119 u64 tx_packets;
120 120
121 /* Additional stats used by xenvif */ 121 /* Additional stats used by xenvif */
122 unsigned long rx_gso_checksum_fixup; 122 unsigned long rx_gso_checksum_fixup;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index e30ffd29b7e9..50fa1692d985 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
221{ 221{
222 struct xenvif *vif = netdev_priv(dev); 222 struct xenvif *vif = netdev_priv(dev);
223 struct xenvif_queue *queue = NULL; 223 struct xenvif_queue *queue = NULL;
224 unsigned int num_queues = vif->num_queues; 224 u64 rx_bytes = 0;
225 unsigned long rx_bytes = 0; 225 u64 rx_packets = 0;
226 unsigned long rx_packets = 0; 226 u64 tx_bytes = 0;
227 unsigned long tx_bytes = 0; 227 u64 tx_packets = 0;
228 unsigned long tx_packets = 0;
229 unsigned int index; 228 unsigned int index;
230 229
230 spin_lock(&vif->lock);
231 if (vif->queues == NULL) 231 if (vif->queues == NULL)
232 goto out; 232 goto out;
233 233
234 /* Aggregate tx and rx stats from each queue */ 234 /* Aggregate tx and rx stats from each queue */
235 for (index = 0; index < num_queues; ++index) { 235 for (index = 0; index < vif->num_queues; ++index) {
236 queue = &vif->queues[index]; 236 queue = &vif->queues[index];
237 rx_bytes += queue->stats.rx_bytes; 237 rx_bytes += queue->stats.rx_bytes;
238 rx_packets += queue->stats.rx_packets; 238 rx_packets += queue->stats.rx_packets;
@@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
241 } 241 }
242 242
243out: 243out:
244 spin_unlock(&vif->lock);
245
244 vif->dev->stats.rx_bytes = rx_bytes; 246 vif->dev->stats.rx_bytes = rx_bytes;
245 vif->dev->stats.rx_packets = rx_packets; 247 vif->dev->stats.rx_packets = rx_packets;
246 vif->dev->stats.tx_bytes = tx_bytes; 248 vif->dev->stats.tx_bytes = tx_bytes;
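
Widening the counters to u64 keeps the 64-bit netdev stats from wrapping at 4 GiB on 32-bit builds, and taking vif->lock around the aggregation closes a race with backend_disconnect() freeing vif->queues underneath the reader. The locking discipline, sketched (loop body simplified):

    spin_lock(&vif->lock);
    if (vif->queues) {
            for (index = 0; index < vif->num_queues; ++index)
                    rx_bytes += vif->queues[index].stats.rx_bytes;
    }
    spin_unlock(&vif->lock);
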
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 3124eaec9427..85b742e1c42f 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -493,11 +493,22 @@ static int backend_create_xenvif(struct backend_info *be)
493static void backend_disconnect(struct backend_info *be) 493static void backend_disconnect(struct backend_info *be)
494{ 494{
495 if (be->vif) { 495 if (be->vif) {
496 unsigned int queue_index;
497
496 xen_unregister_watchers(be->vif); 498 xen_unregister_watchers(be->vif);
497#ifdef CONFIG_DEBUG_FS 499#ifdef CONFIG_DEBUG_FS
498 xenvif_debugfs_delif(be->vif); 500 xenvif_debugfs_delif(be->vif);
499#endif /* CONFIG_DEBUG_FS */ 501#endif /* CONFIG_DEBUG_FS */
500 xenvif_disconnect_data(be->vif); 502 xenvif_disconnect_data(be->vif);
503 for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
504 xenvif_deinit_queue(&be->vif->queues[queue_index]);
505
506 spin_lock(&be->vif->lock);
507 vfree(be->vif->queues);
508 be->vif->num_queues = 0;
509 be->vif->queues = NULL;
510 spin_unlock(&be->vif->lock);
511
501 xenvif_disconnect_ctrl(be->vif); 512 xenvif_disconnect_ctrl(be->vif);
502 } 513 }
503} 514}
@@ -1034,6 +1045,8 @@ static void connect(struct backend_info *be)
1034err: 1045err:
1035 if (be->vif->num_queues > 0) 1046 if (be->vif->num_queues > 0)
1036 xenvif_disconnect_data(be->vif); /* Clean up existing queues */ 1047 xenvif_disconnect_data(be->vif); /* Clean up existing queues */
1048 for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
1049 xenvif_deinit_queue(&be->vif->queues[queue_index]);
1037 vfree(be->vif->queues); 1050 vfree(be->vif->queues);
1038 be->vif->queues = NULL; 1051 be->vif->queues = NULL;
1039 be->vif->num_queues = 0; 1052 be->vif->num_queues = 0;
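
backend_disconnect() now owns the full queue teardown: stop the data path, deinit every queue, then publish the empty state under the same vif->lock the stats reader takes, so no window remains where num_queues is stale while queues is being freed. The ordering, as a sketch:

    xenvif_disconnect_data(vif);                    /* 1: stop data path    */
    for (i = 0; i < vif->num_queues; ++i)
            xenvif_deinit_queue(&vif->queues[i]);   /* 2: per-queue cleanup */

    spin_lock(&vif->lock);                          /* 3: publish atomically */
    vfree(vif->queues);
    vif->num_queues = 0;
    vif->queues = NULL;
    spin_unlock(&vif->lock);
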
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index a479cd99911d..1e4125a98291 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -281,6 +281,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
281{ 281{
282 RING_IDX req_prod = queue->rx.req_prod_pvt; 282 RING_IDX req_prod = queue->rx.req_prod_pvt;
283 int notify; 283 int notify;
284 int err = 0;
284 285
285 if (unlikely(!netif_carrier_ok(queue->info->netdev))) 286 if (unlikely(!netif_carrier_ok(queue->info->netdev)))
286 return; 287 return;
@@ -295,8 +296,10 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
295 struct xen_netif_rx_request *req; 296 struct xen_netif_rx_request *req;
296 297
297 skb = xennet_alloc_one_rx_buffer(queue); 298 skb = xennet_alloc_one_rx_buffer(queue);
298 if (!skb) 299 if (!skb) {
300 err = -ENOMEM;
299 break; 301 break;
302 }
300 303
301 id = xennet_rxidx(req_prod); 304 id = xennet_rxidx(req_prod);
302 305
@@ -320,8 +323,13 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
320 323
321 queue->rx.req_prod_pvt = req_prod; 324 queue->rx.req_prod_pvt = req_prod;
322 325
323 /* Not enough requests? Try again later. */ 326 /* Try again later if there are not enough requests or skb allocation
324 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { 327 * failed.
328 * Enough requests is quantified as the sum of newly created slots and
329 * the unconsumed slots at the backend.
330 */
331 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
332 unlikely(err)) {
325 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); 333 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
326 return; 334 return;
327 } 335 }
@@ -1379,6 +1387,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
1379 for (i = 0; i < num_queues && info->queues; ++i) { 1387 for (i = 0; i < num_queues && info->queues; ++i) {
1380 struct netfront_queue *queue = &info->queues[i]; 1388 struct netfront_queue *queue = &info->queues[i];
1381 1389
1390 del_timer_sync(&queue->rx_refill_timer);
1391
1382 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) 1392 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1383 unbind_from_irqhandler(queue->tx_irq, queue); 1393 unbind_from_irqhandler(queue->tx_irq, queue);
1384 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { 1394 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
@@ -1733,7 +1743,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
1733 1743
1734 if (netif_running(info->netdev)) 1744 if (netif_running(info->netdev))
1735 napi_disable(&queue->napi); 1745 napi_disable(&queue->napi);
1736 del_timer_sync(&queue->rx_refill_timer);
1737 netif_napi_del(&queue->napi); 1746 netif_napi_del(&queue->napi);
1738 } 1747 }
1739 1748
@@ -1822,27 +1831,19 @@ static int talk_to_netback(struct xenbus_device *dev,
1822 xennet_destroy_queues(info); 1831 xennet_destroy_queues(info);
1823 1832
1824 err = xennet_create_queues(info, &num_queues); 1833 err = xennet_create_queues(info, &num_queues);
1825 if (err < 0) 1834 if (err < 0) {
1826 goto destroy_ring; 1835 xenbus_dev_fatal(dev, err, "creating queues");
1836 kfree(info->queues);
1837 info->queues = NULL;
1838 goto out;
1839 }
1827 1840
1828 /* Create shared ring, alloc event channel -- for each queue */ 1841 /* Create shared ring, alloc event channel -- for each queue */
1829 for (i = 0; i < num_queues; ++i) { 1842 for (i = 0; i < num_queues; ++i) {
1830 queue = &info->queues[i]; 1843 queue = &info->queues[i];
1831 err = setup_netfront(dev, queue, feature_split_evtchn); 1844 err = setup_netfront(dev, queue, feature_split_evtchn);
1832 if (err) { 1845 if (err)
1833 /* setup_netfront() will tidy up the current 1846 goto destroy_ring;
1834 * queue on error, but we need to clean up
1835 * those already allocated.
1836 */
1837 if (i > 0) {
1838 rtnl_lock();
1839 netif_set_real_num_tx_queues(info->netdev, i);
1840 rtnl_unlock();
1841 goto destroy_ring;
1842 } else {
1843 goto out;
1844 }
1845 }
1846 } 1847 }
1847 1848
1848again: 1849again:
@@ -1932,9 +1933,10 @@ abort_transaction_no_dev_fatal:
1932 xenbus_transaction_end(xbt, 1); 1933 xenbus_transaction_end(xbt, 1);
1933 destroy_ring: 1934 destroy_ring:
1934 xennet_disconnect_backend(info); 1935 xennet_disconnect_backend(info);
1935 kfree(info->queues); 1936 xennet_destroy_queues(info);
1936 info->queues = NULL;
1937 out: 1937 out:
1938 unregister_netdev(info->netdev);
1939 xennet_free_netdev(info->netdev);
1938 return err; 1940 return err;
1939} 1941}
1940 1942
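
The netfront changes make the Rx refill path self-healing: if an skb allocation fails mid-batch, the refill timer is rearmed just as it is when the ring is short of NET_RX_SLOTS_MIN, and del_timer_sync() moves into xennet_disconnect_backend() so the timer is dead before the rings it refills are torn down. The retry rule, condensed:

    /* Rearm and retry later if the ring is still short of slots
     * (newly created plus unconsumed at the backend) or an skb
     * allocation failed.
     */
    if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
        unlikely(err)) {
            mod_timer(&queue->rx_refill_timer, jiffies + (HZ / 10));
            return;
    }
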
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index eca9688bf9d9..c00238491673 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -1629,6 +1629,28 @@ static void atom_deinit_dev(struct intel_ntb_dev *ndev)
1629 1629
1630/* Skylake Xeon NTB */ 1630/* Skylake Xeon NTB */
1631 1631
1632static int skx_poll_link(struct intel_ntb_dev *ndev)
1633{
1634 u16 reg_val;
1635 int rc;
1636
1637 ndev->reg->db_iowrite(ndev->db_link_mask,
1638 ndev->self_mmio +
1639 ndev->self_reg->db_clear);
1640
1641 rc = pci_read_config_word(ndev->ntb.pdev,
1642 SKX_LINK_STATUS_OFFSET, &reg_val);
1643 if (rc)
1644 return 0;
1645
1646 if (reg_val == ndev->lnk_sta)
1647 return 0;
1648
1649 ndev->lnk_sta = reg_val;
1650
1651 return 1;
1652}
1653
1632static u64 skx_db_ioread(void __iomem *mmio) 1654static u64 skx_db_ioread(void __iomem *mmio)
1633{ 1655{
1634 return ioread64(mmio); 1656 return ioread64(mmio);
@@ -2852,7 +2874,7 @@ static struct intel_b2b_addr xeon_b2b_dsd_addr = {
2852}; 2874};
2853 2875
2854static const struct intel_ntb_reg skx_reg = { 2876static const struct intel_ntb_reg skx_reg = {
2855 .poll_link = xeon_poll_link, 2877 .poll_link = skx_poll_link,
2856 .link_is_up = xeon_link_is_up, 2878 .link_is_up = xeon_link_is_up,
2857 .db_ioread = skx_db_ioread, 2879 .db_ioread = skx_db_ioread,
2858 .db_iowrite = skx_db_iowrite, 2880 .db_iowrite = skx_db_iowrite,
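
Skylake NTB reports link status through PCI config space rather than the MMIO register the older Xeon path polls, hence the dedicated skx_poll_link(). Its contract is simple: clear the link doorbell, read the status word, and report a change only when the cached value differs. The change-detection core, sketched:

    u16 reg_val;

    if (pci_read_config_word(pdev, SKX_LINK_STATUS_OFFSET, &reg_val))
            return 0;               /* config read failed: no change */
    if (reg_val == ndev->lnk_sta)
            return 0;               /* status unchanged */
    ndev->lnk_sta = reg_val;
    return 1;                       /* link state transition seen */
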
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index f81aa4b18d9f..02ca45fdd892 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -1802,7 +1802,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
1802 1802
1803 node = dev_to_node(&ndev->dev); 1803 node = dev_to_node(&ndev->dev);
1804 1804
1805 free_queue = ffs(nt->qp_bitmap); 1805 free_queue = ffs(nt->qp_bitmap_free);
1806 if (!free_queue) 1806 if (!free_queue)
1807 goto err; 1807 goto err;
1808 1808
@@ -2273,9 +2273,8 @@ module_init(ntb_transport_init);
2273 2273
2274static void __exit ntb_transport_exit(void) 2274static void __exit ntb_transport_exit(void)
2275{ 2275{
2276 debugfs_remove_recursive(nt_debugfs_dir);
2277
2278 ntb_unregister_client(&ntb_transport_client); 2276 ntb_unregister_client(&ntb_transport_client);
2279 bus_unregister(&ntb_transport_bus); 2277 bus_unregister(&ntb_transport_bus);
2278 debugfs_remove_recursive(nt_debugfs_dir);
2280} 2279}
2281module_exit(ntb_transport_exit); 2280module_exit(ntb_transport_exit);
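
Two small correctness fixes here: queue allocation must scan qp_bitmap_free (the mask of currently unused queue pairs), since ffs() on the full qp_bitmap could hand out a pair that is already claimed, and module exit must remove the debugfs tree only after the client and bus are unregistered, since the unregistration paths can still touch those entries. The allocation side, condensed:

    free_queue = ffs(nt->qp_bitmap_free);   /* lowest *unused* pair */
    if (!free_queue)
            goto err;                       /* nothing free */
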
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index e75d4fdc0866..434e1d474f33 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -265,6 +265,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
265 if (dma_submit_error(cookie)) 265 if (dma_submit_error(cookie))
266 goto err_set_unmap; 266 goto err_set_unmap;
267 267
268 dmaengine_unmap_put(unmap);
269
268 atomic_inc(&pctx->dma_sync); 270 atomic_inc(&pctx->dma_sync);
269 dma_async_issue_pending(chan); 271 dma_async_issue_pending(chan);
270 272
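
perf_copy() was leaking the dmaengine unmap data: once the descriptor is successfully submitted it holds its own reference, so the submitter must drop the one it took at map time. The reference rule, sketched:

    cookie = txd->tx_submit(txd);
    if (dma_submit_error(cookie))
            goto err_set_unmap;     /* submit failed: error path frees */
    dmaengine_unmap_put(unmap);     /* descriptor now owns the unmap  */
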
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 6307088b375f..ce3e8dfa10ad 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -52,17 +52,17 @@ static void namespace_blk_release(struct device *dev)
52 kfree(nsblk); 52 kfree(nsblk);
53} 53}
54 54
55static struct device_type namespace_io_device_type = { 55static const struct device_type namespace_io_device_type = {
56 .name = "nd_namespace_io", 56 .name = "nd_namespace_io",
57 .release = namespace_io_release, 57 .release = namespace_io_release,
58}; 58};
59 59
60static struct device_type namespace_pmem_device_type = { 60static const struct device_type namespace_pmem_device_type = {
61 .name = "nd_namespace_pmem", 61 .name = "nd_namespace_pmem",
62 .release = namespace_pmem_release, 62 .release = namespace_pmem_release,
63}; 63};
64 64
65static struct device_type namespace_blk_device_type = { 65static const struct device_type namespace_blk_device_type = {
66 .name = "nd_namespace_blk", 66 .name = "nd_namespace_blk",
67 .release = namespace_blk_release, 67 .release = namespace_blk_release,
68}; 68};
@@ -957,25 +957,28 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
957{ 957{
958 resource_size_t allocated = 0, available = 0; 958 resource_size_t allocated = 0, available = 0;
959 struct nd_region *nd_region = to_nd_region(dev->parent); 959 struct nd_region *nd_region = to_nd_region(dev->parent);
960 struct nd_namespace_common *ndns = to_ndns(dev);
960 struct nd_mapping *nd_mapping; 961 struct nd_mapping *nd_mapping;
961 struct nvdimm_drvdata *ndd; 962 struct nvdimm_drvdata *ndd;
962 struct nd_label_id label_id; 963 struct nd_label_id label_id;
963 u32 flags = 0, remainder; 964 u32 flags = 0, remainder;
965 int rc, i, id = -1;
964 u8 *uuid = NULL; 966 u8 *uuid = NULL;
965 int rc, i;
966 967
967 if (dev->driver || to_ndns(dev)->claim) 968 if (dev->driver || ndns->claim)
968 return -EBUSY; 969 return -EBUSY;
969 970
970 if (is_namespace_pmem(dev)) { 971 if (is_namespace_pmem(dev)) {
971 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); 972 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
972 973
973 uuid = nspm->uuid; 974 uuid = nspm->uuid;
975 id = nspm->id;
974 } else if (is_namespace_blk(dev)) { 976 } else if (is_namespace_blk(dev)) {
975 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev); 977 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
976 978
977 uuid = nsblk->uuid; 979 uuid = nsblk->uuid;
978 flags = NSLABEL_FLAG_LOCAL; 980 flags = NSLABEL_FLAG_LOCAL;
981 id = nsblk->id;
979 } 982 }
980 983
981 /* 984 /*
@@ -1034,20 +1037,17 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1034 1037
1035 nd_namespace_pmem_set_resource(nd_region, nspm, 1038 nd_namespace_pmem_set_resource(nd_region, nspm,
1036 val * nd_region->ndr_mappings); 1039 val * nd_region->ndr_mappings);
1037 } else if (is_namespace_blk(dev)) {
1038 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1039
1040 /*
1041 * Try to delete the namespace if we deleted all of its
1042 * allocation, this is not the seed device for the
1043 * region, and it is not actively claimed by a btt
1044 * instance.
1045 */
1046 if (val == 0 && nd_region->ns_seed != dev
1047 && !nsblk->common.claim)
1048 nd_device_unregister(dev, ND_ASYNC);
1049 } 1040 }
1050 1041
1042 /*
1043 * Try to delete the namespace if we deleted all of its
1044 * allocation, this is not the seed or 0th device for the
1045 * region, and it is not actively claimed by a btt, pfn, or dax
1046 * instance.
1047 */
1048 if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
1049 nd_device_unregister(dev, ND_ASYNC);
1050
1051 return rc; 1051 return rc;
1052} 1052}
1053 1053
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index a2ac9e641aa9..6c033c9a2f06 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -627,15 +627,12 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
627 size = resource_size(&nsio->res); 627 size = resource_size(&nsio->res);
628 npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K; 628 npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
629 if (nd_pfn->mode == PFN_MODE_PMEM) { 629 if (nd_pfn->mode == PFN_MODE_PMEM) {
630 unsigned long memmap_size;
631
632 /* 630 /*
633 * vmemmap_populate_hugepages() allocates the memmap array in 631 * vmemmap_populate_hugepages() allocates the memmap array in
634 * HPAGE_SIZE chunks. 632 * HPAGE_SIZE chunks.
635 */ 633 */
636 memmap_size = ALIGN(64 * npfns, HPAGE_SIZE); 634 offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
637 offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve, 635 max(nd_pfn->align, HPAGE_SIZE)) - start;
638 nd_pfn->align) - start;
639 } else if (nd_pfn->mode == PFN_MODE_RAM) 636 } else if (nd_pfn->mode == PFN_MODE_RAM)
640 offset = ALIGN(start + SZ_8K + dax_label_reserve, 637 offset = ALIGN(start + SZ_8K + dax_label_reserve,
641 nd_pfn->align) - start; 638 nd_pfn->align) - start;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7282d7495bf1..5b536be5a12e 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -90,7 +90,9 @@ static int read_pmem(struct page *page, unsigned int off,
90 90
91 rc = memcpy_from_pmem(mem + off, pmem_addr, len); 91 rc = memcpy_from_pmem(mem + off, pmem_addr, len);
92 kunmap_atomic(mem); 92 kunmap_atomic(mem);
93 return rc; 93 if (rc)
94 return -EIO;
95 return 0;
94} 96}
95 97
96static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, 98static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
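
read_pmem() previously passed memcpy_from_pmem()'s raw return value up the stack, but the block layer expects a negative errno; the fix normalizes any media-error indication to -EIO. Condensed:

    rc = memcpy_from_pmem(mem + off, pmem_addr, len);
    kunmap_atomic(mem);
    return rc ? -EIO : 0;           /* media error becomes -EIO */
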
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index b40cfb076f02..8a3c3e32a704 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1106,12 +1106,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
1106 if (ret) 1106 if (ret)
1107 return ret; 1107 return ret;
1108 1108
1109 /* Checking for ctrl->tagset is a trick to avoid sleeping on module 1109 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
1110 * load, since we only need the quirk on reset_controller. Notice
1111 * that the HGST device needs this delay only in firmware activation
1112 * procedure; unfortunately we have no (easy) way to verify this.
1113 */
1114 if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
1115 msleep(NVME_QUIRK_DELAY_AMOUNT); 1110 msleep(NVME_QUIRK_DELAY_AMOUNT);
1116 1111
1117 return nvme_wait_ready(ctrl, cap, false); 1112 return nvme_wait_ready(ctrl, cap, false);
@@ -1193,8 +1188,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
1193 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); 1188 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
1194 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); 1189 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
1195 } 1190 }
1196 if (ctrl->stripe_size) 1191 if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
1197 blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9); 1192 blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
1198 blk_queue_virt_boundary(q, ctrl->page_size - 1); 1193 blk_queue_virt_boundary(q, ctrl->page_size - 1);
1199 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) 1194 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
1200 vwc = true; 1195 vwc = true;
@@ -1250,19 +1245,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
1250 ctrl->max_hw_sectors = 1245 ctrl->max_hw_sectors =
1251 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 1246 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
1252 1247
1253 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
1254 unsigned int max_hw_sectors;
1255
1256 ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
1257 max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
1258 if (ctrl->max_hw_sectors) {
1259 ctrl->max_hw_sectors = min(max_hw_sectors,
1260 ctrl->max_hw_sectors);
1261 } else {
1262 ctrl->max_hw_sectors = max_hw_sectors;
1263 }
1264 }
1265
1266 nvme_set_queue_limits(ctrl, ctrl->admin_q); 1248 nvme_set_queue_limits(ctrl, ctrl->admin_q);
1267 ctrl->sgls = le32_to_cpu(id->sgls); 1249 ctrl->sgls = le32_to_cpu(id->sgls);
1268 ctrl->kas = le16_to_cpu(id->kas); 1250 ctrl->kas = le16_to_cpu(id->kas);
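
The NVMe core hunks retire ctrl->stripe_size: when the stripe quirk is set, max_hw_sectors already encodes the boundary, so it can feed blk_queue_chunk_sectors() directly, and the DELAY_BEFORE_CHK_RDY quirk now applies unconditionally instead of being keyed off ctrl->tagset. The simplified limit setup, sketched:

    if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
            blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
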
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 771e2e761872..e65041c640cb 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1491,19 +1491,20 @@ static int
1491nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) 1491nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1492{ 1492{
1493 struct nvme_fc_queue *queue = &ctrl->queues[1]; 1493 struct nvme_fc_queue *queue = &ctrl->queues[1];
1494 int i, j, ret; 1494 int i, ret;
1495 1495
1496 for (i = 1; i < ctrl->queue_count; i++, queue++) { 1496 for (i = 1; i < ctrl->queue_count; i++, queue++) {
1497 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); 1497 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
1498 if (ret) { 1498 if (ret)
1499 for (j = i-1; j >= 0; j--) 1499 goto delete_queues;
1500 __nvme_fc_delete_hw_queue(ctrl,
1501 &ctrl->queues[j], j);
1502 return ret;
1503 }
1504 } 1500 }
1505 1501
1506 return 0; 1502 return 0;
1503
1504delete_queues:
1505 for (; i >= 0; i--)
1506 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
1507 return ret;
1507} 1508}
1508 1509
1509static int 1510static int
@@ -1653,23 +1654,22 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1653 struct nvme_fc_fcp_op *op) 1654 struct nvme_fc_fcp_op *op)
1654{ 1655{
1655 struct nvmefc_fcp_req *freq = &op->fcp_req; 1656 struct nvmefc_fcp_req *freq = &op->fcp_req;
1656 u32 map_len = nvme_map_len(rq);
1657 enum dma_data_direction dir; 1657 enum dma_data_direction dir;
1658 int ret; 1658 int ret;
1659 1659
1660 freq->sg_cnt = 0; 1660 freq->sg_cnt = 0;
1661 1661
1662 if (!map_len) 1662 if (!blk_rq_payload_bytes(rq))
1663 return 0; 1663 return 0;
1664 1664
1665 freq->sg_table.sgl = freq->first_sgl; 1665 freq->sg_table.sgl = freq->first_sgl;
1666 ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments, 1666 ret = sg_alloc_table_chained(&freq->sg_table,
1667 freq->sg_table.sgl); 1667 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
1668 if (ret) 1668 if (ret)
1669 return -ENOMEM; 1669 return -ENOMEM;
1670 1670
1671 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); 1671 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
1672 WARN_ON(op->nents > rq->nr_phys_segments); 1672 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
1673 dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 1673 dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
1674 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, 1674 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
1675 op->nents, dir); 1675 op->nents, dir);
@@ -1853,7 +1853,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
1853 if (ret) 1853 if (ret)
1854 return ret; 1854 return ret;
1855 1855
1856 data_len = nvme_map_len(rq); 1856 data_len = blk_rq_payload_bytes(rq);
1857 if (data_len) 1857 if (data_len)
1858 io_dir = ((rq_data_dir(rq) == WRITE) ? 1858 io_dir = ((rq_data_dir(rq) == WRITE) ?
1859 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); 1859 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
@@ -2401,8 +2401,8 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2401 WARN_ON_ONCE(!changed); 2401 WARN_ON_ONCE(!changed);
2402 2402
2403 dev_info(ctrl->ctrl.device, 2403 dev_info(ctrl->ctrl.device,
2404 "NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n", 2404 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
2405 ctrl->cnum, ctrl->ctrl.opts->subsysnqn, &ctrl); 2405 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
2406 2406
2407 kref_get(&ctrl->ctrl.kref); 2407 kref_get(&ctrl->ctrl.kref);
2408 2408
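
The hw-queue creation loop is restructured into the usual kernel unwind idiom: bail to a single label on failure and delete queues from the failing index back down, instead of open-coding a reverse loop inside the error branch. The pattern, generically (helper names illustrative):

    for (i = 1; i < count; i++) {
            ret = create_one(i);
            if (ret)
                    goto delete_queues;
    }
    return 0;

    delete_queues:
    for (; i >= 0; i--)             /* unwind i..0, including queue 0 */
            delete_one(i);
    return ret;
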
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bd5321441d12..aead6d08ed2c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -135,7 +135,6 @@ struct nvme_ctrl {
135 135
136 u32 page_size; 136 u32 page_size;
137 u32 max_hw_sectors; 137 u32 max_hw_sectors;
138 u32 stripe_size;
139 u16 oncs; 138 u16 oncs;
140 u16 vid; 139 u16 vid;
141 atomic_t abort_limit; 140 atomic_t abort_limit;
@@ -226,14 +225,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
226 return (sector >> (ns->lba_shift - 9)); 225 return (sector >> (ns->lba_shift - 9));
227} 226}
228 227
229static inline unsigned nvme_map_len(struct request *rq)
230{
231 if (req_op(rq) == REQ_OP_DISCARD)
232 return sizeof(struct nvme_dsm_range);
233 else
234 return blk_rq_bytes(rq);
235}
236
237static inline void nvme_cleanup_cmd(struct request *req) 228static inline void nvme_cleanup_cmd(struct request *req)
238{ 229{
239 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { 230 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
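
nvme_map_len() existed because discard requests carry a DSM range as a special payload rather than the request's nominal byte count; blk_rq_payload_bytes() in the core block layer now returns the correct length for both cases, so every transport (PCI, FC, RDMA, as the following hunks show) can call it directly:

    unsigned int size = blk_rq_payload_bytes(rq);   /* DSM-aware length */
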
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3d21a154dce7..3faefabf339c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -306,11 +306,11 @@ static __le64 **iod_list(struct request *req)
306 return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req)); 306 return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
307} 307}
308 308
309static int nvme_init_iod(struct request *rq, unsigned size, 309static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
310 struct nvme_dev *dev)
311{ 310{
312 struct nvme_iod *iod = blk_mq_rq_to_pdu(rq); 311 struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
313 int nseg = blk_rq_nr_phys_segments(rq); 312 int nseg = blk_rq_nr_phys_segments(rq);
313 unsigned int size = blk_rq_payload_bytes(rq);
314 314
315 if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) { 315 if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
316 iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC); 316 iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -420,12 +420,11 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
420} 420}
421#endif 421#endif
422 422
423static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req, 423static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
424 int total_len)
425{ 424{
426 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 425 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
427 struct dma_pool *pool; 426 struct dma_pool *pool;
428 int length = total_len; 427 int length = blk_rq_payload_bytes(req);
429 struct scatterlist *sg = iod->sg; 428 struct scatterlist *sg = iod->sg;
430 int dma_len = sg_dma_len(sg); 429 int dma_len = sg_dma_len(sg);
431 u64 dma_addr = sg_dma_address(sg); 430 u64 dma_addr = sg_dma_address(sg);
@@ -501,7 +500,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
501} 500}
502 501
503static int nvme_map_data(struct nvme_dev *dev, struct request *req, 502static int nvme_map_data(struct nvme_dev *dev, struct request *req,
504 unsigned size, struct nvme_command *cmnd) 503 struct nvme_command *cmnd)
505{ 504{
506 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 505 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
507 struct request_queue *q = req->q; 506 struct request_queue *q = req->q;
@@ -519,7 +518,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
519 DMA_ATTR_NO_WARN)) 518 DMA_ATTR_NO_WARN))
520 goto out; 519 goto out;
521 520
522 if (!nvme_setup_prps(dev, req, size)) 521 if (!nvme_setup_prps(dev, req))
523 goto out_unmap; 522 goto out_unmap;
524 523
525 ret = BLK_MQ_RQ_QUEUE_ERROR; 524 ret = BLK_MQ_RQ_QUEUE_ERROR;
@@ -580,7 +579,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
580 struct nvme_dev *dev = nvmeq->dev; 579 struct nvme_dev *dev = nvmeq->dev;
581 struct request *req = bd->rq; 580 struct request *req = bd->rq;
582 struct nvme_command cmnd; 581 struct nvme_command cmnd;
583 unsigned map_len;
584 int ret = BLK_MQ_RQ_QUEUE_OK; 582 int ret = BLK_MQ_RQ_QUEUE_OK;
585 583
586 /* 584 /*
@@ -600,13 +598,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
600 if (ret != BLK_MQ_RQ_QUEUE_OK) 598 if (ret != BLK_MQ_RQ_QUEUE_OK)
601 return ret; 599 return ret;
602 600
603 map_len = nvme_map_len(req); 601 ret = nvme_init_iod(req, dev);
604 ret = nvme_init_iod(req, map_len, dev);
605 if (ret != BLK_MQ_RQ_QUEUE_OK) 602 if (ret != BLK_MQ_RQ_QUEUE_OK)
606 goto out_free_cmd; 603 goto out_free_cmd;
607 604
608 if (blk_rq_nr_phys_segments(req)) 605 if (blk_rq_nr_phys_segments(req))
609 ret = nvme_map_data(dev, req, map_len, &cmnd); 606 ret = nvme_map_data(dev, req, &cmnd);
610 607
611 if (ret != BLK_MQ_RQ_QUEUE_OK) 608 if (ret != BLK_MQ_RQ_QUEUE_OK)
612 goto out_cleanup_iod; 609 goto out_cleanup_iod;
@@ -712,15 +709,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
712 req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id); 709 req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
713 nvme_req(req)->result = cqe.result; 710 nvme_req(req)->result = cqe.result;
714 blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1); 711 blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
715
716 } 712 }
717 713
718 /* If the controller ignores the cq head doorbell and continuously
719 * writes to the queue, it is theoretically possible to wrap around
720 * the queue twice and mistakenly return IRQ_NONE. Linux only
721 * requires that 0.1% of your interrupts are handled, so this isn't
722 * a big problem.
723 */
724 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) 714 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
725 return; 715 return;
726 716
@@ -1909,10 +1899,10 @@ static int nvme_dev_map(struct nvme_dev *dev)
1909 if (!dev->bar) 1899 if (!dev->bar)
1910 goto release; 1900 goto release;
1911 1901
1912 return 0; 1902 return 0;
1913 release: 1903 release:
1914 pci_release_mem_regions(pdev); 1904 pci_release_mem_regions(pdev);
1915 return -ENODEV; 1905 return -ENODEV;
1916} 1906}
1917 1907
1918static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1908static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f587af345889..557f29b1f1bb 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -981,8 +981,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
981} 981}
982 982
983static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, 983static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
984 struct request *rq, unsigned int map_len, 984 struct request *rq, struct nvme_command *c)
985 struct nvme_command *c)
986{ 985{
987 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); 986 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
988 struct nvme_rdma_device *dev = queue->device; 987 struct nvme_rdma_device *dev = queue->device;
@@ -1014,9 +1013,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
1014 } 1013 }
1015 1014
1016 if (count == 1) { 1015 if (count == 1) {
1017 if (rq_data_dir(rq) == WRITE && 1016 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
1018 map_len <= nvme_rdma_inline_data_size(queue) && 1017 blk_rq_payload_bytes(rq) <=
1019 nvme_rdma_queue_idx(queue)) 1018 nvme_rdma_inline_data_size(queue))
1020 return nvme_rdma_map_sg_inline(queue, req, c); 1019 return nvme_rdma_map_sg_inline(queue, req, c);
1021 1020
1022 if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) 1021 if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
@@ -1422,7 +1421,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
1422 struct request *rq) 1421 struct request *rq)
1423{ 1422{
1424 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { 1423 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
1425 struct nvme_command *cmd = (struct nvme_command *)rq->cmd; 1424 struct nvme_command *cmd = nvme_req(rq)->cmd;
1426 1425
1427 if (rq->cmd_type != REQ_TYPE_DRV_PRIV || 1426 if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
1428 cmd->common.opcode != nvme_fabrics_command || 1427 cmd->common.opcode != nvme_fabrics_command ||
@@ -1444,7 +1443,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1444 struct nvme_command *c = sqe->data; 1443 struct nvme_command *c = sqe->data;
1445 bool flush = false; 1444 bool flush = false;
1446 struct ib_device *dev; 1445 struct ib_device *dev;
1447 unsigned int map_len;
1448 int ret; 1446 int ret;
1449 1447
1450 WARN_ON_ONCE(rq->tag < 0); 1448 WARN_ON_ONCE(rq->tag < 0);
@@ -1462,8 +1460,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1462 1460
1463 blk_mq_start_request(rq); 1461 blk_mq_start_request(rq);
1464 1462
1465 map_len = nvme_map_len(rq); 1463 ret = nvme_rdma_map_data(queue, rq, c);
1466 ret = nvme_rdma_map_data(queue, rq, map_len, c);
1467 if (ret < 0) { 1464 if (ret < 0) {
1468 dev_err(queue->ctrl->ctrl.device, 1465 dev_err(queue->ctrl->ctrl.device,
1469 "Failed to map data (%d)\n", ret); 1466 "Failed to map data (%d)\n", ret);
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index b71e95044b43..a5c09e703bd8 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -2160,30 +2160,6 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
2160 return nvme_trans_status_code(hdr, nvme_sc); 2160 return nvme_trans_status_code(hdr, nvme_sc);
2161} 2161}
2162 2162
2163static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2164 u8 *cmd)
2165{
2166 u8 immed, no_flush;
2167
2168 immed = cmd[1] & 0x01;
2169 no_flush = cmd[4] & 0x04;
2170
2171 if (immed != 0) {
2172 return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
2173 ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
2174 SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
2175 } else {
2176 if (no_flush == 0) {
2177 /* Issue NVME FLUSH command prior to START STOP UNIT */
2178 int res = nvme_trans_synchronize_cache(ns, hdr);
2179 if (res)
2180 return res;
2181 }
2182
2183 return 0;
2184 }
2185}
2186
2187static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr, 2163static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
2188 u8 *cmd) 2164 u8 *cmd)
2189{ 2165{
@@ -2439,9 +2415,6 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
2439 case SECURITY_PROTOCOL_OUT: 2415 case SECURITY_PROTOCOL_OUT:
2440 retcode = nvme_trans_security_protocol(ns, hdr, cmd); 2416 retcode = nvme_trans_security_protocol(ns, hdr, cmd);
2441 break; 2417 break;
2442 case START_STOP:
2443 retcode = nvme_trans_start_stop(ns, hdr, cmd);
2444 break;
2445 case SYNCHRONIZE_CACHE: 2418 case SYNCHRONIZE_CACHE:
2446 retcode = nvme_trans_synchronize_cache(ns, hdr); 2419 retcode = nvme_trans_synchronize_cache(ns, hdr);
2447 break; 2420 break;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index ec1ad2aa0a4c..95ae52390478 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -382,7 +382,6 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
382{ 382{
383 struct nvmet_subsys *subsys = req->sq->ctrl->subsys; 383 struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
384 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]); 384 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
385 u64 val;
386 u32 val32; 385 u32 val32;
387 u16 status = 0; 386 u16 status = 0;
388 387
@@ -392,8 +391,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
392 (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16)); 391 (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
393 break; 392 break;
394 case NVME_FEAT_KATO: 393 case NVME_FEAT_KATO:
395 val = le64_to_cpu(req->cmd->prop_set.value); 394 val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
396 val32 = val & 0xffff;
397 req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000); 395 req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
398 nvmet_set_result(req, req->sq->ctrl->kato); 396 nvmet_set_result(req, req->sq->ctrl->kato);
399 break; 397 break;
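
The Set Features fix reads KATO from the right place: for an admin Set Features command the keep-alive timeout lives in CDW11 (indexed here as cdw10[1]) in milliseconds, not in the fabrics property-set value field. Condensed:

    u32 kato_ms = le32_to_cpu(req->cmd->common.cdw10[1]);   /* CDW11   */
    req->sq->ctrl->kato = DIV_ROUND_UP(kato_ms, 1000);      /* seconds */
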
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 6f5074153dcd..be8c800078e2 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item)
631{ 631{
632 struct nvmet_subsys *subsys = to_subsys(item); 632 struct nvmet_subsys *subsys = to_subsys(item);
633 633
634 nvmet_subsys_del_ctrls(subsys);
634 nvmet_subsys_put(subsys); 635 nvmet_subsys_put(subsys);
635} 636}
636 637
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b1d66ed655c9..fc5ba2f9e15f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
200 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n", 200 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
201 ctrl->cntlid, ctrl->kato); 201 ctrl->cntlid, ctrl->kato);
202 202
203 ctrl->ops->delete_ctrl(ctrl); 203 nvmet_ctrl_fatal_error(ctrl);
204} 204}
205 205
206static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) 206static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
816 list_del(&ctrl->subsys_entry); 816 list_del(&ctrl->subsys_entry);
817 mutex_unlock(&subsys->lock); 817 mutex_unlock(&subsys->lock);
818 818
819 flush_work(&ctrl->async_event_work);
820 cancel_work_sync(&ctrl->fatal_err_work);
821
819 ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid); 822 ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
820 nvmet_subsys_put(subsys); 823 nvmet_subsys_put(subsys);
821 824
@@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref)
935 kfree(subsys); 938 kfree(subsys);
936} 939}
937 940
941void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
942{
943 struct nvmet_ctrl *ctrl;
944
945 mutex_lock(&subsys->lock);
946 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
947 ctrl->ops->delete_ctrl(ctrl);
948 mutex_unlock(&subsys->lock);
949}
950
938void nvmet_subsys_put(struct nvmet_subsys *subsys) 951void nvmet_subsys_put(struct nvmet_subsys *subsys)
939{ 952{
940 kref_put(&subsys->ref, nvmet_subsys_free); 953 kref_put(&subsys->ref, nvmet_subsys_free);
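
Three related lifetime fixes in the target core: a keep-alive expiry now raises a fatal-error AEN via nvmet_ctrl_fatal_error() instead of deleting the controller inline from timer context, controller free flushes or cancels its pending work items before the memory goes away, and deleting a subsystem from configfs first walks its controller list so no controller outlives it. The work-teardown rule, sketched:

    flush_work(&ctrl->async_event_work);     /* let queued AENs finish */
    cancel_work_sync(&ctrl->fatal_err_work); /* no late fatal handler  */
    /* ...only now is it safe to drop the last reference */
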
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 173e842f19c9..ba57f9852bde 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1314 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf; 1314 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1315 struct fcnvme_ls_disconnect_acc *acc = 1315 struct fcnvme_ls_disconnect_acc *acc =
1316 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf; 1316 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1317 struct nvmet_fc_tgt_queue *queue; 1317 struct nvmet_fc_tgt_queue *queue = NULL;
1318 struct nvmet_fc_tgt_assoc *assoc; 1318 struct nvmet_fc_tgt_assoc *assoc;
1319 int ret = 0; 1319 int ret = 0;
1320 bool del_assoc = false; 1320 bool del_assoc = false;
@@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1348 assoc = nvmet_fc_find_target_assoc(tgtport, 1348 assoc = nvmet_fc_find_target_assoc(tgtport,
1349 be64_to_cpu(rqst->associd.association_id)); 1349 be64_to_cpu(rqst->associd.association_id));
1350 iod->assoc = assoc; 1350 iod->assoc = assoc;
1351 if (!assoc) 1351 if (assoc) {
1352 if (rqst->discon_cmd.scope ==
1353 FCNVME_DISCONN_CONNECTION) {
1354 queue = nvmet_fc_find_target_queue(tgtport,
1355 be64_to_cpu(
1356 rqst->discon_cmd.id));
1357 if (!queue) {
1358 nvmet_fc_tgt_a_put(assoc);
1359 ret = VERR_NO_CONN;
1360 }
1361 }
1362 } else
1352 ret = VERR_NO_ASSOC; 1363 ret = VERR_NO_ASSOC;
1353 } 1364 }
1354 1365
@@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1373 FCNVME_LS_DISCONNECT); 1384 FCNVME_LS_DISCONNECT);
1374 1385
1375 1386
1376 if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) { 1387 /* are we to delete a Connection ID (queue) */
1377 queue = nvmet_fc_find_target_queue(tgtport, 1388 if (queue) {
1378 be64_to_cpu(rqst->discon_cmd.id)); 1389 int qid = queue->qid;
1379 if (queue) {
1380 int qid = queue->qid;
1381 1390
1382 nvmet_fc_delete_target_queue(queue); 1391 nvmet_fc_delete_target_queue(queue);
1383 1392
1384 /* release the get taken by find_target_queue */ 1393 /* release the get taken by find_target_queue */
1385 nvmet_fc_tgt_q_put(queue); 1394 nvmet_fc_tgt_q_put(queue);
1386 1395
1387 /* tear association down if io queue terminated */ 1396 /* tear association down if io queue terminated */
1388 if (!qid) 1397 if (!qid)
1389 del_assoc = true; 1398 del_assoc = true;
1390 }
1391 } 1399 }
1392 1400
1393 /* release get taken in nvmet_fc_find_target_assoc */ 1401 /* release get taken in nvmet_fc_find_target_assoc */
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index bcb8ebeb01c5..4e8e6a22bce1 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -845,7 +845,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
845 rport->lport = nport->lport; 845 rport->lport = nport->lport;
846 nport->rport = rport; 846 nport->rport = rport;
847 847
848 return ret ? ret : count; 848 return count;
849} 849}
850 850
851 851
@@ -952,7 +952,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
952 tport->lport = nport->lport; 952 tport->lport = nport->lport;
953 nport->tport = tport; 953 nport->tport = tport;
954 954
955 return ret ? ret : count; 955 return count;
956} 956}
957 957
958 958
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 23d5eb1c944f..cc7ad06b43a7 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
282struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, 282struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
283 enum nvme_subsys_type type); 283 enum nvme_subsys_type type);
284void nvmet_subsys_put(struct nvmet_subsys *subsys); 284void nvmet_subsys_put(struct nvmet_subsys *subsys);
285void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
285 286
286struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid); 287struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
287void nvmet_put_namespace(struct nvmet_ns *ns); 288void nvmet_put_namespace(struct nvmet_ns *ns);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 8c3760a78ac0..60990220bd83 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
438{ 438{
439 struct ib_recv_wr *bad_wr; 439 struct ib_recv_wr *bad_wr;
440 440
441 ib_dma_sync_single_for_device(ndev->device,
442 cmd->sge[0].addr, cmd->sge[0].length,
443 DMA_FROM_DEVICE);
444
441 if (ndev->srq) 445 if (ndev->srq)
442 return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr); 446 return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
443 return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr); 447 return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
538 first_wr = &rsp->send_wr; 542 first_wr = &rsp->send_wr;
539 543
540 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); 544 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
545
546 ib_dma_sync_single_for_device(rsp->queue->dev->device,
547 rsp->send_sge.addr, rsp->send_sge.length,
548 DMA_TO_DEVICE);
549
541 if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) { 550 if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
542 pr_err("sending cmd response failed\n"); 551 pr_err("sending cmd response failed\n");
543 nvmet_rdma_release_rsp(rsp); 552 nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
698 cmd->n_rdma = 0; 707 cmd->n_rdma = 0;
699 cmd->req.port = queue->port; 708 cmd->req.port = queue->port;
700 709
710
711 ib_dma_sync_single_for_cpu(queue->dev->device,
712 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
713 DMA_FROM_DEVICE);
714 ib_dma_sync_single_for_cpu(queue->dev->device,
715 cmd->send_sge.addr, cmd->send_sge.length,
716 DMA_TO_DEVICE);
717
701 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, 718 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
702 &queue->nvme_sq, &nvmet_rdma_ops)) 719 &queue->nvme_sq, &nvmet_rdma_ops))
703 return; 720 return;
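
Note: these three hunks bracket every use of the streaming-mapped command and response buffers with DMA sync calls, following the usual ownership handshake: sync_for_cpu before the CPU touches a buffer the device wrote, sync_for_device before handing it back for DMA. A sketch of the receive side, where dev/addr/len stand for the queue's ib_device and SGE fields and handle_command() is a hypothetical consumer:

    /* CPU takes ownership before parsing the freshly received command */
    ib_dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
    handle_command(buf);

    /* device takes ownership again before the recv WR is re-posted */
    ib_dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
    ib_post_recv(qp, wr, &bad_wr);

On cache-coherent platforms these calls compile away to almost nothing; on non-coherent ones they are what makes the data the HCA wrote actually visible to the parser.
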
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 965911d9b36a..398ea7f54826 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -981,8 +981,8 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
981 * @cell: nvmem cell to be read. 981 * @cell: nvmem cell to be read.
982 * @len: pointer to length of cell which will be populated on successful read. 982 * @len: pointer to length of cell which will be populated on successful read.
983 * 983 *
984 * Return: ERR_PTR() on error or a valid pointer to a char * buffer on success. 984 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
985 * The buffer should be freed by the consumer with a kfree(). 985 * buffer should be freed by the consumer with a kfree().
986 */ 986 */
987void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len) 987void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
988{ 988{
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index ac27b9bac3b9..8e7b120696fa 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -71,7 +71,7 @@ static struct nvmem_config imx_ocotp_nvmem_config = {
71 71
72static const struct of_device_id imx_ocotp_dt_ids[] = { 72static const struct of_device_id imx_ocotp_dt_ids[] = {
73 { .compatible = "fsl,imx6q-ocotp", (void *)128 }, 73 { .compatible = "fsl,imx6q-ocotp", (void *)128 },
74 { .compatible = "fsl,imx6sl-ocotp", (void *)32 }, 74 { .compatible = "fsl,imx6sl-ocotp", (void *)64 },
75 { .compatible = "fsl,imx6sx-ocotp", (void *)128 }, 75 { .compatible = "fsl,imx6sx-ocotp", (void *)128 },
76 { }, 76 { },
77}; 77};
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index b5305f08b184..2bdb6c389328 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -21,11 +21,11 @@ static int qfprom_reg_read(void *context,
21 unsigned int reg, void *_val, size_t bytes) 21 unsigned int reg, void *_val, size_t bytes)
22{ 22{
23 void __iomem *base = context; 23 void __iomem *base = context;
24 u32 *val = _val; 24 u8 *val = _val;
25 int i = 0, words = bytes / 4; 25 int i = 0, words = bytes;
26 26
27 while (words--) 27 while (words--)
28 *val++ = readl(base + reg + (i++ * 4)); 28 *val++ = readb(base + reg + i++);
29 29
30 return 0; 30 return 0;
31} 31}
@@ -34,11 +34,11 @@ static int qfprom_reg_write(void *context,
34 unsigned int reg, void *_val, size_t bytes) 34 unsigned int reg, void *_val, size_t bytes)
35{ 35{
36 void __iomem *base = context; 36 void __iomem *base = context;
37 u32 *val = _val; 37 u8 *val = _val;
38 int i = 0, words = bytes / 4; 38 int i = 0, words = bytes;
39 39
40 while (words--) 40 while (words--)
41 writel(*val++, base + reg + (i++ * 4)); 41 writeb(*val++, base + reg + i++);
42 42
43 return 0; 43 return 0;
44} 44}
@@ -53,7 +53,7 @@ static int qfprom_remove(struct platform_device *pdev)
53static struct nvmem_config econfig = { 53static struct nvmem_config econfig = {
54 .name = "qfprom", 54 .name = "qfprom",
55 .owner = THIS_MODULE, 55 .owner = THIS_MODULE,
56 .stride = 4, 56 .stride = 1,
57 .word_size = 1, 57 .word_size = 1,
58 .reg_read = qfprom_reg_read, 58 .reg_read = qfprom_reg_read,
59 .reg_write = qfprom_reg_write, 59 .reg_write = qfprom_reg_write,
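
Note: the qfprom change narrows the accessors from 32-bit words to single bytes so the access width matches the declared word_size of 1, and relaxes the stride to 1 so byte-aligned cells become addressable (with stride = 4, a cell at offset 0x2, say, was previously rejected by the nvmem core). A sketch of the resulting byte loop, equivalent to the hunk above:

    static int byte_reg_read(void *context, unsigned int reg,
                             void *_val, size_t bytes)
    {
            void __iomem *base = context;
            u8 *val = _val;

            while (bytes--)
                    *val++ = readb(base + reg++);
            return 0;
    }
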
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index dd6d4ccb41e4..3858b87fd0bb 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
293 p->irq = PARPORT_IRQ_NONE; 293 p->irq = PARPORT_IRQ_NONE;
294 } 294 }
295 if (p->irq != PARPORT_IRQ_NONE) { 295 if (p->irq != PARPORT_IRQ_NONE) {
296 printk(", irq %d", p->irq); 296 pr_cont(", irq %d", p->irq);
297 297
298 if (p->dma == PARPORT_DMA_AUTO) { 298 if (p->dma == PARPORT_DMA_AUTO) {
299 p->dma = PARPORT_DMA_NONE; 299 p->dma = PARPORT_DMA_NONE;
@@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base,
303 is mandatory (see above) */ 303 is mandatory (see above) */
304 p->dma = PARPORT_DMA_NONE; 304 p->dma = PARPORT_DMA_NONE;
305 305
306 printk(" ["); 306 pr_cont(" [");
307#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}} 307#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
308 { 308 {
309 int f = 0; 309 int f = 0;
310 printmode(PCSPP); 310 printmode(PCSPP);
@@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base,
315// printmode(DMA); 315// printmode(DMA);
316 } 316 }
317#undef printmode 317#undef printmode
318 printk("]\n"); 318 pr_cont("]\n");
319 319
320 if (p->irq != PARPORT_IRQ_NONE) { 320 if (p->irq != PARPORT_IRQ_NONE) {
321 if (request_irq (p->irq, parport_irq_handler, 321 if (request_irq (p->irq, parport_irq_handler,
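
Note: printk() calls without KERN_CONT each start a new log record on current kernels, so the pieces of this one-line port summary would print on separate lines; pr_cont() is the helper that explicitly continues the current record. A small usage sketch (field names as in the driver above):

    pr_info("%s: PC-style parport at 0x%lx", p->name, p->base);
    if (p->irq != PARPORT_IRQ_NONE)
            pr_cont(", irq %d", p->irq);    /* appends to the same line */
    pr_cont(" [SPP,PS2]\n");                /* closes the line */
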
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index 1f38d0836751..f1b633bce525 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -517,7 +517,7 @@ static int xgene_msi_probe(struct platform_device *pdev)
517 517
518 rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online", 518 rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
519 xgene_msi_hwirq_alloc, NULL); 519 xgene_msi_hwirq_alloc, NULL);
520 if (rc) 520 if (rc < 0)
521 goto err_cpuhp; 521 goto err_cpuhp;
522 pci_xgene_online = rc; 522 pci_xgene_online = rc;
523 rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL, 523 rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
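
Note: cpuhp_setup_state() called with CPUHP_AP_ONLINE_DYN hands back the dynamically allocated state number, which is positive, so the old `if (rc)` treated every successful registration as a failure. Only negative returns signal an error, and the positive id must be kept for the matching teardown:

    rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
                           xgene_msi_hwirq_alloc, NULL);
    if (rc < 0)                     /* < 0: error; > 0: allocated state */
            goto err_cpuhp;
    pci_xgene_online = rc;          /* needed for cpuhp_remove_state() */
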
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index bed19994c1e9..af8f6e92e885 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
807{ 807{
808 u32 val; 808 u32 val;
809 809
810 /* get iATU unroll support */
811 pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
812 dev_dbg(pp->dev, "iATU unroll: %s\n",
813 pp->iatu_unroll_enabled ? "enabled" : "disabled");
814
815 /* set the number of lanes */ 810 /* set the number of lanes */
816 val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL); 811 val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
817 val &= ~PORT_LINK_MODE_MASK; 812 val &= ~PORT_LINK_MODE_MASK;
@@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
882 * we should not program the ATU here. 877 * we should not program the ATU here.
883 */ 878 */
884 if (!pp->ops->rd_other_conf) { 879 if (!pp->ops->rd_other_conf) {
880 /* get iATU unroll support */
881 pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
882 dev_dbg(pp->dev, "iATU unroll: %s\n",
883 pp->iatu_unroll_enabled ? "enabled" : "disabled");
884
885 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, 885 dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
886 PCIE_ATU_TYPE_MEM, pp->mem_base, 886 PCIE_ATU_TYPE_MEM, pp->mem_base,
887 pp->mem_bus_addr, pp->mem_size); 887 pp->mem_bus_addr, pp->mem_size);
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 10c9c0ba8ff2..ec0b4c11ccd9 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -31,7 +31,6 @@
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/types.h> 32#include <linux/types.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/pm_runtime.h>
35#include <linux/pci.h> 34#include <linux/pci.h>
36#include "../pci.h" 35#include "../pci.h"
37#include "pciehp.h" 36#include "pciehp.h"
@@ -99,7 +98,6 @@ static int board_added(struct slot *p_slot)
99 pciehp_green_led_blink(p_slot); 98 pciehp_green_led_blink(p_slot);
100 99
101 /* Check link training status */ 100 /* Check link training status */
102 pm_runtime_get_sync(&ctrl->pcie->port->dev);
103 retval = pciehp_check_link_status(ctrl); 101 retval = pciehp_check_link_status(ctrl);
104 if (retval) { 102 if (retval) {
105 ctrl_err(ctrl, "Failed to check link status\n"); 103 ctrl_err(ctrl, "Failed to check link status\n");
@@ -120,14 +118,12 @@ static int board_added(struct slot *p_slot)
120 if (retval != -EEXIST) 118 if (retval != -EEXIST)
121 goto err_exit; 119 goto err_exit;
122 } 120 }
123 pm_runtime_put(&ctrl->pcie->port->dev);
124 121
125 pciehp_green_led_on(p_slot); 122 pciehp_green_led_on(p_slot);
126 pciehp_set_attention_status(p_slot, 0); 123 pciehp_set_attention_status(p_slot, 0);
127 return 0; 124 return 0;
128 125
129err_exit: 126err_exit:
130 pm_runtime_put(&ctrl->pcie->port->dev);
131 set_slot_off(ctrl, p_slot); 127 set_slot_off(ctrl, p_slot);
132 return retval; 128 return retval;
133} 129}
@@ -141,9 +137,7 @@ static int remove_board(struct slot *p_slot)
141 int retval; 137 int retval;
142 struct controller *ctrl = p_slot->ctrl; 138 struct controller *ctrl = p_slot->ctrl;
143 139
144 pm_runtime_get_sync(&ctrl->pcie->port->dev);
145 retval = pciehp_unconfigure_device(p_slot); 140 retval = pciehp_unconfigure_device(p_slot);
146 pm_runtime_put(&ctrl->pcie->port->dev);
147 if (retval) 141 if (retval)
148 return retval; 142 return retval;
149 143
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 50c5003295ca..7f73bacf13ed 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1206,6 +1206,16 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1206 if (flags & PCI_IRQ_AFFINITY) { 1206 if (flags & PCI_IRQ_AFFINITY) {
1207 if (!affd) 1207 if (!affd)
1208 affd = &msi_default_affd; 1208 affd = &msi_default_affd;
1209
1210 if (affd->pre_vectors + affd->post_vectors > min_vecs)
1211 return -EINVAL;
1212
1213 /*
1214 * If there aren't any vectors left after applying the pre/post
1215 * vectors don't bother with assigning affinity.
1216 */
1217 if (affd->pre_vectors + affd->post_vectors == min_vecs)
1218 affd = NULL;
1209 } else { 1219 } else {
1210 if (WARN_ON(affd)) 1220 if (WARN_ON(affd))
1211 affd = NULL; 1221 affd = NULL;
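
Note: the added checks guard the affinity descriptor against impossible geometry. Worked through under the same rules: with min_vecs = 4 and pre_vectors + post_vectors = 5 the request can never be satisfied (-EINVAL); with pre + post = 4 the allocation can succeed but no vectors remain to spread, so affinity assignment is skipped by dropping affd. The logic in outline:

    unsigned int reserved = affd->pre_vectors + affd->post_vectors;

    if (reserved > min_vecs)
            return -EINVAL;         /* can never fit */
    if (reserved == min_vecs)
            affd = NULL;            /* nothing left to spread */
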
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a881c0d3d2e8..7904d02ffdb9 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2241,10 +2241,13 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
2241 return false; 2241 return false;
2242 2242
2243 /* 2243 /*
2244 * Hotplug ports handled by firmware in System Management Mode 2244 * Hotplug interrupts cannot be delivered if the link is down,
2245 * so parents of a hotplug port must stay awake. In addition,
2246 * hotplug ports handled by firmware in System Management Mode
2245 * may not be put into D3 by the OS (Thunderbolt on non-Macs). 2247 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2248 * For simplicity, disallow in general for now.
2246 */ 2249 */
2247 if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge)) 2250 if (bridge->is_hotplug_bridge)
2248 return false; 2251 return false;
2249 2252
2250 if (pci_bridge_d3_force) 2253 if (pci_bridge_d3_force)
@@ -2276,10 +2279,7 @@ static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2276 !pci_pme_capable(dev, PCI_D3cold)) || 2279 !pci_pme_capable(dev, PCI_D3cold)) ||
2277 2280
2278 /* If it is a bridge it must be allowed to go to D3. */ 2281 /* If it is a bridge it must be allowed to go to D3. */
2279 !pci_power_manageable(dev) || 2282 !pci_power_manageable(dev))
2280
2281 /* Hotplug interrupts cannot be delivered if the link is down. */
2282 dev->is_hotplug_bridge)
2283 2283
2284 *d3cold_ok = false; 2284 *d3cold_ok = false;
2285 2285
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 17ac1dce3286..3dd8bcbb3011 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -532,25 +532,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
532 link = kzalloc(sizeof(*link), GFP_KERNEL); 532 link = kzalloc(sizeof(*link), GFP_KERNEL);
533 if (!link) 533 if (!link)
534 return NULL; 534 return NULL;
535
535 INIT_LIST_HEAD(&link->sibling); 536 INIT_LIST_HEAD(&link->sibling);
536 INIT_LIST_HEAD(&link->children); 537 INIT_LIST_HEAD(&link->children);
537 INIT_LIST_HEAD(&link->link); 538 INIT_LIST_HEAD(&link->link);
538 link->pdev = pdev; 539 link->pdev = pdev;
539 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) { 540
541 /*
542 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
543 * hierarchies.
544 */
545 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
546 pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
547 link->root = link;
548 } else {
540 struct pcie_link_state *parent; 549 struct pcie_link_state *parent;
550
541 parent = pdev->bus->parent->self->link_state; 551 parent = pdev->bus->parent->self->link_state;
542 if (!parent) { 552 if (!parent) {
543 kfree(link); 553 kfree(link);
544 return NULL; 554 return NULL;
545 } 555 }
556
546 link->parent = parent; 557 link->parent = parent;
558 link->root = link->parent->root;
547 list_add(&link->link, &parent->children); 559 list_add(&link->link, &parent->children);
548 } 560 }
549 /* Setup a pointer to the root port link */
550 if (!link->parent)
551 link->root = link;
552 else
553 link->root = link->parent->root;
554 561
555 list_add(&link->sibling, &link_list); 562 list_add(&link->sibling, &link_list);
556 pdev->link_state = link; 563 pdev->link_state = link;
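
Note: after the restructure the root pointer is settled in a single pass: a device that heads a PCIe hierarchy (a Root Port, or a PCI/PCI-X-to-PCIe bridge) is its own root, and everything else inherits the root from its parent link. The rule condenses to one expression, where is_pcie_hierarchy_root() is a hypothetical predicate for the two-type check above:

    link->root = is_pcie_hierarchy_root(pdev) ? link : link->parent->root;
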
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 717529331dac..2dd1c68e6de8 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -433,6 +433,17 @@ static int pcie_pme_resume(struct pcie_device *srv)
433 return 0; 433 return 0;
434} 434}
435 435
436/**
437 * pcie_pme_remove - Prepare PCIe PME service device for removal.
438 * @srv - PCIe service device to remove.
439 */
440static void pcie_pme_remove(struct pcie_device *srv)
441{
442 pcie_pme_suspend(srv);
443 free_irq(srv->irq, srv);
444 kfree(get_service_data(srv));
445}
446
436static struct pcie_port_service_driver pcie_pme_driver = { 447static struct pcie_port_service_driver pcie_pme_driver = {
437 .name = "pcie_pme", 448 .name = "pcie_pme",
438 .port_type = PCI_EXP_TYPE_ROOT_PORT, 449 .port_type = PCI_EXP_TYPE_ROOT_PORT,
@@ -441,6 +452,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
441 .probe = pcie_pme_probe, 452 .probe = pcie_pme_probe,
442 .suspend = pcie_pme_suspend, 453 .suspend = pcie_pme_suspend,
443 .resume = pcie_pme_resume, 454 .resume = pcie_pme_resume,
455 .remove = pcie_pme_remove,
444}; 456};
445 457
446/** 458/**
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e164b5c9f0f0..204960e70333 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1169,6 +1169,7 @@ void set_pcie_port_type(struct pci_dev *pdev)
1169 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 1169 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1170 if (!pos) 1170 if (!pos)
1171 return; 1171 return;
1172
1172 pdev->pcie_cap = pos; 1173 pdev->pcie_cap = pos;
1173 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); 1174 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
1174 pdev->pcie_flags_reg = reg16; 1175 pdev->pcie_flags_reg = reg16;
@@ -1176,13 +1177,14 @@ void set_pcie_port_type(struct pci_dev *pdev)
1176 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; 1177 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
1177 1178
1178 /* 1179 /*
1179 * A Root Port is always the upstream end of a Link. No PCIe 1180 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
1180 * component has two Links. Two Links are connected by a Switch 1181 * of a Link. No PCIe component has two Links. Two Links are
1181 * that has a Port on each Link and internal logic to connect the 1182 * connected by a Switch that has a Port on each Link and internal
1182 * two Ports. 1183 * logic to connect the two Ports.
1183 */ 1184 */
1184 type = pci_pcie_type(pdev); 1185 type = pci_pcie_type(pdev);
1185 if (type == PCI_EXP_TYPE_ROOT_PORT) 1186 if (type == PCI_EXP_TYPE_ROOT_PORT ||
1187 type == PCI_EXP_TYPE_PCIE_BRIDGE)
1186 pdev->has_secondary_link = 1; 1188 pdev->has_secondary_link = 1;
1187 else if (type == PCI_EXP_TYPE_UPSTREAM || 1189 else if (type == PCI_EXP_TYPE_UPSTREAM ||
1188 type == PCI_EXP_TYPE_DOWNSTREAM) { 1190 type == PCI_EXP_TYPE_DOWNSTREAM) {
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
index 09172043d589..c617ec49e9ed 100644
--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
217 BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15, 217 BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
218 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */ 218 BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
219 BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */ 219 BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
220 BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */ 220 BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
221 BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18, 221 BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
222 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */ 222 BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
223 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */ 223 BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 37300634b7d2..d94aef17348b 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
731 int reg) 731 int reg)
732{ 732{
733 struct byt_community *comm = byt_get_community(vg, offset); 733 struct byt_community *comm = byt_get_community(vg, offset);
734 u32 reg_offset = 0; 734 u32 reg_offset;
735 735
736 if (!comm) 736 if (!comm)
737 return NULL; 737 return NULL;
738 738
739 offset -= comm->pin_base; 739 offset -= comm->pin_base;
740 if (reg == BYT_INT_STAT_REG) 740 switch (reg) {
741 case BYT_INT_STAT_REG:
741 reg_offset = (offset / 32) * 4; 742 reg_offset = (offset / 32) * 4;
742 else 743 break;
744 case BYT_DEBOUNCE_REG:
745 reg_offset = 0;
746 break;
747 default:
743 reg_offset = comm->pad_map[offset] * 16; 748 reg_offset = comm->pad_map[offset] * 16;
749 break;
750 }
744 751
745 return comm->reg_base + reg_offset + reg; 752 return comm->reg_base + reg_offset + reg;
746} 753}
@@ -1092,6 +1099,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
1092 enum pin_config_param param = pinconf_to_config_param(*config); 1099 enum pin_config_param param = pinconf_to_config_param(*config);
1093 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); 1100 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
1094 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); 1101 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
1102 void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
1095 unsigned long flags; 1103 unsigned long flags;
1096 u32 conf, pull, val, debounce; 1104 u32 conf, pull, val, debounce;
1097 u16 arg = 0; 1105 u16 arg = 0;
@@ -1128,7 +1136,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
1128 return -EINVAL; 1136 return -EINVAL;
1129 1137
1130 raw_spin_lock_irqsave(&vg->lock, flags); 1138 raw_spin_lock_irqsave(&vg->lock, flags);
1131 debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG)); 1139 debounce = readl(db_reg);
1132 raw_spin_unlock_irqrestore(&vg->lock, flags); 1140 raw_spin_unlock_irqrestore(&vg->lock, flags);
1133 1141
1134 switch (debounce & BYT_DEBOUNCE_PULSE_MASK) { 1142 switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
@@ -1176,6 +1184,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
1176 unsigned int param, arg; 1184 unsigned int param, arg;
1177 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); 1185 void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
1178 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG); 1186 void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
1187 void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
1179 unsigned long flags; 1188 unsigned long flags;
1180 u32 conf, val, debounce; 1189 u32 conf, val, debounce;
1181 int i, ret = 0; 1190 int i, ret = 0;
@@ -1238,36 +1247,44 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
1238 1247
1239 break; 1248 break;
1240 case PIN_CONFIG_INPUT_DEBOUNCE: 1249 case PIN_CONFIG_INPUT_DEBOUNCE:
1241 debounce = readl(byt_gpio_reg(vg, offset, 1250 debounce = readl(db_reg);
1242 BYT_DEBOUNCE_REG)); 1251 debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
1243 conf &= ~BYT_DEBOUNCE_PULSE_MASK; 1252
1253 if (arg)
1254 conf |= BYT_DEBOUNCE_EN;
1255 else
1256 conf &= ~BYT_DEBOUNCE_EN;
1244 1257
1245 switch (arg) { 1258 switch (arg) {
1246 case 375: 1259 case 375:
1247 conf |= BYT_DEBOUNCE_PULSE_375US; 1260 debounce |= BYT_DEBOUNCE_PULSE_375US;
1248 break; 1261 break;
1249 case 750: 1262 case 750:
1250 conf |= BYT_DEBOUNCE_PULSE_750US; 1263 debounce |= BYT_DEBOUNCE_PULSE_750US;
1251 break; 1264 break;
1252 case 1500: 1265 case 1500:
1253 conf |= BYT_DEBOUNCE_PULSE_1500US; 1266 debounce |= BYT_DEBOUNCE_PULSE_1500US;
1254 break; 1267 break;
1255 case 3000: 1268 case 3000:
1256 conf |= BYT_DEBOUNCE_PULSE_3MS; 1269 debounce |= BYT_DEBOUNCE_PULSE_3MS;
1257 break; 1270 break;
1258 case 6000: 1271 case 6000:
1259 conf |= BYT_DEBOUNCE_PULSE_6MS; 1272 debounce |= BYT_DEBOUNCE_PULSE_6MS;
1260 break; 1273 break;
1261 case 12000: 1274 case 12000:
1262 conf |= BYT_DEBOUNCE_PULSE_12MS; 1275 debounce |= BYT_DEBOUNCE_PULSE_12MS;
1263 break; 1276 break;
1264 case 24000: 1277 case 24000:
1265 conf |= BYT_DEBOUNCE_PULSE_24MS; 1278 debounce |= BYT_DEBOUNCE_PULSE_24MS;
1266 break; 1279 break;
1267 default: 1280 default:
1268 ret = -EINVAL; 1281 if (arg)
1282 ret = -EINVAL;
1283 break;
1269 } 1284 }
1270 1285
1286 if (!ret)
1287 writel(debounce, db_reg);
1271 break; 1288 break;
1272 default: 1289 default:
1273 ret = -ENOTSUPP; 1290 ret = -ENOTSUPP;
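
Note: the rewritten PIN_CONFIG_INPUT_DEBOUNCE case fixes a register mix-up: the old code read BYT_DEBOUNCE_REG but then masked and OR'ed the pulse-width bits into `conf`, the cached CONF0 value, so the selected period never reached the debounce register at all. The corrected split keeps the enable bit in CONF0 and the period in the debounce register, roughly:

    debounce = readl(db_reg);
    debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
    debounce |= BYT_DEBOUNCE_PULSE_3MS;     /* example: a 3 ms period */
    writel(debounce, db_reg);               /* period -> debounce reg */

    conf |= BYT_DEBOUNCE_EN;                /* enable bit -> CONF0 */

An argument of 0 now cleanly disables debouncing instead of falling into the -EINVAL default.
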
@@ -1606,7 +1623,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
1606 continue; 1623 continue;
1607 } 1624 }
1608 1625
1626 raw_spin_lock(&vg->lock);
1609 pending = readl(reg); 1627 pending = readl(reg);
1628 raw_spin_unlock(&vg->lock);
1610 for_each_set_bit(pin, &pending, 32) { 1629 for_each_set_bit(pin, &pending, 32) {
1611 virq = irq_find_mapping(vg->chip.irqdomain, base + pin); 1630 virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
1612 generic_handle_irq(virq); 1631 generic_handle_irq(virq);
@@ -1617,6 +1636,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
1617 1636
1618static void byt_gpio_irq_init_hw(struct byt_gpio *vg) 1637static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
1619{ 1638{
1639 struct gpio_chip *gc = &vg->chip;
1640 struct device *dev = &vg->pdev->dev;
1620 void __iomem *reg; 1641 void __iomem *reg;
1621 u32 base, value; 1642 u32 base, value;
1622 int i; 1643 int i;
@@ -1638,10 +1659,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
1638 } 1659 }
1639 1660
1640 value = readl(reg); 1661 value = readl(reg);
1641 if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) && 1662 if (value & BYT_DIRECT_IRQ_EN) {
1642 !(value & BYT_DIRECT_IRQ_EN)) { 1663 clear_bit(i, gc->irq_valid_mask);
1664 dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
1665 } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
1643 byt_gpio_clear_triggering(vg, i); 1666 byt_gpio_clear_triggering(vg, i);
1644 dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i); 1667 dev_dbg(dev, "disabling GPIO %d\n", i);
1645 } 1668 }
1646 } 1669 }
1647 1670
@@ -1680,6 +1703,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
1680 gc->can_sleep = false; 1703 gc->can_sleep = false;
1681 gc->parent = &vg->pdev->dev; 1704 gc->parent = &vg->pdev->dev;
1682 gc->ngpio = vg->soc_data->npins; 1705 gc->ngpio = vg->soc_data->npins;
1706 gc->irq_need_valid_mask = true;
1683 1707
1684#ifdef CONFIG_PM_SLEEP 1708#ifdef CONFIG_PM_SLEEP
1685 vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio, 1709 vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
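
Note: the init_hw/probe hunks exclude pads wired as direct IRQs to the IO-APIC (BYT_DIRECT_IRQ_EN) from the GPIO irqdomain instead of silently leaving them mappable: the chip opts into the per-pin validity bitmap before registration, then clears the bits for such pads. In outline:

    gc->irq_need_valid_mask = true;         /* in probe, before gpiochip_add */

    /* in irq_init_hw, while scanning pads: */
    if (value & BYT_DIRECT_IRQ_EN)
            clear_bit(i, gc->irq_valid_mask);  /* no irqdomain mapping */
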
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index 59cb7a6fc5be..901b356b09d7 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -19,7 +19,7 @@
19 19
20#define BXT_PAD_OWN 0x020 20#define BXT_PAD_OWN 0x020
21#define BXT_HOSTSW_OWN 0x080 21#define BXT_HOSTSW_OWN 0x080
22#define BXT_PADCFGLOCK 0x090 22#define BXT_PADCFGLOCK 0x060
23#define BXT_GPI_IE 0x110 23#define BXT_GPI_IE 0x110
24 24
25#define BXT_COMMUNITY(s, e) \ 25#define BXT_COMMUNITY(s, e) \
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 1e139672f1af..6df35dcb29ae 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
353 return 0; 353 return 0;
354} 354}
355 355
356static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
357{
358 u32 value;
359
360 value = readl(padcfg0);
361 if (input) {
362 value &= ~PADCFG0_GPIORXDIS;
363 value |= PADCFG0_GPIOTXDIS;
364 } else {
365 value &= ~PADCFG0_GPIOTXDIS;
366 value |= PADCFG0_GPIORXDIS;
367 }
368 writel(value, padcfg0);
369}
370
356static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, 371static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
357 struct pinctrl_gpio_range *range, 372 struct pinctrl_gpio_range *range,
358 unsigned pin) 373 unsigned pin)
@@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
375 /* Disable SCI/SMI/NMI generation */ 390 /* Disable SCI/SMI/NMI generation */
376 value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); 391 value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
377 value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); 392 value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
378 /* Disable TX buffer and enable RX (this will be input) */
379 value &= ~PADCFG0_GPIORXDIS;
380 value |= PADCFG0_GPIOTXDIS;
381 writel(value, padcfg0); 393 writel(value, padcfg0);
382 394
395 /* Disable TX buffer and enable RX (this will be input) */
396 __intel_gpio_set_direction(padcfg0, true);
397
383 raw_spin_unlock_irqrestore(&pctrl->lock, flags); 398 raw_spin_unlock_irqrestore(&pctrl->lock, flags);
384 399
385 return 0; 400 return 0;
@@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
392 struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); 407 struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
393 void __iomem *padcfg0; 408 void __iomem *padcfg0;
394 unsigned long flags; 409 unsigned long flags;
395 u32 value;
396 410
397 raw_spin_lock_irqsave(&pctrl->lock, flags); 411 raw_spin_lock_irqsave(&pctrl->lock, flags);
398 412
399 padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); 413 padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
400 414 __intel_gpio_set_direction(padcfg0, input);
401 value = readl(padcfg0);
402 if (input)
403 value |= PADCFG0_GPIOTXDIS;
404 else
405 value &= ~PADCFG0_GPIOTXDIS;
406 writel(value, padcfg0);
407 415
408 raw_spin_unlock_irqrestore(&pctrl->lock, flags); 416 raw_spin_unlock_irqrestore(&pctrl->lock, flags);
409 417
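
Note: folding the direction programming into __intel_gpio_set_direction() also fixes a latent bug: the old intel_gpio_set_direction() toggled only PADCFG0_GPIOTXDIS, so switching a pin to input never re-enabled the receiver. The helper always programs the RXDIS/TXDIS pair as a unit, and both callers reduce to:

    __intel_gpio_set_direction(padcfg0, true);   /* input:  RX on, TX off */
    __intel_gpio_set_direction(padcfg0, false);  /* output: TX on, RX off */
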
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index b21896126f76..4d4ef42a39b5 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
794 unsigned int i; 794 unsigned int i;
795 int ret; 795 int ret;
796 796
797 if (!mrfld_buf_available(mp, pin))
798 return -ENOTSUPP;
799
797 for (i = 0; i < nconfigs; i++) { 800 for (i = 0; i < nconfigs; i++) {
798 switch (pinconf_to_config_param(configs[i])) { 801 switch (pinconf_to_config_param(configs[i])) {
799 case PIN_CONFIG_BIAS_DISABLE: 802 case PIN_CONFIG_BIAS_DISABLE:
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index c3928aa3fefa..e0bca4df2a2f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -253,9 +253,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
253static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; 253static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
254static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; 254static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
255static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; 255static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
256static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; 256static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
257static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), 257static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
258 PIN(GPIOAO_5, 0) };
259static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; 258static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
260static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; 259static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
261 260
@@ -498,7 +497,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
498 GPIO_GROUP(GPIOAO_13, 0), 497 GPIO_GROUP(GPIOAO_13, 0),
499 498
500 /* bank AO */ 499 /* bank AO */
501 GROUP(uart_tx_ao_b, 0, 26), 500 GROUP(uart_tx_ao_b, 0, 24),
502 GROUP(uart_rx_ao_b, 0, 25), 501 GROUP(uart_rx_ao_b, 0, 25),
503 GROUP(uart_tx_ao_a, 0, 12), 502 GROUP(uart_tx_ao_a, 0, 12),
504 GROUP(uart_rx_ao_a, 0, 11), 503 GROUP(uart_rx_ao_a, 0, 11),
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 25694f7094c7..b69743b07a1d 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -214,9 +214,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
214static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) }; 214static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
215static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) }; 215static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
216static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) }; 216static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
217static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) }; 217static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
218static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0), 218static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
219 PIN(GPIOAO_5, 0) };
220static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) }; 219static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
221static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) }; 220static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
222 221
@@ -409,7 +408,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
409 GPIO_GROUP(GPIOAO_9, 0), 408 GPIO_GROUP(GPIOAO_9, 0),
410 409
411 /* bank AO */ 410 /* bank AO */
412 GROUP(uart_tx_ao_b, 0, 26), 411 GROUP(uart_tx_ao_b, 0, 24),
413 GROUP(uart_rx_ao_b, 0, 25), 412 GROUP(uart_rx_ao_b, 0, 25),
414 GROUP(uart_tx_ao_a, 0, 12), 413 GROUP(uart_tx_ao_a, 0, 12),
415 GROUP(uart_rx_ao_a, 0, 11), 414 GROUP(uart_rx_ao_a, 0, 11),
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index a579126832af..620c231a2889 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -212,7 +212,7 @@ static int meson_pmx_request_gpio(struct pinctrl_dev *pcdev,
212{ 212{
213 struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev); 213 struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
214 214
215 meson_pmx_disable_other_groups(pc, range->pin_base + offset, -1); 215 meson_pmx_disable_other_groups(pc, offset, -1);
216 216
217 return 0; 217 return 0;
218} 218}
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index aea310a91821..537b52055756 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -202,6 +202,8 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
202 i = 128; 202 i = 128;
203 pin_num = AMD_GPIO_PINS_BANK2 + i; 203 pin_num = AMD_GPIO_PINS_BANK2 + i;
204 break; 204 break;
205 default:
206 return;
205 } 207 }
206 208
207 for (; i < pin_num; i++) { 209 for (; i < pin_num; i++) {
@@ -382,26 +384,21 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
382{ 384{
383 int ret = 0; 385 int ret = 0;
384 u32 pin_reg; 386 u32 pin_reg;
385 unsigned long flags; 387 unsigned long flags, irq_flags;
386 bool level_trig;
387 u32 active_level;
388 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 388 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
389 struct amd_gpio *gpio_dev = gpiochip_get_data(gc); 389 struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
390 390
391 spin_lock_irqsave(&gpio_dev->lock, flags); 391 spin_lock_irqsave(&gpio_dev->lock, flags);
392 pin_reg = readl(gpio_dev->base + (d->hwirq)*4); 392 pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
393 393
394 /* 394 /* Ignore the settings coming from the client and
395 * When level_trig is set EDGE and active_level is set HIGH in BIOS 395 * read the values from the ACPI tables
396 * default settings, ignore incoming settings from client and use 396 * while setting the trigger type
397 * BIOS settings to configure GPIO register.
398 */ 397 */
399 level_trig = !(pin_reg & (LEVEL_TRIGGER << LEVEL_TRIG_OFF));
400 active_level = pin_reg & (ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
401 398
402 if(level_trig && 399 irq_flags = irq_get_trigger_type(d->irq);
403 ((active_level >> ACTIVE_LEVEL_OFF) == ACTIVE_HIGH)) 400 if (irq_flags != IRQ_TYPE_NONE)
404 type = IRQ_TYPE_EDGE_FALLING; 401 type = irq_flags;
405 402
406 switch (type & IRQ_TYPE_SENSE_MASK) { 403 switch (type & IRQ_TYPE_SENSE_MASK) {
407 case IRQ_TYPE_EDGE_RISING: 404 case IRQ_TYPE_EDGE_RISING:
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 12f7d1eb65bc..07409fde02b2 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -56,6 +56,17 @@ static const struct samsung_pin_bank_type bank_type_alive = {
56 .reg_offset = { 0x00, 0x04, 0x08, 0x0c, }, 56 .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
57}; 57};
58 58
59/* Exynos5433 has the 4bit widths for PINCFG_TYPE_DRV bitfields. */
60static const struct samsung_pin_bank_type exynos5433_bank_type_off = {
61 .fld_width = { 4, 1, 2, 4, 2, 2, },
62 .reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
63};
64
65static const struct samsung_pin_bank_type exynos5433_bank_type_alive = {
66 .fld_width = { 4, 1, 2, 4, },
67 .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
68};
69
59static void exynos_irq_mask(struct irq_data *irqd) 70static void exynos_irq_mask(struct irq_data *irqd)
60{ 71{
61 struct irq_chip *chip = irq_data_get_irq_chip(irqd); 72 struct irq_chip *chip = irq_data_get_irq_chip(irqd);
@@ -1335,82 +1346,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
1335 1346
1336/* pin banks of exynos5433 pin-controller - ALIVE */ 1347/* pin banks of exynos5433 pin-controller - ALIVE */
1337static const struct samsung_pin_bank_data exynos5433_pin_banks0[] = { 1348static const struct samsung_pin_bank_data exynos5433_pin_banks0[] = {
1338 EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), 1349 EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
1339 EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04), 1350 EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
1340 EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08), 1351 EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
1341 EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c), 1352 EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
1342 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1), 1353 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
1343 EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1), 1354 EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
1344 EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1), 1355 EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
1345 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1), 1356 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
1346 EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1), 1357 EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
1347}; 1358};
1348 1359
1349/* pin banks of exynos5433 pin-controller - AUD */ 1360/* pin banks of exynos5433 pin-controller - AUD */
1350static const struct samsung_pin_bank_data exynos5433_pin_banks1[] = { 1361static const struct samsung_pin_bank_data exynos5433_pin_banks1[] = {
1351 EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00), 1362 EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
1352 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), 1363 EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
1353}; 1364};
1354 1365
1355/* pin banks of exynos5433 pin-controller - CPIF */ 1366/* pin banks of exynos5433 pin-controller - CPIF */
1356static const struct samsung_pin_bank_data exynos5433_pin_banks2[] = { 1367static const struct samsung_pin_bank_data exynos5433_pin_banks2[] = {
1357 EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00), 1368 EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
1358}; 1369};
1359 1370
1360/* pin banks of exynos5433 pin-controller - eSE */ 1371/* pin banks of exynos5433 pin-controller - eSE */
1361static const struct samsung_pin_bank_data exynos5433_pin_banks3[] = { 1372static const struct samsung_pin_bank_data exynos5433_pin_banks3[] = {
1362 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00), 1373 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
1363}; 1374};
1364 1375
1365/* pin banks of exynos5433 pin-controller - FINGER */ 1376/* pin banks of exynos5433 pin-controller - FINGER */
1366static const struct samsung_pin_bank_data exynos5433_pin_banks4[] = { 1377static const struct samsung_pin_bank_data exynos5433_pin_banks4[] = {
1367 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00), 1378 EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
1368}; 1379};
1369 1380
1370/* pin banks of exynos5433 pin-controller - FSYS */ 1381/* pin banks of exynos5433 pin-controller - FSYS */
1371static const struct samsung_pin_bank_data exynos5433_pin_banks5[] = { 1382static const struct samsung_pin_bank_data exynos5433_pin_banks5[] = {
1372 EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00), 1383 EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
1373 EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04), 1384 EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
1374 EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08), 1385 EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
1375 EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c), 1386 EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
1376 EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10), 1387 EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
1377 EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14), 1388 EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
1378}; 1389};
1379 1390
1380/* pin banks of exynos5433 pin-controller - IMEM */ 1391/* pin banks of exynos5433 pin-controller - IMEM */
1381static const struct samsung_pin_bank_data exynos5433_pin_banks6[] = { 1392static const struct samsung_pin_bank_data exynos5433_pin_banks6[] = {
1382 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00), 1393 EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
1383}; 1394};
1384 1395
1385/* pin banks of exynos5433 pin-controller - NFC */ 1396/* pin banks of exynos5433 pin-controller - NFC */
1386static const struct samsung_pin_bank_data exynos5433_pin_banks7[] = { 1397static const struct samsung_pin_bank_data exynos5433_pin_banks7[] = {
1387 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00), 1398 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
1388}; 1399};
1389 1400
1390/* pin banks of exynos5433 pin-controller - PERIC */ 1401/* pin banks of exynos5433 pin-controller - PERIC */
1391static const struct samsung_pin_bank_data exynos5433_pin_banks8[] = { 1402static const struct samsung_pin_bank_data exynos5433_pin_banks8[] = {
1392 EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00), 1403 EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
1393 EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04), 1404 EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
1394 EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08), 1405 EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
1395 EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c), 1406 EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
1396 EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10), 1407 EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
1397 EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14), 1408 EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
1398 EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18), 1409 EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
1399 EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c), 1410 EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
1400 EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20), 1411 EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
1401 EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24), 1412 EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
1402 EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28), 1413 EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
1403 EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c), 1414 EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
1404 EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30), 1415 EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
1405 EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34), 1416 EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
1406 EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38), 1417 EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
1407 EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c), 1418 EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
1408 EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40), 1419 EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
1409}; 1420};
1410 1421
1411/* pin banks of exynos5433 pin-controller - TOUCH */ 1422/* pin banks of exynos5433 pin-controller - TOUCH */
1412static const struct samsung_pin_bank_data exynos5433_pin_banks9[] = { 1423static const struct samsung_pin_bank_data exynos5433_pin_banks9[] = {
1413 EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00), 1424 EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
1414}; 1425};
1415 1426
1416/* 1427/*
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index 5821525a2c84..a473092fb8d2 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -90,6 +90,37 @@
90 .pctl_res_idx = pctl_idx, \ 90 .pctl_res_idx = pctl_idx, \
91 } \ 91 } \
92 92
93#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \
94 { \
95 .type = &exynos5433_bank_type_off, \
96 .pctl_offset = reg, \
97 .nr_pins = pins, \
98 .eint_type = EINT_TYPE_GPIO, \
99 .eint_offset = offs, \
100 .name = id \
101 }
102
103#define EXYNOS5433_PIN_BANK_EINTW(pins, reg, id, offs) \
104 { \
105 .type = &exynos5433_bank_type_alive, \
106 .pctl_offset = reg, \
107 .nr_pins = pins, \
108 .eint_type = EINT_TYPE_WKUP, \
109 .eint_offset = offs, \
110 .name = id \
111 }
112
113#define EXYNOS5433_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
114 { \
115 .type = &exynos5433_bank_type_alive, \
116 .pctl_offset = reg, \
117 .nr_pins = pins, \
118 .eint_type = EINT_TYPE_WKUP, \
119 .eint_offset = offs, \
120 .name = id, \
121 .pctl_res_idx = pctl_idx, \
122 } \
123
93/** 124/**
94 * struct exynos_weint_data: irq specific data for all the wakeup interrupts 125 * struct exynos_weint_data: irq specific data for all the wakeup interrupts
95 * generated by the external wakeup interrupt controller. 126 * generated by the external wakeup interrupt controller.
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 0eb51e33cb1b..207a8de4e1ed 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -564,8 +564,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
564 val = arg / 10 - 1; 564 val = arg / 10 - 1;
565 break; 565 break;
566 case PIN_CONFIG_BIAS_DISABLE: 566 case PIN_CONFIG_BIAS_DISABLE:
567 val = 0; 567 continue;
568 break;
569 case PIN_CONFIG_BIAS_PULL_UP: 568 case PIN_CONFIG_BIAS_PULL_UP:
570 if (arg == 0) 569 if (arg == 0)
571 return -EINVAL; 570 return -EINVAL;
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index aa8bd9794683..96686336e3a3 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
561 0, 0, 0, 0}; 561 0, 0, 0, 0};
562static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39, 562static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
563 41, 42, 45}; 563 41, 42, 45};
564static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; 564static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
565static const unsigned i2c0_pins[] = {63, 64}; 565static const unsigned i2c0_pins[] = {63, 64};
566static const int i2c0_muxvals[] = {0, 0}; 566static const int i2c0_muxvals[] = {0, 0};
567static const unsigned i2c1_pins[] = {65, 66}; 567static const unsigned i2c1_pins[] = {65, 66};
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 5fe8be089b8b..59aa8e302bc3 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -1034,7 +1034,7 @@ config SURFACE_PRO3_BUTTON
1034 1034
1035config SURFACE_3_BUTTON 1035config SURFACE_3_BUTTON
1036 tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet" 1036 tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
1037 depends on ACPI && KEYBOARD_GPIO 1037 depends on ACPI && KEYBOARD_GPIO && I2C
1038 ---help--- 1038 ---help---
1039 This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet. 1039 This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
1040 1040
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 61f39abf5dc8..82d67715ce76 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -177,43 +177,43 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
177 177
178#if IS_ENABLED(CONFIG_LEDS_CLASS) 178#if IS_ENABLED(CONFIG_LEDS_CLASS)
179static enum led_brightness logolamp_get(struct led_classdev *cdev); 179static enum led_brightness logolamp_get(struct led_classdev *cdev);
180static void logolamp_set(struct led_classdev *cdev, 180static int logolamp_set(struct led_classdev *cdev,
181 enum led_brightness brightness); 181 enum led_brightness brightness);
182 182
183static struct led_classdev logolamp_led = { 183static struct led_classdev logolamp_led = {
184 .name = "fujitsu::logolamp", 184 .name = "fujitsu::logolamp",
185 .brightness_get = logolamp_get, 185 .brightness_get = logolamp_get,
186 .brightness_set = logolamp_set 186 .brightness_set_blocking = logolamp_set
187}; 187};
188 188
189static enum led_brightness kblamps_get(struct led_classdev *cdev); 189static enum led_brightness kblamps_get(struct led_classdev *cdev);
190static void kblamps_set(struct led_classdev *cdev, 190static int kblamps_set(struct led_classdev *cdev,
191 enum led_brightness brightness); 191 enum led_brightness brightness);
192 192
193static struct led_classdev kblamps_led = { 193static struct led_classdev kblamps_led = {
194 .name = "fujitsu::kblamps", 194 .name = "fujitsu::kblamps",
195 .brightness_get = kblamps_get, 195 .brightness_get = kblamps_get,
196 .brightness_set = kblamps_set 196 .brightness_set_blocking = kblamps_set
197}; 197};
198 198
199static enum led_brightness radio_led_get(struct led_classdev *cdev); 199static enum led_brightness radio_led_get(struct led_classdev *cdev);
200static void radio_led_set(struct led_classdev *cdev, 200static int radio_led_set(struct led_classdev *cdev,
201 enum led_brightness brightness); 201 enum led_brightness brightness);
202 202
203static struct led_classdev radio_led = { 203static struct led_classdev radio_led = {
204 .name = "fujitsu::radio_led", 204 .name = "fujitsu::radio_led",
205 .brightness_get = radio_led_get, 205 .brightness_get = radio_led_get,
206 .brightness_set = radio_led_set 206 .brightness_set_blocking = radio_led_set
207}; 207};
208 208
209static enum led_brightness eco_led_get(struct led_classdev *cdev); 209static enum led_brightness eco_led_get(struct led_classdev *cdev);
210static void eco_led_set(struct led_classdev *cdev, 210static int eco_led_set(struct led_classdev *cdev,
211 enum led_brightness brightness); 211 enum led_brightness brightness);
212 212
213static struct led_classdev eco_led = { 213static struct led_classdev eco_led = {
214 .name = "fujitsu::eco_led", 214 .name = "fujitsu::eco_led",
215 .brightness_get = eco_led_get, 215 .brightness_get = eco_led_get,
216 .brightness_set = eco_led_set 216 .brightness_set_blocking = eco_led_set
217}; 217};
218#endif 218#endif
219 219
@@ -267,48 +267,48 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
267#if IS_ENABLED(CONFIG_LEDS_CLASS) 267#if IS_ENABLED(CONFIG_LEDS_CLASS)
268/* LED class callbacks */ 268/* LED class callbacks */
269 269
270static void logolamp_set(struct led_classdev *cdev, 270static int logolamp_set(struct led_classdev *cdev,
271 enum led_brightness brightness) 271 enum led_brightness brightness)
272{ 272{
273 if (brightness >= LED_FULL) { 273 if (brightness >= LED_FULL) {
274 call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON); 274 call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON);
275 call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON); 275 return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON);
276 } else if (brightness >= LED_HALF) { 276 } else if (brightness >= LED_HALF) {
277 call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON); 277 call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON);
278 call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF); 278 return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF);
279 } else { 279 } else {
280 call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF); 280 return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF);
281 } 281 }
282} 282}
283 283
284static void kblamps_set(struct led_classdev *cdev, 284static int kblamps_set(struct led_classdev *cdev,
285 enum led_brightness brightness) 285 enum led_brightness brightness)
286{ 286{
287 if (brightness >= LED_FULL) 287 if (brightness >= LED_FULL)
288 call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON); 288 return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON);
289 else 289 else
290 call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF); 290 return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
291} 291}
292 292
293static void radio_led_set(struct led_classdev *cdev, 293static int radio_led_set(struct led_classdev *cdev,
294 enum led_brightness brightness) 294 enum led_brightness brightness)
295{ 295{
296 if (brightness >= LED_FULL) 296 if (brightness >= LED_FULL)
297 call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON); 297 return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
298 else 298 else
299 call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0); 299 return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
300} 300}
301 301
302static void eco_led_set(struct led_classdev *cdev, 302static int eco_led_set(struct led_classdev *cdev,
303 enum led_brightness brightness) 303 enum led_brightness brightness)
304{ 304{
305 int curr; 305 int curr;
306 306
307 curr = call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0); 307 curr = call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0);
308 if (brightness >= LED_FULL) 308 if (brightness >= LED_FULL)
309 call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr | ECO_LED_ON); 309 return call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr | ECO_LED_ON);
310 else 310 else
311 call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr & ~ECO_LED_ON); 311 return call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr & ~ECO_LED_ON);
312} 312}
313 313
314static enum led_brightness logolamp_get(struct led_classdev *cdev) 314static enum led_brightness logolamp_get(struct led_classdev *cdev)
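
Note: brightness_set callbacks must not sleep, but these four end up evaluating ACPI methods through call_fext_func(); switching to brightness_set_blocking lets the LED core invoke them from a context that may sleep and, since the signature returns int, surface the FEXT call's result instead of discarding it. A minimal sketch of the converted shape (the example_* names are hypothetical):

    static int example_led_set(struct led_classdev *cdev,
                               enum led_brightness brightness)
    {
            return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS,
                                  brightness >= LED_FULL ? FUNC_LED_ON
                                                         : FUNC_LED_OFF);
    }

    static struct led_classdev example_led = {
            .name                    = "fujitsu::example",
            .brightness_get          = kblamps_get,
            .brightness_set_blocking = example_led_set,
    };
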
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 410741acb3c9..f46ece2ce3c4 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
813 case 8: 813 case 8:
814 case 7: 814 case 7:
815 case 6: 815 case 6:
816 case 1:
816 ideapad_input_report(priv, vpc_bit); 817 ideapad_input_report(priv, vpc_bit);
817 break; 818 break;
818 case 5: 819 case 5:
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 1fc0de870ff8..361770568ad0 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev)
77 77
78 input_set_capability(input, EV_KEY, KEY_POWER); 78 input_set_capability(input, EV_KEY, KEY_POWER);
79 79
80 error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0, 80 error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
81 DRIVER_NAME, input); 81 DRIVER_NAME, input);
82 if (error) { 82 if (error) {
83 dev_err(&pdev->dev, "Unable to request irq %d for mfld power" 83 dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
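
Note: request_threaded_irq() with a NULL primary handler is only legal together with IRQF_ONESHOT: the core then keeps the line masked until the thread completes, and without the flag genirq rejects the request outright (a level-triggered source would otherwise re-fire forever between hard-irq and thread). The fixed call in outline:

    error = request_threaded_irq(irq, NULL /* no hard-irq handler */,
                                 mfld_pb_isr, IRQF_ONESHOT,
                                 DRIVER_NAME, input);
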
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 97b4c3a219c0..25f15df5c2d7 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -326,7 +326,7 @@ static int __init mlxplat_init(void)
326 return 0; 326 return 0;
327 327
328fail_platform_mux_register: 328fail_platform_mux_register:
329 for (i--; i > 0 ; i--) 329 while (--i >= 0)
330 platform_device_unregister(priv->pdev_mux[i]); 330 platform_device_unregister(priv->pdev_mux[i]);
331 platform_device_unregister(priv->pdev_i2c); 331 platform_device_unregister(priv->pdev_i2c);
332fail_alloc: 332fail_alloc:
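
The mlx-platform unwind fix above is an off-by-one that is easiest to see with concrete indices; the sketch below (loop bodies elided, index values illustrative) shows why the old loop leaked the first mux device:

    /* Failure while registering pdev_mux[3]: entries 0..2 need rollback. */
    int i = 3;

    for (i--; i > 0; i--)        /* old: visits 2, 1 -- pdev_mux[0] leaks,   */
            ;                    /* and a failure at i == 1 rolls back none  */

    i = 3;
    while (--i >= 0)             /* new: visits 2, 1, 0 -- full rollback */
            ;
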
diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c
index cbf4d83a7271..25b176996cb7 100644
--- a/drivers/platform/x86/surface3-wmi.c
+++ b/drivers/platform/x86/surface3-wmi.c
@@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
139 139
140static int s3_wmi_check_platform_device(struct device *dev, void *data) 140static int s3_wmi_check_platform_device(struct device *dev, void *data)
141{ 141{
142 struct acpi_device *adev, *ts_adev; 142 struct acpi_device *adev, *ts_adev = NULL;
143 acpi_handle handle; 143 acpi_handle handle;
144 acpi_status status; 144 acpi_status status;
145 145
@@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device)
244 return 0; 244 return 0;
245} 245}
246 246
247#ifdef CONFIG_PM 247static int __maybe_unused s3_wmi_resume(struct device *dev)
248static int s3_wmi_resume(struct device *dev)
249{ 248{
250 s3_wmi_send_lid_state(); 249 s3_wmi_send_lid_state();
251 return 0; 250 return 0;
252} 251}
253#endif
254static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume); 252static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
255 253
256static struct platform_driver s3_wmi_driver = { 254static struct platform_driver s3_wmi_driver = {
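
The surface3-wmi hunk swaps an #ifdef CONFIG_PM block for the __maybe_unused idiom; a minimal sketch of the pattern (driver names hypothetical) looks like this:

    /* Resume hook kept out of #ifdef; __maybe_unused silences the
     * "defined but not used" warning when sleep support is compiled out. */
    static int __maybe_unused my_drv_resume(struct device *dev)
    {
            return 0;
    }

    /* SIMPLE_DEV_PM_OPS() drops the callback references by itself when
     * CONFIG_PM_SLEEP is disabled, so no preprocessor guards are needed. */
    static SIMPLE_DEV_PM_OPS(my_drv_pm, NULL, my_drv_resume);
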
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index e6a512ebeae2..a3ade9e4ef47 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = {
272 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1, 272 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
273 BIT(3)), 273 BIT(3)),
274 AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100, 274 AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
275 AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)), 275 AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
276 AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100, 276 AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
277 AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)), 277 AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
278 AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100, 278 AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index a43b0e8a438d..988a7472c2ab 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -30,9 +30,6 @@
30#include <linux/of_gpio.h> 30#include <linux/of_gpio.h>
31#include <linux/regulator/of_regulator.h> 31#include <linux/regulator/of_regulator.h>
32#include <linux/regulator/machine.h> 32#include <linux/regulator/machine.h>
33#include <linux/acpi.h>
34#include <linux/property.h>
35#include <linux/gpio/consumer.h>
36 33
37struct fixed_voltage_data { 34struct fixed_voltage_data {
38 struct regulator_desc desc; 35 struct regulator_desc desc;
@@ -97,44 +94,6 @@ of_get_fixed_voltage_config(struct device *dev,
97 return config; 94 return config;
98} 95}
99 96
100/**
101 * acpi_get_fixed_voltage_config - extract fixed_voltage_config structure info
102 * @dev: device requesting for fixed_voltage_config
103 * @desc: regulator description
104 *
105 * Populates fixed_voltage_config structure by extracting data through ACPI
 106 * interface, returns a pointer to the populated structure or NULL if memory
107 * alloc fails.
108 */
109static struct fixed_voltage_config *
110acpi_get_fixed_voltage_config(struct device *dev,
111 const struct regulator_desc *desc)
112{
113 struct fixed_voltage_config *config;
114 const char *supply_name;
115 struct gpio_desc *gpiod;
116 int ret;
117
118 config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
119 if (!config)
120 return ERR_PTR(-ENOMEM);
121
122 ret = device_property_read_string(dev, "supply-name", &supply_name);
123 if (!ret)
124 config->supply_name = supply_name;
125
126 gpiod = gpiod_get(dev, "gpio", GPIOD_ASIS);
127 if (IS_ERR(gpiod))
128 return ERR_PTR(-ENODEV);
129
130 config->gpio = desc_to_gpio(gpiod);
131 config->enable_high = device_property_read_bool(dev,
132 "enable-active-high");
133 gpiod_put(gpiod);
134
135 return config;
136}
137
138static struct regulator_ops fixed_voltage_ops = { 97static struct regulator_ops fixed_voltage_ops = {
139}; 98};
140 99
@@ -155,11 +114,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
155 &drvdata->desc); 114 &drvdata->desc);
156 if (IS_ERR(config)) 115 if (IS_ERR(config))
157 return PTR_ERR(config); 116 return PTR_ERR(config);
158 } else if (ACPI_HANDLE(&pdev->dev)) {
159 config = acpi_get_fixed_voltage_config(&pdev->dev,
160 &drvdata->desc);
161 if (IS_ERR(config))
162 return PTR_ERR(config);
163 } else { 117 } else {
164 config = dev_get_platdata(&pdev->dev); 118 config = dev_get_platdata(&pdev->dev);
165 } 119 }
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index 4864b9d742c0..716191046a70 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -452,7 +452,7 @@ static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV,
452 vsel = 62; 452 vsel = 62;
453 else if ((min_uV > 1800000) && (min_uV <= 1900000)) 453 else if ((min_uV > 1800000) && (min_uV <= 1900000))
454 vsel = 61; 454 vsel = 61;
455 else if ((min_uV > 1350000) && (min_uV <= 1800000)) 455 else if ((min_uV > 1500000) && (min_uV <= 1800000))
456 vsel = 60; 456 vsel = 60;
457 else if ((min_uV > 1350000) && (min_uV <= 1500000)) 457 else if ((min_uV > 1350000) && (min_uV <= 1500000))
458 vsel = 59; 458 vsel = 59;
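
The twl6030 change is best read as a dead-branch fix; a worked example with min_uV = 1400000 (1.4 V):

    /* old: (min_uV > 1350000 && min_uV <= 1800000) matched first, so every
     * request in the 1.35-1.5 V range took vsel = 60 and the vsel = 59
     * branch below was unreachable */
    if (min_uV > 1500000 && min_uV <= 1800000)      /* fixed lower bound */
            vsel = 60;
    else if (min_uV > 1350000 && min_uV <= 1500000)
            vsel = 59;                              /* 1.4 V now lands here */
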
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 9a507e77eced..90b05c72186c 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -396,9 +396,6 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
396 goto unwind_vring_allocations; 396 goto unwind_vring_allocations;
397 } 397 }
398 398
399 /* track the rvdevs list reference */
400 kref_get(&rvdev->refcount);
401
402 list_add_tail(&rvdev->node, &rproc->rvdevs); 399 list_add_tail(&rvdev->node, &rproc->rvdevs);
403 400
404 rproc_add_subdev(rproc, &rvdev->subdev, 401 rproc_add_subdev(rproc, &rvdev->subdev,
@@ -889,13 +886,15 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
889 /* 886 /*
890 * Create a copy of the resource table. When a virtio device starts 887 * Create a copy of the resource table. When a virtio device starts
891 * and calls vring_new_virtqueue() the address of the allocated vring 888 * and calls vring_new_virtqueue() the address of the allocated vring
892 * will be stored in the table_ptr. Before the device is started, 889 * will be stored in the cached_table. Before the device is started,
893 * table_ptr will be copied into device memory. 890 * cached_table will be copied into device memory.
894 */ 891 */
895 rproc->table_ptr = kmemdup(table, tablesz, GFP_KERNEL); 892 rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
896 if (!rproc->table_ptr) 893 if (!rproc->cached_table)
897 goto clean_up; 894 goto clean_up;
898 895
896 rproc->table_ptr = rproc->cached_table;
897
899 /* reset max_notifyid */ 898 /* reset max_notifyid */
900 rproc->max_notifyid = -1; 899 rproc->max_notifyid = -1;
901 900
@@ -914,16 +913,18 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
914 } 913 }
915 914
916 /* 915 /*
917 * The starting device has been given the rproc->table_ptr as the 916 * The starting device has been given the rproc->cached_table as the
918 * resource table. The address of the vring along with the other 917 * resource table. The address of the vring along with the other
919 * allocated resources (carveouts etc) is stored in table_ptr. 918 * allocated resources (carveouts etc) is stored in cached_table.
920 * In order to pass this information to the remote device we must copy 919 * In order to pass this information to the remote device we must copy
921 * this information to device memory. We also update the table_ptr so 920 * this information to device memory. We also update the table_ptr so
922 * that any subsequent changes will be applied to the loaded version. 921 * that any subsequent changes will be applied to the loaded version.
923 */ 922 */
924 loaded_table = rproc_find_loaded_rsc_table(rproc, fw); 923 loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
925 if (loaded_table) 924 if (loaded_table) {
926 memcpy(loaded_table, rproc->table_ptr, tablesz); 925 memcpy(loaded_table, rproc->cached_table, tablesz);
926 rproc->table_ptr = loaded_table;
927 }
927 928
928 /* power up the remote processor */ 929 /* power up the remote processor */
929 ret = rproc->ops->start(rproc); 930 ret = rproc->ops->start(rproc);
@@ -951,7 +952,8 @@ stop_rproc:
951clean_up_resources: 952clean_up_resources:
952 rproc_resource_cleanup(rproc); 953 rproc_resource_cleanup(rproc);
953clean_up: 954clean_up:
954 kfree(rproc->table_ptr); 955 kfree(rproc->cached_table);
956 rproc->cached_table = NULL;
955 rproc->table_ptr = NULL; 957 rproc->table_ptr = NULL;
956 958
957 rproc_disable_iommu(rproc); 959 rproc_disable_iommu(rproc);
@@ -1185,7 +1187,8 @@ void rproc_shutdown(struct rproc *rproc)
1185 rproc_disable_iommu(rproc); 1187 rproc_disable_iommu(rproc);
1186 1188
1187 /* Free the copy of the resource table */ 1189 /* Free the copy of the resource table */
1188 kfree(rproc->table_ptr); 1190 kfree(rproc->cached_table);
1191 rproc->cached_table = NULL;
1189 rproc->table_ptr = NULL; 1192 rproc->table_ptr = NULL;
1190 1193
1191 /* if in crash state, unlock crash handler */ 1194 /* if in crash state, unlock crash handler */
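
Across the remoteproc hunks above, the single table_ptr is split into a kernel-side copy (cached_table) and a live pointer; as far as the diff shows, the intended pointer lifecycle is:

    /*
     * boot:      cached_table = kmemdup(fw resource table)
     *            table_ptr   = cached_table       (resources filled in here)
     *            memcpy(loaded_table, cached_table)
     *            table_ptr   = loaded_table       (live copy in device memory)
     * shutdown:  kfree(cached_table);
     *            cached_table = table_ptr = NULL;
     *
     * table_ptr therefore always names the copy that should receive updates.
     */
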
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 10368ed8fd13..b6f5f1e1826c 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -163,7 +163,7 @@ int reset_control_reset(struct reset_control *rstc)
163 } 163 }
164 164
165 ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id); 165 ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
166 if (rstc->shared && !ret) 166 if (rstc->shared && ret)
167 atomic_dec(&rstc->triggered_count); 167 atomic_dec(&rstc->triggered_count);
168 168
169 return ret; 169 return ret;
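
The reset-core one-character fix inverts a rollback condition. For shared reset lines the trigger count is raised before ops->reset() runs, so the decrement must happen only on failure; a sketch of the surrounding logic, abridged from reset_control_reset():

    if (atomic_inc_return(&rstc->triggered_count) != 1)
            return 0;                     /* someone already fired this line */

    ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
    if (ret)                              /* roll back only on failure; */
            atomic_dec(&rstc->triggered_count);
    /* decrementing on success (the old code) would re-arm the shared
     * reset and let it fire once per consumer instead of once total */
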
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index a79cb5a9e5f2..1cfb775e8e82 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -453,8 +453,8 @@ int rpmsg_register_device(struct rpmsg_device *rpdev)
453 struct device *dev = &rpdev->dev; 453 struct device *dev = &rpdev->dev;
454 int ret; 454 int ret;
455 455
456 dev_set_name(&rpdev->dev, "%s:%s", 456 dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent),
457 dev_name(dev->parent), rpdev->id.name); 457 rpdev->id.name, rpdev->src, rpdev->dst);
458 458
459 rpdev->dev.bus = &rpmsg_bus; 459 rpdev->dev.bus = &rpmsg_bus;
460 rpdev->dev.release = rpmsg_release_device; 460 rpdev->dev.release = rpmsg_release_device;
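
The rpmsg naming change exists because parent name plus channel name alone is not unique: two channels announcing the same service name would collide in sysfs. Appending the source and destination addresses keeps the device names distinct (addresses below are illustrative):

    /* old scheme: both channels named "virtio0:rpmsg-client-sample"
     * new scheme, with src and dst appended:
     *   virtio0.rpmsg-client-sample.-1.1024
     *   virtio0.rpmsg-client-sample.-1.1025
     */
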
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index c93c5a8fba32..5dc673dc9487 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1551,12 +1551,15 @@ config RTC_DRV_MPC5121
1551 will be called rtc-mpc5121. 1551 will be called rtc-mpc5121.
1552 1552
1553config RTC_DRV_JZ4740 1553config RTC_DRV_JZ4740
1554 bool "Ingenic JZ4740 SoC" 1554 tristate "Ingenic JZ4740 SoC"
1555 depends on MACH_INGENIC || COMPILE_TEST 1555 depends on MACH_INGENIC || COMPILE_TEST
1556 help 1556 help
1557 If you say yes here you get support for the Ingenic JZ47xx SoCs RTC 1557 If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
1558 controllers. 1558 controllers.
1559 1559
 1560 This driver can also be built as a module. If so, the module
1561 will be called rtc-jz4740.
1562
1560config RTC_DRV_LPC24XX 1563config RTC_DRV_LPC24XX
1561 tristate "NXP RTC for LPC178x/18xx/408x/43xx" 1564 tristate "NXP RTC for LPC178x/18xx/408x/43xx"
1562 depends on ARCH_LPC18XX || COMPILE_TEST 1565 depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 72918c1ba092..64989afffa3d 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -17,6 +17,7 @@
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/module.h>
20#include <linux/of_device.h> 21#include <linux/of_device.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
22#include <linux/reboot.h> 23#include <linux/reboot.h>
@@ -294,7 +295,7 @@ static void jz4740_rtc_power_off(void)
294 JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks); 295 JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
295 296
296 jz4740_rtc_poweroff(dev_for_power_off); 297 jz4740_rtc_poweroff(dev_for_power_off);
297 machine_halt(); 298 kernel_halt();
298} 299}
299 300
300static const struct of_device_id jz4740_rtc_of_match[] = { 301static const struct of_device_id jz4740_rtc_of_match[] = {
@@ -302,6 +303,7 @@ static const struct of_device_id jz4740_rtc_of_match[] = {
302 { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 }, 303 { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
303 {}, 304 {},
304}; 305};
306MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
305 307
306static int jz4740_rtc_probe(struct platform_device *pdev) 308static int jz4740_rtc_probe(struct platform_device *pdev)
307{ 309{
@@ -429,6 +431,7 @@ static const struct platform_device_id jz4740_rtc_ids[] = {
429 { "jz4780-rtc", ID_JZ4780 }, 431 { "jz4780-rtc", ID_JZ4780 },
430 {} 432 {}
431}; 433};
434MODULE_DEVICE_TABLE(platform, jz4740_rtc_ids);
432 435
433static struct platform_driver jz4740_rtc_driver = { 436static struct platform_driver jz4740_rtc_driver = {
434 .probe = jz4740_rtc_probe, 437 .probe = jz4740_rtc_probe,
@@ -440,4 +443,9 @@ static struct platform_driver jz4740_rtc_driver = {
440 .id_table = jz4740_rtc_ids, 443 .id_table = jz4740_rtc_ids,
441}; 444};
442 445
443builtin_platform_driver(jz4740_rtc_driver); 446module_platform_driver(jz4740_rtc_driver);
447
448MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
449MODULE_LICENSE("GPL");
 450MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC");
451MODULE_ALIAS("platform:jz4740-rtc");
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 75f820ca17b7..27ff38f839fc 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1583,7 +1583,7 @@ out:
1583int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) 1583int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1584{ 1584{
1585 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1585 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1586 struct zfcp_fsf_req *req = NULL; 1586 struct zfcp_fsf_req *req;
1587 int retval = -EIO; 1587 int retval = -EIO;
1588 1588
1589 spin_lock_irq(&qdio->req_q_lock); 1589 spin_lock_irq(&qdio->req_q_lock);
@@ -1612,7 +1612,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1612 zfcp_fsf_req_free(req); 1612 zfcp_fsf_req_free(req);
1613out: 1613out:
1614 spin_unlock_irq(&qdio->req_q_lock); 1614 spin_unlock_irq(&qdio->req_q_lock);
1615 if (req && !IS_ERR(req)) 1615 if (!retval)
1616 zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); 1616 zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
1617 return retval; 1617 return retval;
1618} 1618}
@@ -1638,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1638int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) 1638int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1639{ 1639{
1640 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1640 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1641 struct zfcp_fsf_req *req = NULL; 1641 struct zfcp_fsf_req *req;
1642 int retval = -EIO; 1642 int retval = -EIO;
1643 1643
1644 spin_lock_irq(&qdio->req_q_lock); 1644 spin_lock_irq(&qdio->req_q_lock);
@@ -1667,7 +1667,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1667 zfcp_fsf_req_free(req); 1667 zfcp_fsf_req_free(req);
1668out: 1668out:
1669 spin_unlock_irq(&qdio->req_q_lock); 1669 spin_unlock_irq(&qdio->req_q_lock);
1670 if (req && !IS_ERR(req)) 1670 if (!retval)
1671 zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); 1671 zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
1672 return retval; 1672 return retval;
1673} 1673}
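
The two zfcp hunks are a use-after-free fix, not a cleanup: on a send failure the request is freed inside the lock, so the old "req && !IS_ERR(req)" test could pass for a dangling pointer. Abridged from the open path shown above:

    retval = zfcp_fsf_req_send(req);
    if (retval)
            zfcp_fsf_req_free(req);       /* req is now dangling, not NULL */
    /* ... */
    spin_unlock_irq(&qdio->req_q_lock);
    if (!retval)                          /* only a successful send makes  */
            zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
                                          /* req->req_id safe to read here */
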
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 639ed4e6afd1..070c4da95f48 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -145,6 +145,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
145#define CCW_CMD_WRITE_CONF 0x21 145#define CCW_CMD_WRITE_CONF 0x21
146#define CCW_CMD_WRITE_STATUS 0x31 146#define CCW_CMD_WRITE_STATUS 0x31
147#define CCW_CMD_READ_VQ_CONF 0x32 147#define CCW_CMD_READ_VQ_CONF 0x32
148#define CCW_CMD_READ_STATUS 0x72
148#define CCW_CMD_SET_IND_ADAPTER 0x73 149#define CCW_CMD_SET_IND_ADAPTER 0x73
149#define CCW_CMD_SET_VIRTIO_REV 0x83 150#define CCW_CMD_SET_VIRTIO_REV 0x83
150 151
@@ -160,6 +161,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
160#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000 161#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
161#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000 162#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
162#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000 163#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
164#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
163#define VIRTIO_CCW_INTPARM_MASK 0xffff0000 165#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
164 166
165static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev) 167static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -452,7 +454,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
452 * This may happen on device detach. 454 * This may happen on device detach.
453 */ 455 */
454 if (ret && (ret != -ENODEV)) 456 if (ret && (ret != -ENODEV))
455 dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d", 457 dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
456 ret, index); 458 ret, index);
457 459
458 vring_del_virtqueue(vq); 460 vring_del_virtqueue(vq);
@@ -892,6 +894,28 @@ out_free:
892static u8 virtio_ccw_get_status(struct virtio_device *vdev) 894static u8 virtio_ccw_get_status(struct virtio_device *vdev)
893{ 895{
894 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 896 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
897 u8 old_status = *vcdev->status;
898 struct ccw1 *ccw;
899
900 if (vcdev->revision < 1)
901 return *vcdev->status;
902
903 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
904 if (!ccw)
905 return old_status;
906
907 ccw->cmd_code = CCW_CMD_READ_STATUS;
908 ccw->flags = 0;
909 ccw->count = sizeof(*vcdev->status);
910 ccw->cda = (__u32)(unsigned long)vcdev->status;
911 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
912        /*
913         * If the channel program failed (should only happen if the device
914         * was hotunplugged, and then we clean up via the machine check
915         * handler anyway), vcdev->status was not overwritten and we just
916         * return the old status, which is fine.
917         */
918 kfree(ccw);
895 919
896 return *vcdev->status; 920 return *vcdev->status;
897} 921}
@@ -920,7 +944,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
920 kfree(ccw); 944 kfree(ccw);
921} 945}
922 946
923static struct virtio_config_ops virtio_ccw_config_ops = { 947static const struct virtio_config_ops virtio_ccw_config_ops = {
924 .get_features = virtio_ccw_get_features, 948 .get_features = virtio_ccw_get_features,
925 .finalize_features = virtio_ccw_finalize_features, 949 .finalize_features = virtio_ccw_finalize_features,
926 .get = virtio_ccw_get_config, 950 .get = virtio_ccw_get_config,
@@ -987,6 +1011,7 @@ static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
987 case VIRTIO_CCW_DOING_READ_CONFIG: 1011 case VIRTIO_CCW_DOING_READ_CONFIG:
988 case VIRTIO_CCW_DOING_WRITE_CONFIG: 1012 case VIRTIO_CCW_DOING_WRITE_CONFIG:
989 case VIRTIO_CCW_DOING_WRITE_STATUS: 1013 case VIRTIO_CCW_DOING_WRITE_STATUS:
1014 case VIRTIO_CCW_DOING_READ_STATUS:
990 case VIRTIO_CCW_DOING_SET_VQ: 1015 case VIRTIO_CCW_DOING_SET_VQ:
991 case VIRTIO_CCW_DOING_SET_IND: 1016 case VIRTIO_CCW_DOING_SET_IND:
992 case VIRTIO_CCW_DOING_SET_CONF_IND: 1017 case VIRTIO_CCW_DOING_SET_CONF_IND:
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 4f56b1003cc7..5b48bedd7c38 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -50,9 +50,13 @@ struct aac_common aac_config = {
50 50
51static inline int aac_is_msix_mode(struct aac_dev *dev) 51static inline int aac_is_msix_mode(struct aac_dev *dev)
52{ 52{
53 u32 status; 53 u32 status = 0;
54 54
55 status = src_readl(dev, MUnit.OMR); 55 if (dev->pdev->device == PMC_DEVICE_S6 ||
56 dev->pdev->device == PMC_DEVICE_S7 ||
57 dev->pdev->device == PMC_DEVICE_S8) {
58 status = src_readl(dev, MUnit.OMR);
59 }
56 return (status & AAC_INT_MODE_MSIX); 60 return (status & AAC_INT_MODE_MSIX);
57} 61}
58 62
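
As I read the guard above, the MSI-X mode register is only meaningful on the PMC SRC-family controllers; the intent, schematically (device IDs as in the hunk):

    /* Only PMC_DEVICE_S6/S7/S8 adapters implement the SRC OMR register,
     * so the previously unconditional src_readl() appears to have been
     * reading a register that does not exist on older families; those
     * adapters now report status 0, i.e. legacy (non-MSI-X) mode. */
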
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index d9e15210b110..5caf5f3ff642 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -64,9 +64,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
64u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size; 64u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
65u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; 65u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
66 66
67#define BFAD_FW_FILE_CB "cbfw-3.2.3.0.bin" 67#define BFAD_FW_FILE_CB "cbfw-3.2.5.1.bin"
68#define BFAD_FW_FILE_CT "ctfw-3.2.3.0.bin" 68#define BFAD_FW_FILE_CT "ctfw-3.2.5.1.bin"
69#define BFAD_FW_FILE_CT2 "ct2fw-3.2.3.0.bin" 69#define BFAD_FW_FILE_CT2 "ct2fw-3.2.5.1.bin"
70 70
71static u32 *bfad_load_fwimg(struct pci_dev *pdev); 71static u32 *bfad_load_fwimg(struct pci_dev *pdev);
72static void bfad_free_fwimg(void); 72static void bfad_free_fwimg(void);
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index a9a00169ad91..b2e8c0dfc79c 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3363,7 +3363,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job)
3363 struct bfad_fcxp *drv_fcxp; 3363 struct bfad_fcxp *drv_fcxp;
3364 struct bfa_fcs_lport_s *fcs_port; 3364 struct bfa_fcs_lport_s *fcs_port;
3365 struct bfa_fcs_rport_s *fcs_rport; 3365 struct bfa_fcs_rport_s *fcs_rport;
3366 struct fc_bsg_request *bsg_request = bsg_request; 3366 struct fc_bsg_request *bsg_request = job->request;
3367 struct fc_bsg_reply *bsg_reply = job->reply; 3367 struct fc_bsg_reply *bsg_reply = job->reply;
3368 uint32_t command_type = bsg_request->msgcode; 3368 uint32_t command_type = bsg_request->msgcode;
3369 unsigned long flags; 3369 unsigned long flags;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index f9e862093a25..cfcfff48e8e1 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -58,7 +58,7 @@
58#ifdef BFA_DRIVER_VERSION 58#ifdef BFA_DRIVER_VERSION
59#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION 59#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
60#else 60#else
61#define BFAD_DRIVER_VERSION "3.2.25.0" 61#define BFAD_DRIVER_VERSION "3.2.25.1"
62#endif 62#endif
63 63
64#define BFAD_PROTO_NAME FCPI_NAME 64#define BFAD_PROTO_NAME FCPI_NAME
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 9ddc9200e0a4..9e4b7709043e 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -248,6 +248,7 @@ struct fnic {
248 struct completion *remove_wait; /* device remove thread blocks */ 248 struct completion *remove_wait; /* device remove thread blocks */
249 249
250 atomic_t in_flight; /* io counter */ 250 atomic_t in_flight; /* io counter */
251 bool internal_reset_inprogress;
251 u32 _reserved; /* fill hole */ 252 u32 _reserved; /* fill hole */
252 unsigned long state_flags; /* protected by host lock */ 253 unsigned long state_flags; /* protected by host lock */
253 enum fnic_state state; 254 enum fnic_state state;
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 2544a37ece0a..adb3d5871e74 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2581,6 +2581,19 @@ int fnic_host_reset(struct scsi_cmnd *sc)
2581 unsigned long wait_host_tmo; 2581 unsigned long wait_host_tmo;
2582 struct Scsi_Host *shost = sc->device->host; 2582 struct Scsi_Host *shost = sc->device->host;
2583 struct fc_lport *lp = shost_priv(shost); 2583 struct fc_lport *lp = shost_priv(shost);
2584 struct fnic *fnic = lport_priv(lp);
2585 unsigned long flags;
2586
2587 spin_lock_irqsave(&fnic->fnic_lock, flags);
2588 if (fnic->internal_reset_inprogress == 0) {
2589 fnic->internal_reset_inprogress = 1;
2590 } else {
2591 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2592 FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
2593 "host reset in progress skipping another host reset\n");
2594 return SUCCESS;
2595 }
2596 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2584 2597
2585 /* 2598 /*
2586 * If fnic_reset is successful, wait for fabric login to complete 2599 * If fnic_reset is successful, wait for fabric login to complete
@@ -2601,6 +2614,9 @@ int fnic_host_reset(struct scsi_cmnd *sc)
2601 } 2614 }
2602 } 2615 }
2603 2616
2617 spin_lock_irqsave(&fnic->fnic_lock, flags);
2618 fnic->internal_reset_inprogress = 0;
2619 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2604 return ret; 2620 return ret;
2605} 2621}
2606 2622
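
Condensed, the fnic change is a classic claim-flag pattern: test and set under the spinlock, do the slow work unlocked, then clear under the lock again. The shape, abridged from the two hunks above:

    spin_lock_irqsave(&fnic->fnic_lock, flags);
    if (fnic->internal_reset_inprogress) {
            spin_unlock_irqrestore(&fnic->fnic_lock, flags);
            return SUCCESS;               /* another host reset already runs */
    }
    fnic->internal_reset_inprogress = 1;  /* claim */
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);

    /* ... fnic_reset() and fabric re-login happen unlocked ... */

    spin_lock_irqsave(&fnic->fnic_lock, flags);
    fnic->internal_reset_inprogress = 0;  /* release on every exit path */
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);
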
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 3d3768aaab4f..0f807798c624 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -46,6 +46,7 @@
46 46
47#define INITIAL_SRP_LIMIT 800 47#define INITIAL_SRP_LIMIT 800
48#define DEFAULT_MAX_SECTORS 256 48#define DEFAULT_MAX_SECTORS 256
 49#define MAX_TXU (1024 * 1024)
49 50
50static uint max_vdma_size = MAX_H_COPY_RDMA; 51static uint max_vdma_size = MAX_H_COPY_RDMA;
51 52
@@ -1391,7 +1392,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1391 } 1392 }
1392 1393
1393 info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token, 1394 info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
1394 GFP_KERNEL); 1395 GFP_ATOMIC);
1395 if (!info) { 1396 if (!info) {
1396 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", 1397 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1397 iue->target); 1398 iue->target);
@@ -1443,7 +1444,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
1443 info->mad_version = cpu_to_be32(MAD_VERSION_1); 1444 info->mad_version = cpu_to_be32(MAD_VERSION_1);
1444 info->os_type = cpu_to_be32(LINUX); 1445 info->os_type = cpu_to_be32(LINUX);
1445 memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu)); 1446 memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
1446 info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE); 1447 info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
1447 1448
1448 dma_wmb(); 1449 dma_wmb();
1449 rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn, 1450 rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
@@ -1509,7 +1510,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
1509 } 1510 }
1510 1511
1511 cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token, 1512 cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
1512 GFP_KERNEL); 1513 GFP_ATOMIC);
1513 if (!cap) { 1514 if (!cap) {
1514 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", 1515 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
1515 iue->target); 1516 iue->target);
@@ -3585,7 +3586,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
3585 1, 1); 3586 1, 1);
3586 if (rc) { 3587 if (rc) {
3587 pr_err("srp_transfer_data() failed: %d\n", rc); 3588 pr_err("srp_transfer_data() failed: %d\n", rc);
3588 return -EAGAIN; 3589 return -EIO;
3589 } 3590 }
3590 /* 3591 /*
3591 * We now tell TCM to add this WRITE CDB directly into the TCM storage 3592 * We now tell TCM to add this WRITE CDB directly into the TCM storage
@@ -3815,6 +3816,7 @@ static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
3815static const struct target_core_fabric_ops ibmvscsis_ops = { 3816static const struct target_core_fabric_ops ibmvscsis_ops = {
3816 .module = THIS_MODULE, 3817 .module = THIS_MODULE,
3817 .name = "ibmvscsis", 3818 .name = "ibmvscsis",
3819 .max_data_sg_nents = MAX_TXU / PAGE_SIZE,
3818 .get_fabric_name = ibmvscsis_get_fabric_name, 3820 .get_fabric_name = ibmvscsis_get_fabric_name,
3819 .tpg_get_wwn = ibmvscsis_get_fabric_wwn, 3821 .tpg_get_wwn = ibmvscsis_get_fabric_wwn,
3820 .tpg_get_tag = ibmvscsis_get_tag, 3822 .tpg_get_tag = ibmvscsis_get_tag,
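
Two things pair up in the ibmvscsis hunks: the GFP_ATOMIC switch (these MAD handlers can run with a lock held, so sleeping allocations are unsafe, as I read it) and the new MAX_TXU, which ties the advertised transfer size to the TCM scatter-gather limit. The arithmetic, assuming 4 KiB pages:

    /* .max_data_sg_nents = MAX_TXU / PAGE_SIZE
     *                    = (1024 * 1024) / 4096 = 256 SG entries,
     * matching the fixed 1 MiB port_max_txu now sent in the adapter-info
     * MAD; the old 128 * PAGE_SIZE value varied with page size (512 KiB
     * on 4 KiB pages, 8 MiB on 64 KiB pages). */
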
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 236e4e51d161..7b6bd8ed0d0b 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
3590 } else { 3590 } else {
3591 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 3591 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3592 lpfc_els_free_data(phba, buf_ptr1); 3592 lpfc_els_free_data(phba, buf_ptr1);
3593 elsiocb->context2 = NULL;
3593 } 3594 }
3594 } 3595 }
3595 3596
3596 if (elsiocb->context3) { 3597 if (elsiocb->context3) {
3597 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 3598 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
3598 lpfc_els_free_bpl(phba, buf_ptr); 3599 lpfc_els_free_bpl(phba, buf_ptr);
3600 elsiocb->context3 = NULL;
3599 } 3601 }
3600 lpfc_sli_release_iocbq(phba, elsiocb); 3602 lpfc_sli_release_iocbq(phba, elsiocb);
3601 return 0; 3603 return 0;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4faa7672fc1d..a78a3df68f67 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5954,18 +5954,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5954 5954
5955 free_vfi_bmask: 5955 free_vfi_bmask:
5956 kfree(phba->sli4_hba.vfi_bmask); 5956 kfree(phba->sli4_hba.vfi_bmask);
5957 phba->sli4_hba.vfi_bmask = NULL;
5957 free_xri_ids: 5958 free_xri_ids:
5958 kfree(phba->sli4_hba.xri_ids); 5959 kfree(phba->sli4_hba.xri_ids);
5960 phba->sli4_hba.xri_ids = NULL;
5959 free_xri_bmask: 5961 free_xri_bmask:
5960 kfree(phba->sli4_hba.xri_bmask); 5962 kfree(phba->sli4_hba.xri_bmask);
5963 phba->sli4_hba.xri_bmask = NULL;
5961 free_vpi_ids: 5964 free_vpi_ids:
5962 kfree(phba->vpi_ids); 5965 kfree(phba->vpi_ids);
5966 phba->vpi_ids = NULL;
5963 free_vpi_bmask: 5967 free_vpi_bmask:
5964 kfree(phba->vpi_bmask); 5968 kfree(phba->vpi_bmask);
5969 phba->vpi_bmask = NULL;
5965 free_rpi_ids: 5970 free_rpi_ids:
5966 kfree(phba->sli4_hba.rpi_ids); 5971 kfree(phba->sli4_hba.rpi_ids);
5972 phba->sli4_hba.rpi_ids = NULL;
5967 free_rpi_bmask: 5973 free_rpi_bmask:
5968 kfree(phba->sli4_hba.rpi_bmask); 5974 kfree(phba->sli4_hba.rpi_bmask);
5975 phba->sli4_hba.rpi_bmask = NULL;
5969 err_exit: 5976 err_exit:
5970 return rc; 5977 return rc;
5971} 5978}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 394fe1338d09..dcb33f4fa687 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -393,6 +393,7 @@ struct MPT3SAS_TARGET {
393 * @eedp_enable: eedp support enable bit 393 * @eedp_enable: eedp support enable bit
394 * @eedp_type: 0(type_1), 1(type_2), 2(type_3) 394 * @eedp_type: 0(type_1), 1(type_2), 2(type_3)
395 * @eedp_block_length: block size 395 * @eedp_block_length: block size
396 * @ata_command_pending: SATL passthrough outstanding for device
396 */ 397 */
397struct MPT3SAS_DEVICE { 398struct MPT3SAS_DEVICE {
398 struct MPT3SAS_TARGET *sas_target; 399 struct MPT3SAS_TARGET *sas_target;
@@ -404,6 +405,17 @@ struct MPT3SAS_DEVICE {
404 u8 ignore_delay_remove; 405 u8 ignore_delay_remove;
405 /* Iopriority Command Handling */ 406 /* Iopriority Command Handling */
406 u8 ncq_prio_enable; 407 u8 ncq_prio_enable;
408 /*
409 * Bug workaround for SATL handling: the mpt2/3sas firmware
410 * doesn't return BUSY or TASK_SET_FULL for subsequent
 411 * commands while a SATL pass through is in operation, as the
 412 * spec requires; it simply does nothing with them until the
 413 * pass through completes, possibly causing them to time out if
 414 * the passthrough is a long-running command (like format or
415 * secure erase). This variable allows us to do the right
416 * thing while a SATL command is pending.
417 */
418 unsigned long ata_command_pending;
407 419
408}; 420};
409 421
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b5c966e319d3..0b5b423b1db0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -51,6 +51,7 @@
51#include <linux/workqueue.h> 51#include <linux/workqueue.h>
52#include <linux/delay.h> 52#include <linux/delay.h>
53#include <linux/pci.h> 53#include <linux/pci.h>
54#include <linux/pci-aspm.h>
54#include <linux/interrupt.h> 55#include <linux/interrupt.h>
55#include <linux/aer.h> 56#include <linux/aer.h>
56#include <linux/raid_class.h> 57#include <linux/raid_class.h>
@@ -3899,9 +3900,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
3899 } 3900 }
3900} 3901}
3901 3902
3902static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) 3903static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
3903{ 3904{
3904 return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); 3905 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
3906
3907 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
3908 return 0;
3909
3910 if (pending)
3911 return test_and_set_bit(0, &priv->ata_command_pending);
3912
3913 clear_bit(0, &priv->ata_command_pending);
3914 return 0;
3905} 3915}
3906 3916
3907/** 3917/**
@@ -3925,9 +3935,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
3925 if (!scmd) 3935 if (!scmd)
3926 continue; 3936 continue;
3927 count++; 3937 count++;
3928 if (ata_12_16_cmd(scmd)) 3938 _scsih_set_satl_pending(scmd, false);
3929 scsi_internal_device_unblock(scmd->device,
3930 SDEV_RUNNING);
3931 mpt3sas_base_free_smid(ioc, smid); 3939 mpt3sas_base_free_smid(ioc, smid);
3932 scsi_dma_unmap(scmd); 3940 scsi_dma_unmap(scmd);
3933 if (ioc->pci_error_recovery) 3941 if (ioc->pci_error_recovery)
@@ -4063,13 +4071,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4063 if (ioc->logging_level & MPT_DEBUG_SCSI) 4071 if (ioc->logging_level & MPT_DEBUG_SCSI)
4064 scsi_print_command(scmd); 4072 scsi_print_command(scmd);
4065 4073
4066 /*
4067 * Lock the device for any subsequent command until command is
4068 * done.
4069 */
4070 if (ata_12_16_cmd(scmd))
4071 scsi_internal_device_block(scmd->device);
4072
4073 sas_device_priv_data = scmd->device->hostdata; 4074 sas_device_priv_data = scmd->device->hostdata;
4074 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 4075 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4075 scmd->result = DID_NO_CONNECT << 16; 4076 scmd->result = DID_NO_CONNECT << 16;
@@ -4083,6 +4084,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4083 return 0; 4084 return 0;
4084 } 4085 }
4085 4086
4087 /*
 4088 * Bug workaround for firmware SATL handling. The loop
 4089 * is based on atomic operations and ensures consistency
 4090 * since we're lockless at this point.
4091 */
4092 do {
4093 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
4094 scmd->result = SAM_STAT_BUSY;
4095 scmd->scsi_done(scmd);
4096 return 0;
4097 }
4098 } while (_scsih_set_satl_pending(scmd, true));
4099
4086 sas_target_priv_data = sas_device_priv_data->sas_target; 4100 sas_target_priv_data = sas_device_priv_data->sas_target;
4087 4101
4088 /* invalid device handle */ 4102 /* invalid device handle */
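
The queuecommand gate above relies on test_and_set_bit() returning the previous bit value, which closes the race between seeing the bit clear and claiming it. A standalone C11 analogue of the same primitive (all names hypothetical, not mpt3sas API):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_flag satl_pending = ATOMIC_FLAG_INIT;

    /* Returns true if the caller now owns the SATL slot; like
     * test_and_set_bit(), the returned old value decides who won the race. */
    static bool claim_satl_slot(void)
    {
            return !atomic_flag_test_and_set(&satl_pending);
    }

    static void release_satl_slot(void)
    {
            atomic_flag_clear(&satl_pending);
    }
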
@@ -4644,14 +4658,14 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4644 struct MPT3SAS_DEVICE *sas_device_priv_data; 4658 struct MPT3SAS_DEVICE *sas_device_priv_data;
4645 u32 response_code = 0; 4659 u32 response_code = 0;
4646 unsigned long flags; 4660 unsigned long flags;
4661 unsigned int sector_sz;
4647 4662
4648 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 4663 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
4649 scmd = _scsih_scsi_lookup_get_clear(ioc, smid); 4664 scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
4650 if (scmd == NULL) 4665 if (scmd == NULL)
4651 return 1; 4666 return 1;
4652 4667
4653 if (ata_12_16_cmd(scmd)) 4668 _scsih_set_satl_pending(scmd, false);
4654 scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
4655 4669
4656 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4670 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4657 4671
@@ -4703,6 +4717,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4703 } 4717 }
4704 4718
4705 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); 4719 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
4720
4721 /* In case of bogus fw or device, we could end up having
4722 * unaligned partial completion. We can force alignment here,
4723 * then scsi-ml does not need to handle this misbehavior.
4724 */
4725 sector_sz = scmd->device->sector_size;
4726 if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz &&
4727 xfer_cnt % sector_sz)) {
4728 sdev_printk(KERN_INFO, scmd->device,
4729 "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
4730 xfer_cnt, sector_sz);
4731 xfer_cnt = round_down(xfer_cnt, sector_sz);
4732 }
4733
4706 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); 4734 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
4707 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) 4735 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
4708 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); 4736 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
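
The alignment fix above is easiest to check numerically; with a 512-byte sector size and a bogus TransferCount that ends mid-sector (values illustrative):

    /* xfer_cnt = 1000, sector_sz = 512:
     *   1000 % 512 = 488            -> unaligned partial completion
     *   round_down(1000, 512) = 512 -> resid = bufflen - 512,
     * so the midlayer resumes the request on a sector boundary
     * instead of having to handle a mid-sector residual */
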
@@ -8734,6 +8762,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8734 8762
8735 switch (hba_mpi_version) { 8763 switch (hba_mpi_version) {
8736 case MPI2_VERSION: 8764 case MPI2_VERSION:
8765 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
8766 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
8737 /* Use mpt2sas driver host template for SAS 2.0 HBA's */ 8767 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
8738 shost = scsi_host_alloc(&mpt2sas_driver_template, 8768 shost = scsi_host_alloc(&mpt2sas_driver_template,
8739 sizeof(struct MPT3SAS_ADAPTER)); 8769 sizeof(struct MPT3SAS_ADAPTER));
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
index 23ca8a274586..21331453db7b 100644
--- a/drivers/scsi/qedi/Kconfig
+++ b/drivers/scsi/qedi/Kconfig
@@ -1,6 +1,6 @@
1config QEDI 1config QEDI
2 tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support" 2 tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support"
3 depends on PCI && SCSI 3 depends on PCI && SCSI && UIO
4 depends on QED 4 depends on QED
5 select SCSI_ISCSI_ATTRS 5 select SCSI_ISCSI_ATTRS
6 select QED_LL2 6 select QED_LL2
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 47eb4d545d13..f201f4099620 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
243 struct qla_hw_data *ha = vha->hw; 243 struct qla_hw_data *ha = vha->hw;
244 ssize_t rval = 0; 244 ssize_t rval = 0;
245 245
246 mutex_lock(&ha->optrom_mutex);
247
246 if (ha->optrom_state != QLA_SREADING) 248 if (ha->optrom_state != QLA_SREADING)
247 return 0; 249 goto out;
248 250
249 mutex_lock(&ha->optrom_mutex);
250 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, 251 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
251 ha->optrom_region_size); 252 ha->optrom_region_size);
253
254out:
252 mutex_unlock(&ha->optrom_mutex); 255 mutex_unlock(&ha->optrom_mutex);
253 256
254 return rval; 257 return rval;
@@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
263 struct device, kobj))); 266 struct device, kobj)));
264 struct qla_hw_data *ha = vha->hw; 267 struct qla_hw_data *ha = vha->hw;
265 268
266 if (ha->optrom_state != QLA_SWRITING) 269 mutex_lock(&ha->optrom_mutex);
270
271 if (ha->optrom_state != QLA_SWRITING) {
272 mutex_unlock(&ha->optrom_mutex);
267 return -EINVAL; 273 return -EINVAL;
268 if (off > ha->optrom_region_size) 274 }
275 if (off > ha->optrom_region_size) {
276 mutex_unlock(&ha->optrom_mutex);
269 return -ERANGE; 277 return -ERANGE;
278 }
270 if (off + count > ha->optrom_region_size) 279 if (off + count > ha->optrom_region_size)
271 count = ha->optrom_region_size - off; 280 count = ha->optrom_region_size - off;
272 281
273 mutex_lock(&ha->optrom_mutex);
274 memcpy(&ha->optrom_buffer[off], buf, count); 282 memcpy(&ha->optrom_buffer[off], buf, count);
275 mutex_unlock(&ha->optrom_mutex); 283 mutex_unlock(&ha->optrom_mutex);
276 284
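
Both sysfs hunks close the same check-then-use race: optrom_state was tested before the mutex was taken, so a concurrent state change could invalidate optrom_buffer between the check and the use. The window, schematically (old read path):

    if (ha->optrom_state != QLA_SREADING)    /* checked without the lock */
            return 0;
    /* <-- another thread may free or repurpose optrom_buffer here */
    mutex_lock(&ha->optrom_mutex);
    rval = memory_read_from_buffer(/* ... possibly stale state ... */);
    mutex_unlock(&ha->optrom_mutex);
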
@@ -753,7 +761,6 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
753 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 761 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
754 struct device, kobj))); 762 struct device, kobj)));
755 int type; 763 int type;
756 int rval = 0;
757 port_id_t did; 764 port_id_t did;
758 765
759 type = simple_strtol(buf, NULL, 10); 766 type = simple_strtol(buf, NULL, 10);
@@ -767,7 +774,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
767 774
768 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type); 775 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
769 776
770 rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did); 777 qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
771 return count; 778 return count;
772} 779}
773 780
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index f7df01b76714..5b1287a63c49 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1556,7 +1556,8 @@ typedef struct {
1556struct atio { 1556struct atio {
1557 uint8_t entry_type; /* Entry type. */ 1557 uint8_t entry_type; /* Entry type. */
1558 uint8_t entry_count; /* Entry count. */ 1558 uint8_t entry_count; /* Entry count. */
1559 uint8_t data[58]; 1559 __le16 attr_n_length;
1560 uint8_t data[56];
1560 uint32_t signature; 1561 uint32_t signature;
1561#define ATIO_PROCESSED 0xDEADDEAD /* Signature */ 1562#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
1562}; 1563};
@@ -2732,7 +2733,7 @@ struct isp_operations {
2732#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7) 2733#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
2733#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1) 2734#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)
2734 2735
2735#define QLA_MSIX_DEFAULT 0x00 2736#define QLA_BASE_VECTORS 2 /* default + RSP */
2736#define QLA_MSIX_RSP_Q 0x01 2737#define QLA_MSIX_RSP_Q 0x01
2737#define QLA_ATIO_VECTOR 0x02 2738#define QLA_ATIO_VECTOR 0x02
2738#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03 2739#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
@@ -2754,7 +2755,6 @@ struct qla_msix_entry {
2754 uint16_t entry; 2755 uint16_t entry;
2755 char name[30]; 2756 char name[30];
2756 void *handle; 2757 void *handle;
2757 struct irq_affinity_notify irq_notify;
2758 int cpuid; 2758 int cpuid;
2759}; 2759};
2760 2760
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 632d5f30386a..7b6317c8c2e9 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1191,7 +1191,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
1191 1191
1192 /* Wait for soft-reset to complete. */ 1192 /* Wait for soft-reset to complete. */
1193 RD_REG_DWORD(&reg->ctrl_status); 1193 RD_REG_DWORD(&reg->ctrl_status);
1194 for (cnt = 0; cnt < 6000000; cnt++) { 1194 for (cnt = 0; cnt < 60; cnt++) {
1195 barrier(); 1195 barrier();
1196 if ((RD_REG_DWORD(&reg->ctrl_status) & 1196 if ((RD_REG_DWORD(&reg->ctrl_status) &
1197 CSRX_ISP_SOFT_RESET) == 0) 1197 CSRX_ISP_SOFT_RESET) == 0)
@@ -1234,7 +1234,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
1234 RD_REG_DWORD(&reg->hccr); 1234 RD_REG_DWORD(&reg->hccr);
1235 1235
1236 RD_REG_WORD(&reg->mailbox0); 1236 RD_REG_WORD(&reg->mailbox0);
1237 for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 && 1237 for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
1238 rval == QLA_SUCCESS; cnt--) { 1238 rval == QLA_SUCCESS; cnt--) {
1239 barrier(); 1239 barrier();
1240 if (cnt) 1240 if (cnt)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5093ca9b02ec..a94b0b6bd030 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -19,10 +19,6 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
19static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 19static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
20static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 20static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
21 sts_entry_t *); 21 sts_entry_t *);
22static void qla_irq_affinity_notify(struct irq_affinity_notify *,
23 const cpumask_t *);
24static void qla_irq_affinity_release(struct kref *);
25
26 22
27/** 23/**
28 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 24 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -2496,6 +2492,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2496 if (pkt->entry_status & RF_BUSY) 2492 if (pkt->entry_status & RF_BUSY)
2497 res = DID_BUS_BUSY << 16; 2493 res = DID_BUS_BUSY << 16;
2498 2494
2495 if (pkt->entry_type == NOTIFY_ACK_TYPE &&
2496 pkt->handle == QLA_TGT_SKIP_HANDLE)
2497 return;
2498
2499 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2499 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2500 if (sp) { 2500 if (sp) {
2501 sp->done(ha, sp, res); 2501 sp->done(ha, sp, res);
@@ -2572,14 +2572,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2572 if (!vha->flags.online) 2572 if (!vha->flags.online)
2573 return; 2573 return;
2574 2574
2575 if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
2576 /* if kernel does not notify qla of IRQ's CPU change,
2577 * then set it here.
2578 */
2579 rsp->msix->cpuid = smp_processor_id();
2580 ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
2581 }
2582
2583 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2575 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2584 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 2576 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2585 2577
@@ -3018,13 +3010,20 @@ static struct qla_init_msix_entry qla82xx_msix_entries[] = {
3018static int 3010static int
3019qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 3011qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3020{ 3012{
3021#define MIN_MSIX_COUNT 2
3022 int i, ret; 3013 int i, ret;
3023 struct qla_msix_entry *qentry; 3014 struct qla_msix_entry *qentry;
3024 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3015 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3016 struct irq_affinity desc = {
3017 .pre_vectors = QLA_BASE_VECTORS,
3018 };
3019
3020 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha))
3021 desc.pre_vectors++;
3022
3023 ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
3024 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
3025 &desc);
3025 3026
3026 ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
3027 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
3028 if (ret < 0) { 3027 if (ret < 0) {
3029 ql_log(ql_log_fatal, vha, 0x00c7, 3028 ql_log(ql_log_fatal, vha, 0x00c7,
3030 "MSI-X: Failed to enable support, " 3029 "MSI-X: Failed to enable support, "
@@ -3069,13 +3068,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3069 qentry->have_irq = 0; 3068 qentry->have_irq = 0;
3070 qentry->in_use = 0; 3069 qentry->in_use = 0;
3071 qentry->handle = NULL; 3070 qentry->handle = NULL;
3072 qentry->irq_notify.notify = qla_irq_affinity_notify;
3073 qentry->irq_notify.release = qla_irq_affinity_release;
3074 qentry->cpuid = -1;
3075 } 3071 }
3076 3072
3077 /* Enable MSI-X vectors for the base queue */ 3073 /* Enable MSI-X vectors for the base queue */
3078 for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) { 3074 for (i = 0; i < QLA_BASE_VECTORS; i++) {
3079 qentry = &ha->msix_entries[i]; 3075 qentry = &ha->msix_entries[i];
3080 qentry->handle = rsp; 3076 qentry->handle = rsp;
3081 rsp->msix = qentry; 3077 rsp->msix = qentry;
@@ -3093,18 +3089,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3093 goto msix_register_fail; 3089 goto msix_register_fail;
3094 qentry->have_irq = 1; 3090 qentry->have_irq = 1;
3095 qentry->in_use = 1; 3091 qentry->in_use = 1;
3096
3097 /* Register for CPU affinity notification. */
3098 irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
3099
3100 /* Schedule work (ie. trigger a notification) to read cpu
3101 * mask for this specific irq.
3102 * kref_get is required because
3103 * irq_affinity_notify() will do
3104 * kref_put().
3105 */
3106 kref_get(&qentry->irq_notify.kref);
3107 schedule_work(&qentry->irq_notify.work);
3108 } 3092 }
3109 3093
3110 /* 3094 /*
@@ -3258,7 +3242,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
3258 * from a probe failure context. 3242 * from a probe failure context.
3259 */ 3243 */
3260 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 3244 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3261 return; 3245 goto free_irqs;
3262 rsp = ha->rsp_q_map[0]; 3246 rsp = ha->rsp_q_map[0];
3263 3247
3264 if (ha->flags.msix_enabled) { 3248 if (ha->flags.msix_enabled) {
@@ -3278,6 +3262,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
3278 free_irq(pci_irq_vector(ha->pdev, 0), rsp); 3262 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
3279 } 3263 }
3280 3264
3265free_irqs:
3281 pci_free_irq_vectors(ha->pdev); 3266 pci_free_irq_vectors(ha->pdev);
3282} 3267}
3283 3268
@@ -3301,49 +3286,3 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3301 msix->handle = qpair; 3286 msix->handle = qpair;
3302 return ret; 3287 return ret;
3303} 3288}
3304
3305
3306/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
3307static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
3308 const cpumask_t *mask)
3309{
3310 struct qla_msix_entry *e =
3311 container_of(notify, struct qla_msix_entry, irq_notify);
3312 struct qla_hw_data *ha;
3313 struct scsi_qla_host *base_vha;
3314 struct rsp_que *rsp = e->handle;
3315
3316 /* user is recommended to set mask to just 1 cpu */
3317 e->cpuid = cpumask_first(mask);
3318
3319 ha = rsp->hw;
3320 base_vha = pci_get_drvdata(ha->pdev);
3321
3322 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3323 "%s: host %ld : vector %d cpu %d \n", __func__,
3324 base_vha->host_no, e->vector, e->cpuid);
3325
3326 if (e->have_irq) {
3327 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
3328 (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
3329 ha->tgt.rspq_vector_cpuid = e->cpuid;
3330 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3331 "%s: host%ld: rspq vector %d cpu %d runtime change\n",
3332 __func__, base_vha->host_no, e->vector, e->cpuid);
3333 }
3334 }
3335}
3336
3337static void qla_irq_affinity_release(struct kref *ref)
3338{
3339 struct irq_affinity_notify *notify =
3340 container_of(ref, struct irq_affinity_notify, kref);
3341 struct qla_msix_entry *e =
3342 container_of(notify, struct qla_msix_entry, irq_notify);
3343 struct rsp_que *rsp = e->handle;
3344 struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
3345
3346 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3347 "%s: host%ld: vector %d cpu %d\n", __func__,
3348 base_vha->host_no, e->vector, e->cpuid);
3349}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2819ceb96041..67f64db390b0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,7 +10,7 @@
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/gfp.h> 11#include <linux/gfp.h>
12 12
13struct rom_cmd { 13static struct rom_cmd {
14 uint16_t cmd; 14 uint16_t cmd;
15} rom_cmds[] = { 15} rom_cmds[] = {
16 { MBC_LOAD_RAM }, 16 { MBC_LOAD_RAM },
@@ -101,12 +101,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
101 return QLA_FUNCTION_TIMEOUT; 101 return QLA_FUNCTION_TIMEOUT;
102 } 102 }
103 103
104 /* if PCI error, then avoid mbx processing.*/ 104 /* if PCI error, then avoid mbx processing.*/
105 if (test_bit(PCI_ERR, &base_vha->dpc_flags)) { 105 if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
106 ql_log(ql_log_warn, vha, 0x1191, 106 ql_log(ql_log_warn, vha, 0x1191,
107 "PCI error, exiting.\n"); 107 "PCI error, exiting.\n");
108 return QLA_FUNCTION_TIMEOUT; 108 return QLA_FUNCTION_TIMEOUT;
109 } 109 }
110 110
111 reg = ha->iobase; 111 reg = ha->iobase;
112 io_lock_on = base_vha->flags.init_done; 112 io_lock_on = base_vha->flags.init_done;
@@ -323,20 +323,33 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
323 } 323 }
324 } else { 324 } else {
325 325
326 uint16_t mb0; 326 uint16_t mb[8];
327 uint32_t ictrl; 327 uint32_t ictrl, host_status, hccr;
328 uint16_t w; 328 uint16_t w;
329 329
330 if (IS_FWI2_CAPABLE(ha)) { 330 if (IS_FWI2_CAPABLE(ha)) {
331 mb0 = RD_REG_WORD(&reg->isp24.mailbox0); 331 mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
332 mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
333 mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
334 mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
335 mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
332 ictrl = RD_REG_DWORD(&reg->isp24.ictrl); 336 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
337 host_status = RD_REG_DWORD(&reg->isp24.host_status);
338 hccr = RD_REG_DWORD(&reg->isp24.hccr);
339
340 ql_log(ql_log_warn, vha, 0x1119,
341 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
342 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
343 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
344 mb[7], host_status, hccr);
345
333 } else { 346 } else {
334 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0); 347 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
335 ictrl = RD_REG_WORD(&reg->isp.ictrl); 348 ictrl = RD_REG_WORD(&reg->isp.ictrl);
349 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
350 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
351 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
336 } 352 }
337 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
338 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
339 "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
340 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); 353 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
341 354
342 /* Capture FW dump only, if PCI device active */ 355 /* Capture FW dump only, if PCI device active */
@@ -684,7 +697,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
684 mbx_cmd_t mc; 697 mbx_cmd_t mc;
685 mbx_cmd_t *mcp = &mc; 698 mbx_cmd_t *mcp = &mc;
686 struct qla_hw_data *ha = vha->hw; 699 struct qla_hw_data *ha = vha->hw;
687 int configured_count;
688 700
689 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a, 701 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
690 "Entered %s.\n", __func__); 702 "Entered %s.\n", __func__);
@@ -707,7 +719,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
707 /*EMPTY*/ 719 /*EMPTY*/
708 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval); 720 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
709 } else { 721 } else {
710 configured_count = mcp->mb[11];
711 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c, 722 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
712 "Done %s.\n", __func__); 723 "Done %s.\n", __func__);
713 } 724 }
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 54380b434b30..0a1723cc08cf 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -42,6 +42,11 @@ static int qla82xx_crb_table_initialized;
42 (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ 42 (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
43 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) 43 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
44 44
45const int MD_MIU_TEST_AGT_RDDATA[] = {
46 0x410000A8, 0x410000AC,
47 0x410000B8, 0x410000BC
48};
49
45static void qla82xx_crb_addr_transform_setup(void) 50static void qla82xx_crb_addr_transform_setup(void)
46{ 51{
47 qla82xx_crb_addr_transform(XDMA); 52 qla82xx_crb_addr_transform(XDMA);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6201dce3553b..77624eac95a4 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1176,8 +1176,7 @@ struct qla82xx_md_entry_queue {
1176#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 1176#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
1177#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 1177#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
1178 1178
1179static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, 1179extern const int MD_MIU_TEST_AGT_RDDATA[4];
1180 0x410000B8, 0x410000BC };
1181 1180
1182#define CRB_NIU_XG_PAUSE_CTL_P0 0x1 1181#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
1183#define CRB_NIU_XG_PAUSE_CTL_P1 0x8 1182#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
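A static const array defined in a header is re-instantiated in every
translation unit that includes it, wasting space and tripping unused-variable
warnings. The hunk above and the qla_nx2.[ch] change below apply the usual
cure: declare in the header, define once in a .c file. A minimal sketch of
the pattern, with hypothetical file and symbol names:

	/* regs.h: declaration only -- no storage is allocated here. */
	extern const int test_agt_rddata[4];

	/* regs.c: the single definition every includer links against. */
	const int test_agt_rddata[4] = {
		0x410000A8, 0x410000AC,
		0x410000B8, 0x410000BC,
	};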
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 007192d7bad8..dc1ec9b61027 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -15,6 +15,23 @@
15 15
16#define TIMEOUT_100_MS 100 16#define TIMEOUT_100_MS 100
17 17
18static const uint32_t qla8044_reg_tbl[] = {
19 QLA8044_PEG_HALT_STATUS1,
20 QLA8044_PEG_HALT_STATUS2,
21 QLA8044_PEG_ALIVE_COUNTER,
22 QLA8044_CRB_DRV_ACTIVE,
23 QLA8044_CRB_DEV_STATE,
24 QLA8044_CRB_DRV_STATE,
25 QLA8044_CRB_DRV_SCRATCH,
26 QLA8044_CRB_DEV_PART_INFO1,
27 QLA8044_CRB_IDC_VER_MAJOR,
28 QLA8044_FW_VER_MAJOR,
29 QLA8044_FW_VER_MINOR,
30 QLA8044_FW_VER_SUB,
31 QLA8044_CMDPEG_STATE,
32 QLA8044_ASIC_TEMP,
33};
34
18/* 8044 Flash Read/Write functions */ 35/* 8044 Flash Read/Write functions */
19uint32_t 36uint32_t
20qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) 37qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index 02fe3c4cdf55..83c1b7e17c80 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -535,23 +535,6 @@ enum qla_regs {
535#define CRB_CMDPEG_CHECK_RETRY_COUNT 60 535#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
536#define CRB_CMDPEG_CHECK_DELAY 500 536#define CRB_CMDPEG_CHECK_DELAY 500
537 537
538static const uint32_t qla8044_reg_tbl[] = {
539 QLA8044_PEG_HALT_STATUS1,
540 QLA8044_PEG_HALT_STATUS2,
541 QLA8044_PEG_ALIVE_COUNTER,
542 QLA8044_CRB_DRV_ACTIVE,
543 QLA8044_CRB_DEV_STATE,
544 QLA8044_CRB_DRV_STATE,
545 QLA8044_CRB_DRV_SCRATCH,
546 QLA8044_CRB_DEV_PART_INFO1,
547 QLA8044_CRB_IDC_VER_MAJOR,
548 QLA8044_FW_VER_MAJOR,
549 QLA8044_FW_VER_MINOR,
550 QLA8044_FW_VER_SUB,
551 QLA8044_CMDPEG_STATE,
552 QLA8044_ASIC_TEMP,
553};
554
555/* MiniDump Structures */ 538/* MiniDump Structures */
556 539
557/* Driver_code is for driver to write some info about the entry 540/* Driver_code is for driver to write some info about the entry
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8521cfe302e9..40660461a4b5 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -466,7 +466,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
466 continue; 466 continue;
467 467
468 rsp = ha->rsp_q_map[cnt]; 468 rsp = ha->rsp_q_map[cnt];
469 clear_bit(cnt, ha->req_qid_map); 469 clear_bit(cnt, ha->rsp_qid_map);
470 ha->rsp_q_map[cnt] = NULL; 470 ha->rsp_q_map[cnt] = NULL;
471 spin_unlock_irqrestore(&ha->hardware_lock, flags); 471 spin_unlock_irqrestore(&ha->hardware_lock, flags);
472 qla2x00_free_rsp_que(ha, rsp); 472 qla2x00_free_rsp_que(ha, rsp);
@@ -1616,7 +1616,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1616 /* Don't abort commands in adapter during EEH 1616 /* Don't abort commands in adapter during EEH
1617 * recovery as it's not accessible/responding. 1617 * recovery as it's not accessible/responding.
1618 */ 1618 */
1619 if (!ha->flags.eeh_busy) { 1619 if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
1620 /* Get a reference to the sp and drop the lock. 1620 /* Get a reference to the sp and drop the lock.
1621 * The reference ensures this sp->done() call 1621 * The reference ensures this sp->done() call
1622 * - and not the call in qla2xxx_eh_abort() - 1622 * - and not the call in qla2xxx_eh_abort() -
@@ -3662,7 +3662,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3662 sizeof(struct ct6_dsd), 0, 3662 sizeof(struct ct6_dsd), 0,
3663 SLAB_HWCACHE_ALIGN, NULL); 3663 SLAB_HWCACHE_ALIGN, NULL);
3664 if (!ctx_cachep) 3664 if (!ctx_cachep)
3665 goto fail_free_gid_list; 3665 goto fail_free_srb_mempool;
3666 } 3666 }
3667 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, 3667 ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
3668 ctx_cachep); 3668 ctx_cachep);
@@ -3815,7 +3815,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3815 ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long), 3815 ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
3816 GFP_KERNEL); 3816 GFP_KERNEL);
3817 if (!ha->loop_id_map) 3817 if (!ha->loop_id_map)
3818 goto fail_async_pd; 3818 goto fail_loop_id_map;
3819 else { 3819 else {
3820 qla2x00_set_reserved_loop_ids(ha); 3820 qla2x00_set_reserved_loop_ids(ha);
3821 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, 3821 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
@@ -3824,6 +3824,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3824 3824
3825 return 0; 3825 return 0;
3826 3826
3827fail_loop_id_map:
3828 dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
3827fail_async_pd: 3829fail_async_pd:
3828 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); 3830 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
3829fail_ex_init_cb: 3831fail_ex_init_cb:
@@ -3851,6 +3853,10 @@ fail_free_ms_iocb:
3851 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 3853 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
3852 ha->ms_iocb = NULL; 3854 ha->ms_iocb = NULL;
3853 ha->ms_iocb_dma = 0; 3855 ha->ms_iocb_dma = 0;
3856
3857 if (ha->sns_cmd)
3858 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
3859 ha->sns_cmd, ha->sns_cmd_dma);
3854fail_dma_pool: 3860fail_dma_pool:
3855 if (IS_QLA82XX(ha) || ql2xenabledif) { 3861 if (IS_QLA82XX(ha) || ql2xenabledif) {
3856 dma_pool_destroy(ha->fcp_cmnd_dma_pool); 3862 dma_pool_destroy(ha->fcp_cmnd_dma_pool);
@@ -3868,10 +3874,12 @@ fail_free_nvram:
3868 kfree(ha->nvram); 3874 kfree(ha->nvram);
3869 ha->nvram = NULL; 3875 ha->nvram = NULL;
3870fail_free_ctx_mempool: 3876fail_free_ctx_mempool:
3871 mempool_destroy(ha->ctx_mempool); 3877 if (ha->ctx_mempool)
3878 mempool_destroy(ha->ctx_mempool);
3872 ha->ctx_mempool = NULL; 3879 ha->ctx_mempool = NULL;
3873fail_free_srb_mempool: 3880fail_free_srb_mempool:
3874 mempool_destroy(ha->srb_mempool); 3881 if (ha->srb_mempool)
3882 mempool_destroy(ha->srb_mempool);
3875 ha->srb_mempool = NULL; 3883 ha->srb_mempool = NULL;
3876fail_free_gid_list: 3884fail_free_gid_list:
3877 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 3885 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
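The new fail_loop_id_map label above follows the kernel's goto-unwind idiom:
labels appear in reverse order of acquisition, so a failure at step N jumps
to the label that releases steps N-1 down to 1, and nothing allocated after
the failure point is ever touched. A minimal sketch with hypothetical
allocators:

	static int setup(struct ctx *c)
	{
		c->a = alloc_a();		/* step 1 */
		if (!c->a)
			return -ENOMEM;
		c->b = alloc_b();		/* step 2 */
		if (!c->b)
			goto fail_a;
		c->c = alloc_c();		/* step 3 */
		if (!c->c)
			goto fail_b;
		return 0;

	fail_b:					/* undo step 2 */
		free_b(c->b);
	fail_a:					/* undo step 1 */
		free_a(c->a);
		return -ENOMEM;
	}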
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index bff9689f5ca9..e4fda84b959e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
668{ 668{
669 struct qla_hw_data *ha = vha->hw; 669 struct qla_hw_data *ha = vha->hw;
670 struct qla_tgt_sess *sess = NULL; 670 struct qla_tgt_sess *sess = NULL;
671 uint32_t unpacked_lun, lun = 0;
672 uint16_t loop_id; 671 uint16_t loop_id;
673 int res = 0; 672 int res = 0;
674 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; 673 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
675 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
676 unsigned long flags; 674 unsigned long flags;
677 675
678 loop_id = le16_to_cpu(n->u.isp24.nport_handle); 676 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
@@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
725 "loop_id %d)\n", vha->host_no, sess, sess->port_name, 723 "loop_id %d)\n", vha->host_no, sess, sess->port_name,
726 mcmd, loop_id); 724 mcmd, loop_id);
727 725
728 lun = a->u.isp24.fcp_cmnd.lun; 726 return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
729 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
730
731 return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
732 iocb, QLA24XX_MGMT_SEND_NACK);
733} 727}
734 728
735/* ha->tgt.sess_lock supposed to be held on entry */ 729/* ha->tgt.sess_lock supposed to be held on entry */
@@ -3067,7 +3061,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3067 3061
3068 pkt->entry_type = NOTIFY_ACK_TYPE; 3062 pkt->entry_type = NOTIFY_ACK_TYPE;
3069 pkt->entry_count = 1; 3063 pkt->entry_count = 1;
3070 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 3064 pkt->handle = QLA_TGT_SKIP_HANDLE;
3071 3065
3072 nack = (struct nack_to_isp *)pkt; 3066 nack = (struct nack_to_isp *)pkt;
3073 nack->ox_id = ntfy->ox_id; 3067 nack->ox_id = ntfy->ox_id;
@@ -3110,6 +3104,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3110#if 0 /* Todo */ 3104#if 0 /* Todo */
3111 if (rc == -ENOMEM) 3105 if (rc == -ENOMEM)
3112 qlt_alloc_qfull_cmd(vha, imm, 0, 0); 3106 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
3107#else
3108 if (rc) {
3109 }
3113#endif 3110#endif
3114 goto done; 3111 goto done;
3115 } 3112 }
@@ -6457,12 +6454,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6457 if (!vha->flags.online) 6454 if (!vha->flags.online)
6458 return; 6455 return;
6459 6456
6460 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { 6457 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
6458 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
6461 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6459 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6462 cnt = pkt->u.raw.entry_count; 6460 cnt = pkt->u.raw.entry_count;
6463 6461
6464 qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt, 6462 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
6465 ha_locked); 6463 /*
6464 * This packet is corrupted. The header + payload
 6465 * cannot be trusted. There is no point in passing
6466 * it further up.
6467 */
6468 ql_log(ql_log_warn, vha, 0xffff,
6469 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
6470 pkt->u.isp24.fcp_hdr.s_id,
6471 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
6472 le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
6473
6474 adjust_corrupted_atio(pkt);
6475 qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
6476 } else {
6477 qlt_24xx_atio_pkt_all_vps(vha,
6478 (struct atio_from_isp *)pkt, ha_locked);
6479 }
6466 6480
6467 for (i = 0; i < cnt; i++) { 6481 for (i = 0; i < cnt; i++) {
6468 ha->tgt.atio_ring_index++; 6482 ha->tgt.atio_ring_index++;
@@ -6545,6 +6559,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6545 6559
6546 /* Disable Full Login after LIP */ 6560 /* Disable Full Login after LIP */
6547 nv->host_p &= cpu_to_le32(~BIT_10); 6561 nv->host_p &= cpu_to_le32(~BIT_10);
6562
6563 /*
 6564 * Clear BIT 15 explicitly, as we have seen at least
 6565 * a couple of instances where this bit was set and
 6566 * prevented the firmware from initializing.
6567 */
6568 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6548 /* Enable target PRLI control */ 6569 /* Enable target PRLI control */
6549 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6570 nv->firmware_options_2 |= cpu_to_le32(BIT_14);
6550 } else { 6571 } else {
@@ -6560,9 +6581,6 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6560 return; 6581 return;
6561 } 6582 }
6562 6583
6563 /* out-of-order frames reassembly */
6564 nv->firmware_options_3 |= BIT_6|BIT_9;
6565
6566 if (ha->tgt.enable_class_2) { 6584 if (ha->tgt.enable_class_2) {
6567 if (vha->flags.init_done) 6585 if (vha->flags.init_done)
6568 fc_host_supported_classes(vha->host) = 6586 fc_host_supported_classes(vha->host) =
@@ -6629,11 +6647,17 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6629 /* Disable ini mode, if requested */ 6647 /* Disable ini mode, if requested */
6630 if (!qla_ini_mode_enabled(vha)) 6648 if (!qla_ini_mode_enabled(vha))
6631 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 6649 nv->firmware_options_1 |= cpu_to_le32(BIT_5);
6632
6633 /* Disable Full Login after LIP */ 6650 /* Disable Full Login after LIP */
6634 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6651 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
6635 /* Enable initial LIP */ 6652 /* Enable initial LIP */
6636 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 6653 nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
6654 /*
 6655 * Clear BIT 15 explicitly, as we have seen at
 6656 * least a couple of instances where this bit was
 6657 * set and prevented the firmware from
 6658 * initializing.
6659 */
6660 nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
6637 if (ql2xtgt_tape_enable) 6661 if (ql2xtgt_tape_enable)
6638 /* Enable FC tape support */ 6662 /* Enable FC tape support */
6639 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6663 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6658,9 +6682,6 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6658 return; 6682 return;
6659 } 6683 }
6660 6684
6661 /* out-of-order frames reassembly */
6662 nv->firmware_options_3 |= BIT_6|BIT_9;
6663
6664 if (ha->tgt.enable_class_2) { 6685 if (ha->tgt.enable_class_2) {
6665 if (vha->flags.init_done) 6686 if (vha->flags.init_done)
6666 fc_host_supported_classes(vha->host) = 6687 fc_host_supported_classes(vha->host) =
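Both BIT 15 hunks above manipulate NVRAM words that are stored little-endian,
so the mask, not the stored value, is what gets byte-swapped: the constant is
converted once at compile time and the field is never round-tripped through
CPU order. A minimal sketch, assuming a __le32 options field:

	__le32 opts = nv->firmware_options_1;	/* little-endian storage */

	opts &= cpu_to_le32(~BIT_15);		/* clear: swap the mask */
	opts |= cpu_to_le32(BIT_14);		/* set: likewise */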
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index f26c5f60eedd..0824a8164a24 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -427,13 +427,33 @@ struct atio_from_isp {
427 struct { 427 struct {
428 uint8_t entry_type; /* Entry type. */ 428 uint8_t entry_type; /* Entry type. */
429 uint8_t entry_count; /* Entry count. */ 429 uint8_t entry_count; /* Entry count. */
430 uint8_t data[58]; 430 __le16 attr_n_length;
431#define FCP_CMD_LENGTH_MASK 0x0fff
432#define FCP_CMD_LENGTH_MIN 0x38
433 uint8_t data[56];
431 uint32_t signature; 434 uint32_t signature;
432#define ATIO_PROCESSED 0xDEADDEAD /* Signature */ 435#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
433 } raw; 436 } raw;
434 } u; 437 } u;
435} __packed; 438} __packed;
436 439
440static inline int fcpcmd_is_corrupted(struct atio *atio)
441{
442 if (atio->entry_type == ATIO_TYPE7 &&
 443 ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
444 FCP_CMD_LENGTH_MIN))
445 return 1;
446 else
447 return 0;
448}
449
 450/* Adjust a corrupted ATIO so we won't trip over the same entry again. */
451static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
452{
453 atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN);
454 atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
455}
456
437#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ 457#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
438 458
439/* 459/*
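fcpcmd_is_corrupted() above treats the low 12 bits of attr_n_length as the
FCP_CMND length and anything below FCP_CMD_LENGTH_MIN (0x38) as corruption;
note the byte swap must happen before the mask is applied. A worked example
in host byte order:

	uint16_t attr_n_length = 0x1020;	/* attrs = 0x1, length = 0x020 */
	uint16_t len = attr_n_length & FCP_CMD_LENGTH_MASK;	/* 0x020 = 32 */
	int corrupted = len < FCP_CMD_LENGTH_MIN;	/* 32 < 0x38 -> 1 */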
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 36935c9ed669..8a58ef3adab4 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -433,6 +433,18 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
433 count++; 433 count++;
434 } 434 }
435 } 435 }
436 } else if (QLA_TGT_MODE_ENABLED() &&
437 ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
438 struct qla_hw_data *ha = vha->hw;
439 struct atio *atr = ha->tgt.atio_ring;
440
441 if (atr || !buf) {
442 length = ha->tgt.atio_q_length;
443 qla27xx_insert16(0, buf, len);
444 qla27xx_insert16(length, buf, len);
445 qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
446 count++;
447 }
436 } else { 448 } else {
437 ql_dbg(ql_dbg_misc, vha, 0xd026, 449 ql_dbg(ql_dbg_misc, vha, 0xd026,
438 "%s: unknown queue %x\n", __func__, ent->t263.queue_type); 450 "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
@@ -676,6 +688,18 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
676 count++; 688 count++;
677 } 689 }
678 } 690 }
691 } else if (QLA_TGT_MODE_ENABLED() &&
692 ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
693 struct qla_hw_data *ha = vha->hw;
694 struct atio *atr = ha->tgt.atio_ring_ptr;
695
696 if (atr || !buf) {
697 qla27xx_insert16(0, buf, len);
698 qla27xx_insert16(1, buf, len);
699 qla27xx_insert32(ha->tgt.atio_q_in ?
700 readl(ha->tgt.atio_q_in) : 0, buf, len);
701 count++;
702 }
679 } else { 703 } else {
680 ql_dbg(ql_dbg_misc, vha, 0xd02f, 704 ql_dbg(ql_dbg_misc, vha, 0xd02f,
681 "%s: unknown queue %x\n", __func__, ent->t274.queue_type); 705 "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 6643f6fc7795..d925910be761 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1800,7 +1800,7 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
1800{ 1800{
1801 return sprintf(page, 1801 return sprintf(page,
1802 "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on " 1802 "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
1803 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, 1803 UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
1804 utsname()->machine); 1804 utsname()->machine);
1805} 1805}
1806 1806
@@ -1906,7 +1906,7 @@ static int tcm_qla2xxx_register_configfs(void)
1906 int ret; 1906 int ret;
1907 1907
1908 pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on " 1908 pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
1909 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname, 1909 UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
1910 utsname()->machine); 1910 utsname()->machine);
1911 1911
1912 ret = target_register_template(&tcm_qla2xxx_ops); 1912 ret = target_register_template(&tcm_qla2xxx_ops);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 37e026a4823d..cf8430be183b 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -1,7 +1,6 @@
1#include <target/target_core_base.h> 1#include <target/target_core_base.h>
2#include <linux/btree.h> 2#include <linux/btree.h>
3 3
4#define TCM_QLA2XXX_VERSION "v0.1"
5/* length of ASCII WWPNs including pad */ 4/* length of ASCII WWPNs including pad */
6#define TCM_QLA2XXX_NAMELEN 32 5#define TCM_QLA2XXX_NAMELEN 32
7/* 6/*
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c35b6de4ca64..78db07fd8055 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1018,7 +1018,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
1018 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 1018 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
1019 BUG_ON(count > sdb->table.nents); 1019 BUG_ON(count > sdb->table.nents);
1020 sdb->table.nents = count; 1020 sdb->table.nents = count;
1021 sdb->length = blk_rq_bytes(req); 1021 sdb->length = blk_rq_payload_bytes(req);
1022 return BLKPREP_OK; 1022 return BLKPREP_OK;
1023} 1023}
1024 1024
@@ -1040,7 +1040,8 @@ int scsi_init_io(struct scsi_cmnd *cmd)
1040 bool is_mq = (rq->mq_ctx != NULL); 1040 bool is_mq = (rq->mq_ctx != NULL);
1041 int error; 1041 int error;
1042 1042
1043 BUG_ON(!blk_rq_nr_phys_segments(rq)); 1043 if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
1044 return -EINVAL;
1044 1045
1045 error = scsi_init_sgtable(rq, &cmd->sdb); 1046 error = scsi_init_sgtable(rq, &cmd->sdb);
1046 if (error) 1047 if (error)
@@ -2893,7 +2894,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
2893 * request queue. 2894 * request queue.
2894 */ 2895 */
2895 if (q->mq_ops) { 2896 if (q->mq_ops) {
2896 blk_mq_stop_hw_queues(q); 2897 blk_mq_quiesce_queue(q);
2897 } else { 2898 } else {
2898 spin_lock_irqsave(q->queue_lock, flags); 2899 spin_lock_irqsave(q->queue_lock, flags);
2899 blk_stop_queue(q); 2900 blk_stop_queue(q);
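The scsi_init_io() change above swaps a fatal BUG_ON() for the softer
WARN_ON_ONCE()-and-fail pattern: log the "can't happen" state once, then
reject the request so the machine keeps running. A minimal sketch:

	static int prep(struct request *rq)
	{
		/* One-shot diagnostic, then graceful failure. */
		if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
			return -EINVAL;
		/* ... normal mapping continues here ... */
		return 0;
	}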
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b1933041da39..1f5d92a25a49 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -871,11 +871,11 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
871 cmd->allowed = SD_MAX_RETRIES; 871 cmd->allowed = SD_MAX_RETRIES;
872 872
873 /* 873 /*
874 * For WRITE_SAME the data transferred in the DATA IN buffer is 874 * For WRITE SAME the data transferred via the DATA OUT buffer is
875 * different from the amount of data actually written to the target. 875 * different from the amount of data actually written to the target.
876 * 876 *
877 * We set up __data_len to the amount of data transferred from the 877 * We set up __data_len to the amount of data transferred via the
878 * DATA IN buffer so that blk_rq_map_sg set up the proper S/G list 878 * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
879 * to transfer a single sector of data first, but then reset it to 879 * to transfer a single sector of data first, but then reset it to
880 * the amount of data to be written right after so that the I/O path 880 * the amount of data to be written right after so that the I/O path
881 * knows how much to actually write. 881 * knows how much to actually write.
@@ -2600,7 +2600,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2600 if (sdp->broken_fua) { 2600 if (sdp->broken_fua) {
2601 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); 2601 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
2602 sdkp->DPOFUA = 0; 2602 sdkp->DPOFUA = 0;
2603 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { 2603 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
2604 !sdkp->device->use_16_for_rw) {
2604 sd_first_printk(KERN_NOTICE, sdkp, 2605 sd_first_printk(KERN_NOTICE, sdkp,
2605 "Uses READ/WRITE(6), disabling FUA\n"); 2606 "Uses READ/WRITE(6), disabling FUA\n");
2606 sdkp->DPOFUA = 0; 2607 sdkp->DPOFUA = 0;
@@ -2783,13 +2784,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2783 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); 2784 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
2784 } 2785 }
2785 2786
2786 sdkp->zoned = (buffer[8] >> 4) & 3; 2787 if (sdkp->device->type == TYPE_ZBC) {
2787 if (sdkp->zoned == 1) 2788 /* Host-managed */
2788 q->limits.zoned = BLK_ZONED_HA;
2789 else if (sdkp->device->type == TYPE_ZBC)
2790 q->limits.zoned = BLK_ZONED_HM; 2789 q->limits.zoned = BLK_ZONED_HM;
2791 else 2790 } else {
2792 q->limits.zoned = BLK_ZONED_NONE; 2791 sdkp->zoned = (buffer[8] >> 4) & 3;
2792 if (sdkp->zoned == 1)
2793 /* Host-aware */
2794 q->limits.zoned = BLK_ZONED_HA;
2795 else
2796 /*
2797 * Treat drive-managed devices as
2798 * regular block devices.
2799 */
2800 q->limits.zoned = BLK_ZONED_NONE;
2801 }
2793 if (blk_queue_is_zoned(q) && sdkp->first_scan) 2802 if (blk_queue_is_zoned(q) && sdkp->first_scan)
2794 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", 2803 sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
2795 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); 2804 q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
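The sd_read_block_characteristics() rework above encodes this decision
table: a TYPE_ZBC device is host-managed regardless of the VPD page, while
other devices are classified by the ZONED field of VPD page B1h, with
drive-managed devices deliberately treated as regular disks. A sketch of
the classification, with buffer[] holding the VPD payload:

	enum blk_zoned_model model;

	if (sdev_type == TYPE_ZBC)			/* ZBC device class */
		model = BLK_ZONED_HM;			/* host-managed */
	else if (((buffer[8] >> 4) & 3) == 1)		/* ZONED == 01b */
		model = BLK_ZONED_HA;			/* host-aware */
	else
		model = BLK_ZONED_NONE;			/* incl. drive-managed */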
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 8c9a35c91705..50adabbb5808 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
587 587
588 ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); 588 ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
589 589
590 if (scsi_is_sas_rphy(&sdev->sdev_gendev)) 590 if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
591 efd.addr = sas_get_address(sdev); 591 efd.addr = sas_get_address(sdev);
592 592
593 if (efd.addr) { 593 if (efd.addr) {
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index dbe5b4b95df0..121de0aaa6ad 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1753,6 +1753,10 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
1753 return res; 1753 return res;
1754 1754
1755 iov_iter_truncate(&i, hp->dxfer_len); 1755 iov_iter_truncate(&i, hp->dxfer_len);
1756 if (!iov_iter_count(&i)) {
1757 kfree(iov);
1758 return -EINVAL;
1759 }
1756 1760
1757 res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC); 1761 res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
1758 kfree(iov); 1762 kfree(iov);
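The sg_start_req() guard above covers the case where truncation leaves the
iterator empty: mapping zero bytes would fail later in a less obvious way,
and the copied iovec must still be freed on the early exit. A minimal
sketch:

	iov_iter_truncate(&i, hp->dxfer_len);
	if (!iov_iter_count(&i)) {
		kfree(iov);		/* release the copied iovec */
		return -EINVAL;		/* nothing left to map */
	}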
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 396b32dca074..7cf70aaec0ba 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -591,6 +591,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
591 if (!pool) { 591 if (!pool) {
592 SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n"); 592 SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
593 593
594 ret = -ENOMEM;
594 goto err_free_res; 595 goto err_free_res;
595 } 596 }
596 597
@@ -601,6 +602,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
601 if (!pool) { 602 if (!pool) {
602 SNIC_HOST_ERR(shost, "max sgl pool creation failed\n"); 603 SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
603 604
605 ret = -ENOMEM;
604 goto err_free_dflt_sgl_pool; 606 goto err_free_dflt_sgl_pool;
605 } 607 }
606 608
@@ -611,6 +613,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
611 if (!pool) { 613 if (!pool) {
612 SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n"); 614 SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
613 615
616 ret = -ENOMEM;
614 goto err_free_max_sgl_pool; 617 goto err_free_max_sgl_pool;
615 } 618 }
616 619
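All three snic_probe() hunks above fix the same bug class: taking a goto
with 'ret' still holding 0 from an earlier success makes probe() report
success on an allocation failure. A minimal sketch with a hypothetical
allocator:

	pool = create_pool();		/* hypothetical */
	if (!pool) {
		ret = -ENOMEM;		/* must be set before the jump */
		goto err_free_res;
	}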
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index ec91bd07f00a..c680d7641311 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
534{ 534{
535 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); 535 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
536 struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); 536 struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
537 unsigned long flags;
537 int req_size; 538 int req_size;
539 int ret;
538 540
539 BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); 541 BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
540 542
@@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
562 req_size = sizeof(cmd->req.cmd); 564 req_size = sizeof(cmd->req.cmd);
563 } 565 }
564 566
565 if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0) 567 ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
568 if (ret == -EIO) {
569 cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
570 spin_lock_irqsave(&req_vq->vq_lock, flags);
571 virtscsi_complete_cmd(vscsi, cmd);
572 spin_unlock_irqrestore(&req_vq->vq_lock, flags);
573 } else if (ret != 0) {
566 return SCSI_MLQUEUE_HOST_BUSY; 574 return SCSI_MLQUEUE_HOST_BUSY;
575 }
567 return 0; 576 return 0;
568} 577}
569 578
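The virtscsi_queuecommand() change above distinguishes two failure modes:
-EIO from the kick means the target is gone, so the command is completed in
place with VIRTIO_SCSI_S_BAD_TARGET (under the virtqueue lock) instead of
being requeued forever, while any other error is transient and returns
SCSI_MLQUEUE_HOST_BUSY so the midlayer retries. A consolidated sketch of
the dispatch:

	ret = virtscsi_kick_cmd(req_vq, cmd, req_size, resp_size);
	if (ret == -EIO) {
		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
		spin_lock_irqsave(&req_vq->vq_lock, flags);
		virtscsi_complete_cmd(vscsi, cmd);	/* finish sc now */
		spin_unlock_irqrestore(&req_vq->vq_lock, flags);
	} else if (ret) {
		return SCSI_MLQUEUE_HOST_BUSY;		/* retry later */
	}
	return 0;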
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 8823cc81ae45..5bb376009d98 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
459 459
460 if (IS_ERR(task)) { 460 if (IS_ERR(task)) {
461 dev_err(dev, "can't create rproc_boot thread\n"); 461 dev_err(dev, "can't create rproc_boot thread\n");
462 ret = PTR_ERR(task);
462 goto err_put_rproc; 463 goto err_put_rproc;
463 } 464 }
464 465
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ec4aa252d6e8..2922a9908302 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -378,6 +378,7 @@ config SPI_FSL_SPI
378config SPI_FSL_DSPI 378config SPI_FSL_DSPI
379 tristate "Freescale DSPI controller" 379 tristate "Freescale DSPI controller"
380 select REGMAP_MMIO 380 select REGMAP_MMIO
381 depends on HAS_DMA
381 depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST 382 depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
382 help 383 help
383 This enables support for the Freescale DSPI controller in master 384 This enables support for the Freescale DSPI controller in master
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index e89da0af45d2..0314c6b9e044 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -800,7 +800,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
800 struct spi_master *master; 800 struct spi_master *master;
801 struct a3700_spi *spi; 801 struct a3700_spi *spi;
802 u32 num_cs = 0; 802 u32 num_cs = 0;
803 int ret = 0; 803 int irq, ret = 0;
804 804
805 master = spi_alloc_master(dev, sizeof(*spi)); 805 master = spi_alloc_master(dev, sizeof(*spi));
806 if (!master) { 806 if (!master) {
@@ -825,7 +825,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
825 master->unprepare_message = a3700_spi_unprepare_message; 825 master->unprepare_message = a3700_spi_unprepare_message;
826 master->set_cs = a3700_spi_set_cs; 826 master->set_cs = a3700_spi_set_cs;
827 master->flags = SPI_MASTER_HALF_DUPLEX; 827 master->flags = SPI_MASTER_HALF_DUPLEX;
828 master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL | 828 master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
829 SPI_RX_QUAD | SPI_TX_QUAD); 829 SPI_RX_QUAD | SPI_TX_QUAD);
830 830
831 platform_set_drvdata(pdev, master); 831 platform_set_drvdata(pdev, master);
@@ -846,12 +846,13 @@ static int a3700_spi_probe(struct platform_device *pdev)
846 goto error; 846 goto error;
847 } 847 }
848 848
849 spi->irq = platform_get_irq(pdev, 0); 849 irq = platform_get_irq(pdev, 0);
850 if (spi->irq < 0) { 850 if (irq < 0) {
851 dev_err(dev, "could not get irq: %d\n", spi->irq); 851 dev_err(dev, "could not get irq: %d\n", irq);
852 ret = -ENXIO; 852 ret = -ENXIO;
853 goto error; 853 goto error;
854 } 854 }
855 spi->irq = irq;
855 856
856 init_completion(&spi->done); 857 init_completion(&spi->done);
857 858
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 319225d7e761..6ab4c7700228 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev)
494 SPI_ENGINE_VERSION_MAJOR(version), 494 SPI_ENGINE_VERSION_MAJOR(version),
495 SPI_ENGINE_VERSION_MINOR(version), 495 SPI_ENGINE_VERSION_MINOR(version),
496 SPI_ENGINE_VERSION_PATCH(version)); 496 SPI_ENGINE_VERSION_PATCH(version));
497 return -ENODEV; 497 ret = -ENODEV;
498 goto err_put_master;
498 } 499 }
499 500
500 spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk"); 501 spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index d36c11b73a35..02fb96797ac8 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
646 buf = t->rx_buf; 646 buf = t->rx_buf;
647 t->rx_dma = dma_map_single(&spi->dev, buf, 647 t->rx_dma = dma_map_single(&spi->dev, buf,
648 t->len, DMA_FROM_DEVICE); 648 t->len, DMA_FROM_DEVICE);
 649 if (!t->rx_dma) { 649 if (dma_mapping_error(&spi->dev, t->rx_dma)) {
650 ret = -EFAULT; 650 ret = -EFAULT;
651 goto err_rx_map; 651 goto err_rx_map;
652 } 652 }
@@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
660 buf = (void *)t->tx_buf; 660 buf = (void *)t->tx_buf;
661 t->tx_dma = dma_map_single(&spi->dev, buf, 661 t->tx_dma = dma_map_single(&spi->dev, buf,
662 t->len, DMA_TO_DEVICE); 662 t->len, DMA_TO_DEVICE);
663 if (!t->tx_dma) { 663 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
664 ret = -EFAULT; 664 ret = -EFAULT;
665 goto err_tx_map; 665 goto err_tx_map;
666 } 666 }
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index e31971f91475..837cb8d0bac6 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -274,11 +274,11 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
274static void mid_spi_dma_stop(struct dw_spi *dws) 274static void mid_spi_dma_stop(struct dw_spi *dws)
275{ 275{
276 if (test_bit(TX_BUSY, &dws->dma_chan_busy)) { 276 if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
277 dmaengine_terminate_all(dws->txchan); 277 dmaengine_terminate_sync(dws->txchan);
278 clear_bit(TX_BUSY, &dws->dma_chan_busy); 278 clear_bit(TX_BUSY, &dws->dma_chan_busy);
279 } 279 }
280 if (test_bit(RX_BUSY, &dws->dma_chan_busy)) { 280 if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
281 dmaengine_terminate_all(dws->rxchan); 281 dmaengine_terminate_sync(dws->rxchan);
282 clear_bit(RX_BUSY, &dws->dma_chan_busy); 282 clear_bit(RX_BUSY, &dws->dma_chan_busy);
283 } 283 }
284} 284}
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b715a26a9148..054012f87567 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = {
107 107
108static int dw_spi_debugfs_init(struct dw_spi *dws) 108static int dw_spi_debugfs_init(struct dw_spi *dws)
109{ 109{
110 dws->debugfs = debugfs_create_dir("dw_spi", NULL); 110 char name[128];
111
112 snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev));
113 dws->debugfs = debugfs_create_dir(name, NULL);
111 if (!dws->debugfs) 114 if (!dws->debugfs)
112 return -ENOMEM; 115 return -ENOMEM;
113 116
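The dw_spi_debugfs_init() fix above derives the debugfs directory name from
dev_name(), since a fixed "dw_spi" directory collides as soon as a second
controller probes. A minimal sketch (using sizeof() rather than repeating
the buffer length):

	char name[128];

	snprintf(name, sizeof(name), "dw_spi-%s", dev_name(&dws->master->dev));
	dws->debugfs = debugfs_create_dir(name, NULL);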
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index dd7b5b47291d..d6239fa718be 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1690,6 +1690,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1690 pxa2xx_spi_write(drv_data, SSCR1, tmp); 1690 pxa2xx_spi_write(drv_data, SSCR1, tmp);
1691 tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8); 1691 tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
1692 pxa2xx_spi_write(drv_data, SSCR0, tmp); 1692 pxa2xx_spi_write(drv_data, SSCR0, tmp);
1693 break;
1693 default: 1694 default:
1694 tmp = SSCR1_RxTresh(RX_THRESH_DFLT) | 1695 tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
1695 SSCR1_TxTresh(TX_THRESH_DFLT); 1696 SSCR1_TxTresh(TX_THRESH_DFLT);
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0012ad02e569..1f00eeb0b5a3 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -973,14 +973,16 @@ static const struct sh_msiof_chipdata r8a779x_data = {
973}; 973};
974 974
975static const struct of_device_id sh_msiof_match[] = { 975static const struct of_device_id sh_msiof_match[] = {
976 { .compatible = "renesas,sh-msiof", .data = &sh_data },
977 { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data }, 976 { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
978 { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data }, 977 { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data },
979 { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data }, 978 { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data },
980 { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data }, 979 { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data },
981 { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data }, 980 { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data },
982 { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data }, 981 { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data },
982 { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data },
983 { .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data }, 983 { .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data },
984 { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data },
985 { .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
984 {}, 986 {},
985}; 987};
986MODULE_DEVICE_TABLE(of, sh_msiof_match); 988MODULE_DEVICE_TABLE(of, sh_msiof_match);
diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c
index 113f3d6c4b3a..27f75b17679b 100644
--- a/drivers/staging/greybus/timesync_platform.c
+++ b/drivers/staging/greybus/timesync_platform.c
@@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void)
45 45
46int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata) 46int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
47{ 47{
48 if (!arche_platform_change_state_cb)
49 return 0;
50
48 return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC, 51 return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
49 pdata); 52 pdata);
50} 53}
51 54
52void gb_timesync_platform_unlock_bus(void) 55void gb_timesync_platform_unlock_bus(void)
53{ 56{
57 if (!arche_platform_change_state_cb)
58 return;
59
54 arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL); 60 arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
55} 61}
56 62
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index ee01f20d8b11..9afa6bec3e6f 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -390,15 +390,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
390 result = VM_FAULT_LOCKED; 390 result = VM_FAULT_LOCKED;
391 break; 391 break;
392 case -ENODATA: 392 case -ENODATA:
393 case -EAGAIN:
393 case -EFAULT: 394 case -EFAULT:
394 result = VM_FAULT_NOPAGE; 395 result = VM_FAULT_NOPAGE;
395 break; 396 break;
396 case -ENOMEM: 397 case -ENOMEM:
397 result = VM_FAULT_OOM; 398 result = VM_FAULT_OOM;
398 break; 399 break;
399 case -EAGAIN:
400 result = VM_FAULT_RETRY;
401 break;
402 default: 400 default:
403 result = VM_FAULT_SIGBUS; 401 result = VM_FAULT_SIGBUS;
404 break; 402 break;
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 8130dfe89745..4971aa54756a 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -770,6 +770,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
770 /* Initialize the device private structure. */ 770 /* Initialize the device private structure. */
771 struct octeon_ethernet *priv = netdev_priv(dev); 771 struct octeon_ethernet *priv = netdev_priv(dev);
772 772
773 SET_NETDEV_DEV(dev, &pdev->dev);
773 dev->netdev_ops = &cvm_oct_pow_netdev_ops; 774 dev->netdev_ops = &cvm_oct_pow_netdev_ops;
774 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED; 775 priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
775 priv->port = CVMX_PIP_NUM_INPUT_PORTS; 776 priv->port = CVMX_PIP_NUM_INPUT_PORTS;
@@ -816,6 +817,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
816 } 817 }
817 818
818 /* Initialize the device private structure. */ 819 /* Initialize the device private structure. */
820 SET_NETDEV_DEV(dev, &pdev->dev);
819 priv = netdev_priv(dev); 821 priv = netdev_priv(dev);
820 priv->netdev = dev; 822 priv->netdev = dev;
821 priv->of_node = cvm_oct_node_for_port(pip, interface, 823 priv->of_node = cvm_oct_node_for_port(pip, interface,
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 1ebd13ef7bd3..26929c44d703 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -352,7 +352,15 @@ int core_enable_device_list_for_node(
352 kfree(new); 352 kfree(new);
353 return -EINVAL; 353 return -EINVAL;
354 } 354 }
355 BUG_ON(orig->se_lun_acl != NULL); 355 if (orig->se_lun_acl != NULL) {
356 pr_warn_ratelimited("Detected existing explicit"
357 " se_lun_acl->se_lun_group reference for %s"
358 " mapped_lun: %llu, failing\n",
359 nacl->initiatorname, mapped_lun);
360 mutex_unlock(&nacl->lun_entry_mutex);
361 kfree(new);
362 return -EINVAL;
363 }
356 364
357 rcu_assign_pointer(new->se_lun, lun); 365 rcu_assign_pointer(new->se_lun, lun);
358 rcu_assign_pointer(new->se_lun_acl, lun_acl); 366 rcu_assign_pointer(new->se_lun_acl, lun_acl);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 4879e70e2eef..df7b6e95c019 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -451,6 +451,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
451 int *post_ret) 451 int *post_ret)
452{ 452{
453 struct se_device *dev = cmd->se_dev; 453 struct se_device *dev = cmd->se_dev;
454 sense_reason_t ret = TCM_NO_SENSE;
454 455
455 /* 456 /*
456 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through 457 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
@@ -458,9 +459,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
458 * sent to the backend driver. 459 * sent to the backend driver.
459 */ 460 */
460 spin_lock_irq(&cmd->t_state_lock); 461 spin_lock_irq(&cmd->t_state_lock);
461 if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) { 462 if (cmd->transport_state & CMD_T_SENT) {
462 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST; 463 cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
463 *post_ret = 1; 464 *post_ret = 1;
465
466 if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
467 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
464 } 468 }
465 spin_unlock_irq(&cmd->t_state_lock); 469 spin_unlock_irq(&cmd->t_state_lock);
466 470
@@ -470,7 +474,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
470 */ 474 */
471 up(&dev->caw_sem); 475 up(&dev->caw_sem);
472 476
473 return TCM_NO_SENSE; 477 return ret;
474} 478}
475 479
476static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success, 480static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7dfefd66df93..437591bc7c08 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -457,8 +457,20 @@ static void target_complete_nacl(struct kref *kref)
457{ 457{
458 struct se_node_acl *nacl = container_of(kref, 458 struct se_node_acl *nacl = container_of(kref,
459 struct se_node_acl, acl_kref); 459 struct se_node_acl, acl_kref);
460 struct se_portal_group *se_tpg = nacl->se_tpg;
460 461
461 complete(&nacl->acl_free_comp); 462 if (!nacl->dynamic_stop) {
463 complete(&nacl->acl_free_comp);
464 return;
465 }
466
467 mutex_lock(&se_tpg->acl_node_mutex);
468 list_del(&nacl->acl_list);
469 mutex_unlock(&se_tpg->acl_node_mutex);
470
471 core_tpg_wait_for_nacl_pr_ref(nacl);
472 core_free_device_list_for_node(nacl, se_tpg);
473 kfree(nacl);
462} 474}
463 475
464void target_put_nacl(struct se_node_acl *nacl) 476void target_put_nacl(struct se_node_acl *nacl)
@@ -499,12 +511,39 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
499void transport_free_session(struct se_session *se_sess) 511void transport_free_session(struct se_session *se_sess)
500{ 512{
501 struct se_node_acl *se_nacl = se_sess->se_node_acl; 513 struct se_node_acl *se_nacl = se_sess->se_node_acl;
514
502 /* 515 /*
503 * Drop the se_node_acl->nacl_kref obtained from within 516 * Drop the se_node_acl->nacl_kref obtained from within
504 * core_tpg_get_initiator_node_acl(). 517 * core_tpg_get_initiator_node_acl().
505 */ 518 */
506 if (se_nacl) { 519 if (se_nacl) {
520 struct se_portal_group *se_tpg = se_nacl->se_tpg;
521 const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
522 unsigned long flags;
523
507 se_sess->se_node_acl = NULL; 524 se_sess->se_node_acl = NULL;
525
526 /*
527 * Also determine if we need to drop the extra ->cmd_kref if
528 * it had been previously dynamically generated, and
529 * the endpoint is not caching dynamic ACLs.
530 */
531 mutex_lock(&se_tpg->acl_node_mutex);
532 if (se_nacl->dynamic_node_acl &&
533 !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
534 spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
535 if (list_empty(&se_nacl->acl_sess_list))
536 se_nacl->dynamic_stop = true;
537 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
538
539 if (se_nacl->dynamic_stop)
540 list_del(&se_nacl->acl_list);
541 }
542 mutex_unlock(&se_tpg->acl_node_mutex);
543
544 if (se_nacl->dynamic_stop)
545 target_put_nacl(se_nacl);
546
508 target_put_nacl(se_nacl); 547 target_put_nacl(se_nacl);
509 } 548 }
510 if (se_sess->sess_cmd_map) { 549 if (se_sess->sess_cmd_map) {
@@ -518,16 +557,12 @@ EXPORT_SYMBOL(transport_free_session);
518void transport_deregister_session(struct se_session *se_sess) 557void transport_deregister_session(struct se_session *se_sess)
519{ 558{
520 struct se_portal_group *se_tpg = se_sess->se_tpg; 559 struct se_portal_group *se_tpg = se_sess->se_tpg;
521 const struct target_core_fabric_ops *se_tfo;
522 struct se_node_acl *se_nacl;
523 unsigned long flags; 560 unsigned long flags;
524 bool drop_nacl = false;
525 561
526 if (!se_tpg) { 562 if (!se_tpg) {
527 transport_free_session(se_sess); 563 transport_free_session(se_sess);
528 return; 564 return;
529 } 565 }
530 se_tfo = se_tpg->se_tpg_tfo;
531 566
532 spin_lock_irqsave(&se_tpg->session_lock, flags); 567 spin_lock_irqsave(&se_tpg->session_lock, flags);
533 list_del(&se_sess->sess_list); 568 list_del(&se_sess->sess_list);
@@ -535,33 +570,15 @@ void transport_deregister_session(struct se_session *se_sess)
535 se_sess->fabric_sess_ptr = NULL; 570 se_sess->fabric_sess_ptr = NULL;
536 spin_unlock_irqrestore(&se_tpg->session_lock, flags); 571 spin_unlock_irqrestore(&se_tpg->session_lock, flags);
537 572
538 /*
539 * Determine if we need to do extra work for this initiator node's
540 * struct se_node_acl if it had been previously dynamically generated.
541 */
542 se_nacl = se_sess->se_node_acl;
543
544 mutex_lock(&se_tpg->acl_node_mutex);
545 if (se_nacl && se_nacl->dynamic_node_acl) {
546 if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
547 list_del(&se_nacl->acl_list);
548 drop_nacl = true;
549 }
550 }
551 mutex_unlock(&se_tpg->acl_node_mutex);
552
553 if (drop_nacl) {
554 core_tpg_wait_for_nacl_pr_ref(se_nacl);
555 core_free_device_list_for_node(se_nacl, se_tpg);
556 se_sess->se_node_acl = NULL;
557 kfree(se_nacl);
558 }
559 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", 573 pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
560 se_tpg->se_tpg_tfo->get_fabric_name()); 574 se_tpg->se_tpg_tfo->get_fabric_name());
561 /* 575 /*
562 * If last kref is dropping now for an explicit NodeACL, awake sleeping 576 * If last kref is dropping now for an explicit NodeACL, awake sleeping
563 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group 577 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
564 * removal context from within transport_free_session() code. 578 * removal context from within transport_free_session() code.
579 *
 580 * For a dynamic ACL, target_put_nacl() relies on target_complete_nacl()
 581 * to release the remaining resources of ACLs created with generate_node_acl=1.
565 */ 582 */
566 583
567 transport_free_session(se_sess); 584 transport_free_session(se_sess);
@@ -1693,6 +1710,10 @@ void transport_generic_request_failure(struct se_cmd *cmd,
1693 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: 1710 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1694 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: 1711 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
1695 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: 1712 case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
1713 case TCM_TOO_MANY_TARGET_DESCS:
1714 case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
1715 case TCM_TOO_MANY_SEGMENT_DESCS:
1716 case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
1696 break; 1717 break;
1697 case TCM_OUT_OF_RESOURCES: 1718 case TCM_OUT_OF_RESOURCES:
1698 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1719 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2808,6 +2829,26 @@ static const struct sense_info sense_info_table[] = {
2808 .key = ILLEGAL_REQUEST, 2829 .key = ILLEGAL_REQUEST,
2809 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ 2830 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
2810 }, 2831 },
2832 [TCM_TOO_MANY_TARGET_DESCS] = {
2833 .key = ILLEGAL_REQUEST,
2834 .asc = 0x26,
2835 .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
2836 },
2837 [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
2838 .key = ILLEGAL_REQUEST,
2839 .asc = 0x26,
2840 .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
2841 },
2842 [TCM_TOO_MANY_SEGMENT_DESCS] = {
2843 .key = ILLEGAL_REQUEST,
2844 .asc = 0x26,
2845 .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
2846 },
2847 [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
2848 .key = ILLEGAL_REQUEST,
2849 .asc = 0x26,
2850 .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
2851 },
2811 [TCM_PARAMETER_LIST_LENGTH_ERROR] = { 2852 [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
2812 .key = ILLEGAL_REQUEST, 2853 .key = ILLEGAL_REQUEST,
2813 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ 2854 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
@@ -3086,7 +3127,6 @@ static void target_tmr_work(struct work_struct *work)
3086 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3127 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3087 goto check_stop; 3128 goto check_stop;
3088 } 3129 }
3089 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3090 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3130 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3091 3131
3092 cmd->se_tfo->queue_tm_rsp(cmd); 3132 cmd->se_tfo->queue_tm_rsp(cmd);
@@ -3099,11 +3139,25 @@ int transport_generic_handle_tmr(
3099 struct se_cmd *cmd) 3139 struct se_cmd *cmd)
3100{ 3140{
3101 unsigned long flags; 3141 unsigned long flags;
3142 bool aborted = false;
3102 3143
3103 spin_lock_irqsave(&cmd->t_state_lock, flags); 3144 spin_lock_irqsave(&cmd->t_state_lock, flags);
3104 cmd->transport_state |= CMD_T_ACTIVE; 3145 if (cmd->transport_state & CMD_T_ABORTED) {
3146 aborted = true;
3147 } else {
3148 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3149 cmd->transport_state |= CMD_T_ACTIVE;
3150 }
3105 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 3151 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3106 3152
3153 if (aborted) {
 3154 pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d "
3155 "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
3156 cmd->se_tmr_req->ref_task_tag, cmd->tag);
3157 transport_cmd_check_stop_to_fabric(cmd);
3158 return 0;
3159 }
3160
3107 INIT_WORK(&cmd->work, target_tmr_work); 3161 INIT_WORK(&cmd->work, target_tmr_work);
3108 queue_work(cmd->se_dev->tmr_wq, &cmd->work); 3162 queue_work(cmd->se_dev->tmr_wq, &cmd->work);
3109 return 0; 3163 return 0;
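transport_generic_handle_tmr() above closes a race by testing CMD_T_ABORTED
and setting CMD_T_ACTIVE under the same lock: an abort can no longer slip in
between the check and the queue_work(). A minimal sketch of the pattern,
with hypothetical field names:

	bool aborted = false;

	spin_lock_irqsave(&cmd->lock, flags);
	if (cmd->state & ABORTED)
		aborted = true;
	else
		cmd->state |= ACTIVE;	/* claim the command */
	spin_unlock_irqrestore(&cmd->lock, flags);

	if (aborted)
		return 0;		/* abort path owns completion */
	queue_work(wq, &cmd->work);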
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 37d5caebffa6..cac5a20a4de0 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -53,18 +53,13 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
53 return 0; 53 return 0;
54} 54}
55 55
56static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, 56static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
57 bool src) 57 struct se_device **found_dev)
58{ 58{
59 struct se_device *se_dev; 59 struct se_device *se_dev;
60 unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; 60 unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
61 int rc; 61 int rc;
62 62
63 if (src)
64 dev_wwn = &xop->dst_tid_wwn[0];
65 else
66 dev_wwn = &xop->src_tid_wwn[0];
67
68 mutex_lock(&g_device_mutex); 63 mutex_lock(&g_device_mutex);
69 list_for_each_entry(se_dev, &g_device_list, g_dev_node) { 64 list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
70 65
@@ -78,15 +73,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
78 if (rc != 0) 73 if (rc != 0)
79 continue; 74 continue;
80 75
81 if (src) { 76 *found_dev = se_dev;
82 xop->dst_dev = se_dev; 77 pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
83 pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
84 " se_dev\n", xop->dst_dev);
85 } else {
86 xop->src_dev = se_dev;
87 pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
88 " se_dev\n", xop->src_dev);
89 }
90 78
91 rc = target_depend_item(&se_dev->dev_group.cg_item); 79 rc = target_depend_item(&se_dev->dev_group.cg_item);
92 if (rc != 0) { 80 if (rc != 0) {
@@ -110,7 +98,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
110} 98}
111 99
112static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, 100static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
113 unsigned char *p, bool src) 101 unsigned char *p, unsigned short cscd_index)
114{ 102{
115 unsigned char *desc = p; 103 unsigned char *desc = p;
116 unsigned short ript; 104 unsigned short ript;
@@ -155,7 +143,13 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
155 return -EINVAL; 143 return -EINVAL;
156 } 144 }
157 145
158 if (src) { 146 if (cscd_index != xop->stdi && cscd_index != xop->dtdi) {
147 pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor "
148 "dest\n", cscd_index);
149 return 0;
150 }
151
152 if (cscd_index == xop->stdi) {
159 memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); 153 memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
160 /* 154 /*
161 * Determine if the source designator matches the local device 155 * Determine if the source designator matches the local device
@@ -167,10 +161,15 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
167 pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source" 161 pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
168 " received xop\n", xop->src_dev); 162 " received xop\n", xop->src_dev);
169 } 163 }
170 } else { 164 }
165
166 if (cscd_index == xop->dtdi) {
171 memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN); 167 memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
172 /* 168 /*
173 * Determine if the destination designator matches the local device 169 * Determine if the destination designator matches the local
170 * device. If @cscd_index corresponds to both source (stdi) and
171 * destination (dtdi), or dtdi comes after stdi, then
172 * XCOL_DEST_RECV_OP wins.
174 */ 173 */
175 if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0], 174 if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
176 XCOPY_NAA_IEEE_REGEX_LEN)) { 175 XCOPY_NAA_IEEE_REGEX_LEN)) {
@@ -190,20 +189,23 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
190{ 189{
191 struct se_device *local_dev = se_cmd->se_dev; 190 struct se_device *local_dev = se_cmd->se_dev;
192 unsigned char *desc = p; 191 unsigned char *desc = p;
193 int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0; 192 int offset = tdll % XCOPY_TARGET_DESC_LEN, rc;
193 unsigned short cscd_index = 0;
194 unsigned short start = 0; 194 unsigned short start = 0;
195 bool src = true;
196 195
197 *sense_ret = TCM_INVALID_PARAMETER_LIST; 196 *sense_ret = TCM_INVALID_PARAMETER_LIST;
198 197
199 if (offset != 0) { 198 if (offset != 0) {
200 pr_err("XCOPY target descriptor list length is not" 199 pr_err("XCOPY target descriptor list length is not"
201 " multiple of %d\n", XCOPY_TARGET_DESC_LEN); 200 " multiple of %d\n", XCOPY_TARGET_DESC_LEN);
201 *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
202 return -EINVAL; 202 return -EINVAL;
203 } 203 }
204 if (tdll > 64) { 204 if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) {
205 pr_err("XCOPY target descriptor supports a maximum" 205 pr_err("XCOPY target descriptor supports a maximum"
206 " two src/dest descriptors, tdll: %hu too large..\n", tdll); 206 " two src/dest descriptors, tdll: %hu too large..\n", tdll);
207 /* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */
208 *sense_ret = TCM_TOO_MANY_TARGET_DESCS;
207 return -EINVAL; 209 return -EINVAL;
208 } 210 }
209 /* 211 /*
@@ -215,37 +217,43 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
215 217
216 while (start < tdll) { 218 while (start < tdll) {
217 /* 219 /*
218 * Check target descriptor identification with 0xE4 type with 220 * Check target descriptor identification with 0xE4 type, and
219 * use VPD 0x83 WWPN matching .. 221 * compare the current index with the CSCD descriptor IDs in
222 * the segment descriptor. Use VPD 0x83 WWPN matching ..
220 */ 223 */
221 switch (desc[0]) { 224 switch (desc[0]) {
222 case 0xe4: 225 case 0xe4:
223 rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop, 226 rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
224 &desc[0], src); 227 &desc[0], cscd_index);
225 if (rc != 0) 228 if (rc != 0)
226 goto out; 229 goto out;
227 /*
228 * Assume target descriptors are in source -> destination order..
229 */
230 if (src)
231 src = false;
232 else
233 src = true;
234 start += XCOPY_TARGET_DESC_LEN; 230 start += XCOPY_TARGET_DESC_LEN;
235 desc += XCOPY_TARGET_DESC_LEN; 231 desc += XCOPY_TARGET_DESC_LEN;
236 ret++; 232 cscd_index++;
237 break; 233 break;
238 default: 234 default:
239 pr_err("XCOPY unsupported descriptor type code:" 235 pr_err("XCOPY unsupported descriptor type code:"
240 " 0x%02x\n", desc[0]); 236 " 0x%02x\n", desc[0]);
237 *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
241 goto out; 238 goto out;
242 } 239 }
243 } 240 }
244 241
245 if (xop->op_origin == XCOL_SOURCE_RECV_OP) 242 switch (xop->op_origin) {
246 rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true); 243 case XCOL_SOURCE_RECV_OP:
247 else 244 rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
248 rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false); 245 &xop->dst_dev);
246 break;
247 case XCOL_DEST_RECV_OP:
248 rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
249 &xop->src_dev);
250 break;
251 default:
252 pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
253 "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi);
254 rc = -EINVAL;
255 break;
256 }
249 /* 257 /*
250 * If a matching IEEE NAA 0x83 descriptor for the requested device 258 * If a matching IEEE NAA 0x83 descriptor for the requested device
251 * is not located on this node, return COPY_ABORTED with ASQ/ASQC 259 * is not located on this node, return COPY_ABORTED with ASQ/ASQC
@@ -262,7 +270,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
262 pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n", 270 pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
263 xop->dst_dev, &xop->dst_tid_wwn[0]); 271 xop->dst_dev, &xop->dst_tid_wwn[0]);
264 272
265 return ret; 273 return cscd_index;
266 274
267out: 275out:
268 return -EINVAL; 276 return -EINVAL;
@@ -284,6 +292,14 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
284 292
285 xop->stdi = get_unaligned_be16(&desc[4]); 293 xop->stdi = get_unaligned_be16(&desc[4]);
286 xop->dtdi = get_unaligned_be16(&desc[6]); 294 xop->dtdi = get_unaligned_be16(&desc[6]);
295
296 if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX ||
297 xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) {
298 pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n",
299 XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi);
300 return -EINVAL;
301 }
302
287 pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n", 303 pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
288 desc_len, xop->stdi, xop->dtdi, dc); 304 desc_len, xop->stdi, xop->dtdi, dc);
289 305
@@ -306,15 +322,25 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
306 322
307static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd, 323static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
308 struct xcopy_op *xop, unsigned char *p, 324 struct xcopy_op *xop, unsigned char *p,
309 unsigned int sdll) 325 unsigned int sdll, sense_reason_t *sense_ret)
310{ 326{
311 unsigned char *desc = p; 327 unsigned char *desc = p;
312 unsigned int start = 0; 328 unsigned int start = 0;
313 int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0; 329 int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
314 330
331 *sense_ret = TCM_INVALID_PARAMETER_LIST;
332
315 if (offset != 0) { 333 if (offset != 0) {
316 pr_err("XCOPY segment descriptor list length is not" 334 pr_err("XCOPY segment descriptor list length is not"
317 " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN); 335 " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
336 *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
337 return -EINVAL;
338 }
339 if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) {
340 pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too"
341 " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll);
342 /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */
343 *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS;
318 return -EINVAL; 344 return -EINVAL;
319 } 345 }
320 346
@@ -335,6 +361,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
335 default: 361 default:
336 pr_err("XCOPY unsupported segment descriptor" 362 pr_err("XCOPY unsupported segment descriptor"
337 "type: 0x%02x\n", desc[0]); 363 "type: 0x%02x\n", desc[0]);
364 *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
338 goto out; 365 goto out;
339 } 366 }
340 } 367 }
@@ -837,7 +864,7 @@ out:
837 " CHECK_CONDITION -> sending response\n", rc); 864 " CHECK_CONDITION -> sending response\n", rc);
838 ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; 865 ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
839 } 866 }
840 target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION); 867 target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
841} 868}
842 869
843sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) 870sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
@@ -861,6 +888,16 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
861 return TCM_UNSUPPORTED_SCSI_OPCODE; 888 return TCM_UNSUPPORTED_SCSI_OPCODE;
862 } 889 }
863 890
891 if (se_cmd->data_length == 0) {
892 target_complete_cmd(se_cmd, SAM_STAT_GOOD);
893 return TCM_NO_SENSE;
894 }
895 if (se_cmd->data_length < XCOPY_HDR_LEN) {
896 pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
897 se_cmd->data_length, XCOPY_HDR_LEN);
898 return TCM_PARAMETER_LIST_LENGTH_ERROR;
899 }
900
864 xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); 901 xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
865 if (!xop) { 902 if (!xop) {
866 pr_err("Unable to allocate xcopy_op\n"); 903 pr_err("Unable to allocate xcopy_op\n");
@@ -883,6 +920,12 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
883 */ 920 */
884 tdll = get_unaligned_be16(&p[2]); 921 tdll = get_unaligned_be16(&p[2]);
885 sdll = get_unaligned_be32(&p[8]); 922 sdll = get_unaligned_be32(&p[8]);
923 if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) {
924 pr_err("XCOPY descriptor list length %u exceeds maximum %u\n",
925 tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN);
926 ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
927 goto out;
928 }
886 929
887 inline_dl = get_unaligned_be32(&p[12]); 930 inline_dl = get_unaligned_be32(&p[12]);
888 if (inline_dl != 0) { 931 if (inline_dl != 0) {
@@ -890,10 +933,32 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
890 goto out; 933 goto out;
891 } 934 }
892 935
936 if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) {
937 pr_err("XCOPY parameter truncation: data length %u too small "
938 "for tdll: %hu sdll: %u inline_dl: %u\n",
939 se_cmd->data_length, tdll, sdll, inline_dl);
940 ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
941 goto out;
942 }
943
893 pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x" 944 pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
894 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, 945 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
895 tdll, sdll, inline_dl); 946 tdll, sdll, inline_dl);
896 947
948 /*
949 * skip over the target descriptors until segment descriptors
950 * have been passed - CSCD ids are needed to determine src and dest.
951 */
952 seg_desc = &p[16] + tdll;
953
954 rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc,
955 sdll, &ret);
956 if (rc <= 0)
957 goto out;
958
959 pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
960 rc * XCOPY_SEGMENT_DESC_LEN);
961
897 rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret); 962 rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
898 if (rc <= 0) 963 if (rc <= 0)
899 goto out; 964 goto out;
@@ -911,18 +976,8 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
911 976
912 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, 977 pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
913 rc * XCOPY_TARGET_DESC_LEN); 978 rc * XCOPY_TARGET_DESC_LEN);
914 seg_desc = &p[16];
915 seg_desc += (rc * XCOPY_TARGET_DESC_LEN);
916
917 rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
918 if (rc <= 0) {
919 xcopy_pt_undepend_remotedev(xop);
920 goto out;
921 }
922 transport_kunmap_data_sg(se_cmd); 979 transport_kunmap_data_sg(se_cmd);
923 980
924 pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
925 rc * XCOPY_SEGMENT_DESC_LEN);
926 INIT_WORK(&xop->xop_work, target_xcopy_do_work); 981 INIT_WORK(&xop->xop_work, target_xcopy_do_work);
927 queue_work(xcopy_wq, &xop->xop_work); 982 queue_work(xcopy_wq, &xop->xop_work);
928 return TCM_NO_SENSE; 983 return TCM_NO_SENSE;
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index 4d3d4dd060f2..7c0b105cbe1b 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -1,10 +1,17 @@
1#include <target/target_core_base.h> 1#include <target/target_core_base.h>
2 2
3#define XCOPY_HDR_LEN 16
3#define XCOPY_TARGET_DESC_LEN 32 4#define XCOPY_TARGET_DESC_LEN 32
4#define XCOPY_SEGMENT_DESC_LEN 28 5#define XCOPY_SEGMENT_DESC_LEN 28
5#define XCOPY_NAA_IEEE_REGEX_LEN 16 6#define XCOPY_NAA_IEEE_REGEX_LEN 16
6#define XCOPY_MAX_SECTORS 1024 7#define XCOPY_MAX_SECTORS 1024
7 8
9/*
10 * SPC4r37 6.4.6.1
11 * Table 150 — CSCD descriptor ID values
12 */
13#define XCOPY_CSCD_DESC_ID_LIST_OFF_MAX 0x07FF
14
8enum xcopy_origin_list { 15enum xcopy_origin_list {
9 XCOL_SOURCE_RECV_OP = 0x01, 16 XCOL_SOURCE_RECV_OP = 0x01,
10 XCOL_DEST_RECV_OP = 0x02, 17 XCOL_DEST_RECV_OP = 0x02,
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index b811b0fb61b1..4c7796512453 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -118,12 +118,12 @@ struct rockchip_tsadc_chip {
118 void (*control)(void __iomem *reg, bool on); 118 void (*control)(void __iomem *reg, bool on);
119 119
120 /* Per-sensor methods */ 120 /* Per-sensor methods */
121 int (*get_temp)(struct chip_tsadc_table table, 121 int (*get_temp)(const struct chip_tsadc_table *table,
122 int chn, void __iomem *reg, int *temp); 122 int chn, void __iomem *reg, int *temp);
123 void (*set_alarm_temp)(struct chip_tsadc_table table, 123 int (*set_alarm_temp)(const struct chip_tsadc_table *table,
124 int chn, void __iomem *reg, int temp); 124 int chn, void __iomem *reg, int temp);
125 void (*set_tshut_temp)(struct chip_tsadc_table table, 125 int (*set_tshut_temp)(const struct chip_tsadc_table *table,
126 int chn, void __iomem *reg, int temp); 126 int chn, void __iomem *reg, int temp);
127 void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m); 127 void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
128 128
129 /* Per-table methods */ 129 /* Per-table methods */
@@ -317,6 +317,7 @@ static const struct tsadc_table rk3288_code_table[] = {
317 {3452, 115000}, 317 {3452, 115000},
318 {3437, 120000}, 318 {3437, 120000},
319 {3421, 125000}, 319 {3421, 125000},
320 {0, 125000},
320}; 321};
321 322
322static const struct tsadc_table rk3368_code_table[] = { 323static const struct tsadc_table rk3368_code_table[] = {
@@ -397,59 +398,80 @@ static const struct tsadc_table rk3399_code_table[] = {
397 {TSADCV3_DATA_MASK, 125000}, 398 {TSADCV3_DATA_MASK, 125000},
398}; 399};
399 400
400static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table, 401static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
401 int temp) 402 int temp)
402{ 403{
403 int high, low, mid; 404 int high, low, mid;
404 u32 error = 0; 405 unsigned long num;
406 unsigned int denom;
407 u32 error = table->data_mask;
405 408
406 low = 0; 409 low = 0;
407 high = table.length - 1; 410 high = (table->length - 1) - 1; /* ignore the last check for table */
408 mid = (high + low) / 2; 411 mid = (high + low) / 2;
409 412
410 /* Return mask code data when the temp is over table range */ 413 /* Return mask code data when the temp is over table range */
411 if (temp < table.id[low].temp || temp > table.id[high].temp) { 414 if (temp < table->id[low].temp || temp > table->id[high].temp)
412 error = table.data_mask;
413 goto exit; 415 goto exit;
414 }
415 416
416 while (low <= high) { 417 while (low <= high) {
417 if (temp == table.id[mid].temp) 418 if (temp == table->id[mid].temp)
418 return table.id[mid].code; 419 return table->id[mid].code;
419 else if (temp < table.id[mid].temp) 420 else if (temp < table->id[mid].temp)
420 high = mid - 1; 421 high = mid - 1;
421 else 422 else
422 low = mid + 1; 423 low = mid + 1;
423 mid = (low + high) / 2; 424 mid = (low + high) / 2;
424 } 425 }
425 426
427 /*
428 * The conversion code granularity provided by the table. Let's
429 * assume that the relationship between temperature and
430 * analog value between 2 table entries is linear and interpolate
431 * to produce less granular result.
432 */
433 num = abs(table->id[mid + 1].code - table->id[mid].code);
434 num *= temp - table->id[mid].temp;
435 denom = table->id[mid + 1].temp - table->id[mid].temp;
436
437 switch (table->mode) {
438 case ADC_DECREMENT:
439 return table->id[mid].code - (num / denom);
440 case ADC_INCREMENT:
441 return table->id[mid].code + (num / denom);
442 default:
443 pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
444 return error;
445 }
446
426exit: 447exit:
427 pr_err("Invalid the conversion, error=%d\n", error); 448 pr_err("%s: invalid temperature, temp=%d error=%d\n",
449 __func__, temp, error);
428 return error; 450 return error;
429} 451}
430 452
431static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code, 453static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
432 int *temp) 454 u32 code, int *temp)
433{ 455{
434 unsigned int low = 1; 456 unsigned int low = 1;
435 unsigned int high = table.length - 1; 457 unsigned int high = table->length - 1;
436 unsigned int mid = (low + high) / 2; 458 unsigned int mid = (low + high) / 2;
437 unsigned int num; 459 unsigned int num;
438 unsigned long denom; 460 unsigned long denom;
439 461
440 WARN_ON(table.length < 2); 462 WARN_ON(table->length < 2);
441 463
442 switch (table.mode) { 464 switch (table->mode) {
443 case ADC_DECREMENT: 465 case ADC_DECREMENT:
444 code &= table.data_mask; 466 code &= table->data_mask;
445 if (code < table.id[high].code) 467 if (code <= table->id[high].code)
446 return -EAGAIN; /* Incorrect reading */ 468 return -EAGAIN; /* Incorrect reading */
447 469
448 while (low <= high) { 470 while (low <= high) {
449 if (code >= table.id[mid].code && 471 if (code >= table->id[mid].code &&
450 code < table.id[mid - 1].code) 472 code < table->id[mid - 1].code)
451 break; 473 break;
452 else if (code < table.id[mid].code) 474 else if (code < table->id[mid].code)
453 low = mid + 1; 475 low = mid + 1;
454 else 476 else
455 high = mid - 1; 477 high = mid - 1;
@@ -458,15 +480,15 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
458 } 480 }
459 break; 481 break;
460 case ADC_INCREMENT: 482 case ADC_INCREMENT:
461 code &= table.data_mask; 483 code &= table->data_mask;
462 if (code < table.id[low].code) 484 if (code < table->id[low].code)
463 return -EAGAIN; /* Incorrect reading */ 485 return -EAGAIN; /* Incorrect reading */
464 486
465 while (low <= high) { 487 while (low <= high) {
466 if (code <= table.id[mid].code && 488 if (code <= table->id[mid].code &&
467 code > table.id[mid - 1].code) 489 code > table->id[mid - 1].code)
468 break; 490 break;
469 else if (code > table.id[mid].code) 491 else if (code > table->id[mid].code)
470 low = mid + 1; 492 low = mid + 1;
471 else 493 else
472 high = mid - 1; 494 high = mid - 1;
@@ -475,7 +497,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
475 } 497 }
476 break; 498 break;
477 default: 499 default:
478 pr_err("Invalid the conversion table\n"); 500 pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
501 return -EINVAL;
479 } 502 }
480 503
481 /* 504 /*
@@ -484,10 +507,10 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
484 * temperature between 2 table entries is linear and interpolate 507 * temperature between 2 table entries is linear and interpolate
485 * to produce less granular result. 508 * to produce less granular result.
486 */ 509 */
487 num = table.id[mid].temp - table.id[mid - 1].temp; 510 num = table->id[mid].temp - table->id[mid - 1].temp;
488 num *= abs(table.id[mid - 1].code - code); 511 num *= abs(table->id[mid - 1].code - code);
489 denom = abs(table.id[mid - 1].code - table.id[mid].code); 512 denom = abs(table->id[mid - 1].code - table->id[mid].code);
490 *temp = table.id[mid - 1].temp + (num / denom); 513 *temp = table->id[mid - 1].temp + (num / denom);
491 514
492 return 0; 515 return 0;
493} 516}
@@ -638,7 +661,7 @@ static void rk_tsadcv3_control(void __iomem *regs, bool enable)
638 writel_relaxed(val, regs + TSADCV2_AUTO_CON); 661 writel_relaxed(val, regs + TSADCV2_AUTO_CON);
639} 662}
640 663
641static int rk_tsadcv2_get_temp(struct chip_tsadc_table table, 664static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table,
642 int chn, void __iomem *regs, int *temp) 665 int chn, void __iomem *regs, int *temp)
643{ 666{
644 u32 val; 667 u32 val;
@@ -648,39 +671,57 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
648 return rk_tsadcv2_code_to_temp(table, val, temp); 671 return rk_tsadcv2_code_to_temp(table, val, temp);
649} 672}
650 673
651static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table, 674static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table,
652 int chn, void __iomem *regs, int temp) 675 int chn, void __iomem *regs, int temp)
653{ 676{
654 u32 alarm_value, int_en; 677 u32 alarm_value;
678 u32 int_en, int_clr;
679
680 /*
681 * In some cases, some sensors didn't need the trip points, the
682 * set_trips will pass {-INT_MAX, INT_MAX} to trigger tsadc alarm
683 * in the end, ignore this case and disable the high temperature
684 * interrupt.
685 */
686 if (temp == INT_MAX) {
687 int_clr = readl_relaxed(regs + TSADCV2_INT_EN);
688 int_clr &= ~TSADCV2_INT_SRC_EN(chn);
689 writel_relaxed(int_clr, regs + TSADCV2_INT_EN);
690 return 0;
691 }
655 692
656 /* Make sure the value is valid */ 693 /* Make sure the value is valid */
657 alarm_value = rk_tsadcv2_temp_to_code(table, temp); 694 alarm_value = rk_tsadcv2_temp_to_code(table, temp);
658 if (alarm_value == table.data_mask) 695 if (alarm_value == table->data_mask)
659 return; 696 return -ERANGE;
660 697
661 writel_relaxed(alarm_value & table.data_mask, 698 writel_relaxed(alarm_value & table->data_mask,
662 regs + TSADCV2_COMP_INT(chn)); 699 regs + TSADCV2_COMP_INT(chn));
663 700
664 int_en = readl_relaxed(regs + TSADCV2_INT_EN); 701 int_en = readl_relaxed(regs + TSADCV2_INT_EN);
665 int_en |= TSADCV2_INT_SRC_EN(chn); 702 int_en |= TSADCV2_INT_SRC_EN(chn);
666 writel_relaxed(int_en, regs + TSADCV2_INT_EN); 703 writel_relaxed(int_en, regs + TSADCV2_INT_EN);
704
705 return 0;
667} 706}
668 707
669static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table, 708static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table,
670 int chn, void __iomem *regs, int temp) 709 int chn, void __iomem *regs, int temp)
671{ 710{
672 u32 tshut_value, val; 711 u32 tshut_value, val;
673 712
674 /* Make sure the value is valid */ 713 /* Make sure the value is valid */
675 tshut_value = rk_tsadcv2_temp_to_code(table, temp); 714 tshut_value = rk_tsadcv2_temp_to_code(table, temp);
676 if (tshut_value == table.data_mask) 715 if (tshut_value == table->data_mask)
677 return; 716 return -ERANGE;
678 717
679 writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn)); 718 writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
680 719
681 /* TSHUT will be valid */ 720 /* TSHUT will be valid */
682 val = readl_relaxed(regs + TSADCV2_AUTO_CON); 721 val = readl_relaxed(regs + TSADCV2_AUTO_CON);
683 writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON); 722 writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON);
723
724 return 0;
684} 725}
685 726
686static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs, 727static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
@@ -883,10 +924,8 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
883 dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n", 924 dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n",
884 __func__, sensor->id, low, high); 925 __func__, sensor->id, low, high);
885 926
886 tsadc->set_alarm_temp(tsadc->table, 927 return tsadc->set_alarm_temp(&tsadc->table,
887 sensor->id, thermal->regs, high); 928 sensor->id, thermal->regs, high);
888
889 return 0;
890} 929}
891 930
892static int rockchip_thermal_get_temp(void *_sensor, int *out_temp) 931static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
@@ -896,7 +935,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
896 const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip; 935 const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
897 int retval; 936 int retval;
898 937
899 retval = tsadc->get_temp(tsadc->table, 938 retval = tsadc->get_temp(&tsadc->table,
900 sensor->id, thermal->regs, out_temp); 939 sensor->id, thermal->regs, out_temp);
901 dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n", 940 dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
902 sensor->id, *out_temp, retval); 941 sensor->id, *out_temp, retval);
@@ -982,8 +1021,12 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
982 int error; 1021 int error;
983 1022
984 tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); 1023 tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
985 tsadc->set_tshut_temp(tsadc->table, id, thermal->regs, 1024
1025 error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs,
986 thermal->tshut_temp); 1026 thermal->tshut_temp);
1027 if (error)
1028 dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
1029 __func__, thermal->tshut_temp, error);
987 1030
988 sensor->thermal = thermal; 1031 sensor->thermal = thermal;
989 sensor->id = id; 1032 sensor->id = id;
@@ -1196,9 +1239,13 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
1196 1239
1197 thermal->chip->set_tshut_mode(id, thermal->regs, 1240 thermal->chip->set_tshut_mode(id, thermal->regs,
1198 thermal->tshut_mode); 1241 thermal->tshut_mode);
1199 thermal->chip->set_tshut_temp(thermal->chip->table, 1242
1243 error = thermal->chip->set_tshut_temp(&thermal->chip->table,
1200 id, thermal->regs, 1244 id, thermal->regs,
1201 thermal->tshut_temp); 1245 thermal->tshut_temp);
1246 if (error)
1247 dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
1248 __func__, thermal->tshut_temp, error);
1202 } 1249 }
1203 1250
1204 thermal->chip->control(thermal->regs, true); 1251 thermal->chip->control(thermal->regs, true);
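[Sketch] The rockchip_thermal hunks make temp_to_code interpolate between table entries the same way code_to_temp already did: binary-search the bracketing pair, then assume the code/temperature relation is linear between them. The sketch below models the ADC_DECREMENT case (codes fall as temperature rises) with an invented four-entry table and an assumed 0xfff data mask; only the algorithm mirrors the diff.

#include <stdio.h>
#include <stdlib.h>

struct entry { unsigned code; int temp_mc; };    /* millicelsius */

static const struct entry table[] = {
    { 3800, 0 }, { 3700, 25000 }, { 3600, 50000 }, { 3500, 75000 },
};
#define LEN (sizeof(table) / sizeof(table[0]))

static unsigned temp_to_code(int temp)
{
    int low = 0, high = LEN - 2, mid;    /* last entry is a sentinel */
    unsigned long num;
    unsigned denom;

    if (temp < table[low].temp_mc || temp > table[high].temp_mc)
        return 0xfff;                    /* data_mask: out of range */

    while (low <= high) {
        mid = (low + high) / 2;
        if (temp == table[mid].temp_mc)
            return table[mid].code;      /* exact hit, no interpolation */
        if (temp < table[mid].temp_mc)
            high = mid - 1;
        else
            low = mid + 1;
    }
    mid = (low + high) / 2;              /* entries mid, mid+1 bracket temp */

    /* linear interpolation between the two bracketing entries */
    num = abs((int)table[mid + 1].code - (int)table[mid].code);
    num *= temp - table[mid].temp_mc;
    denom = table[mid + 1].temp_mc - table[mid].temp_mc;
    return table[mid].code - num / denom;    /* ADC_DECREMENT direction */
}

int main(void)
{
    printf("code(30C) = %u\n", temp_to_code(30000));    /* between entries */
    return 0;
}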
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 641faab6e24b..655591316a88 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -799,6 +799,11 @@ static void thermal_release(struct device *dev)
799 if (!strncmp(dev_name(dev), "thermal_zone", 799 if (!strncmp(dev_name(dev), "thermal_zone",
800 sizeof("thermal_zone") - 1)) { 800 sizeof("thermal_zone") - 1)) {
801 tz = to_thermal_zone(dev); 801 tz = to_thermal_zone(dev);
802 kfree(tz->trip_type_attrs);
803 kfree(tz->trip_temp_attrs);
804 kfree(tz->trip_hyst_attrs);
805 kfree(tz->trips_attribute_group.attrs);
806 kfree(tz->device.groups);
802 kfree(tz); 807 kfree(tz);
803 } else if (!strncmp(dev_name(dev), "cooling_device", 808 } else if (!strncmp(dev_name(dev), "cooling_device",
804 sizeof("cooling_device") - 1)) { 809 sizeof("cooling_device") - 1)) {
@@ -1305,10 +1310,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1305 1310
1306 thermal_zone_device_set_polling(tz, 0); 1311 thermal_zone_device_set_polling(tz, 0);
1307 1312
1308 kfree(tz->trip_type_attrs);
1309 kfree(tz->trip_temp_attrs);
1310 kfree(tz->trip_hyst_attrs);
1311 kfree(tz->trips_attribute_group.attrs);
1312 thermal_set_governor(tz, NULL); 1313 thermal_set_governor(tz, NULL);
1313 1314
1314 thermal_remove_hwmon_sysfs(tz); 1315 thermal_remove_hwmon_sysfs(tz);
@@ -1316,7 +1317,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1316 idr_destroy(&tz->idr); 1317 idr_destroy(&tz->idr);
1317 mutex_destroy(&tz->lock); 1318 mutex_destroy(&tz->lock);
1318 device_unregister(&tz->device); 1319 device_unregister(&tz->device);
1319 kfree(tz->device.groups);
1320} 1320}
1321EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); 1321EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
1322 1322
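[Sketch] The thermal_core hunks move the kfree() of the trip attribute arrays and device groups out of thermal_zone_device_unregister() and into thermal_release(), the device's release callback, so the buffers backing sysfs files stay valid until the last reference is dropped. A rough userspace model of that ownership rule, with a hand-rolled refcount standing in for the kobject one and all names invented:

#include <stdio.h>
#include <stdlib.h>

struct zone {
    int refs;
    char *trip_attrs;        /* stands in for the trip_*_attrs arrays */
};

static void zone_release(struct zone *z)
{
    free(z->trip_attrs);     /* freed here, together with the zone */
    free(z);
    printf("zone released\n");
}

static void zone_put(struct zone *z)
{
    if (--z->refs == 0)
        zone_release(z);
}

int main(void)
{
    struct zone *z = calloc(1, sizeof(*z));

    z->refs = 2;             /* registration ref + an open sysfs file */
    z->trip_attrs = malloc(64);

    zone_put(z);             /* unregister: drop the ref, free nothing */
    /* a reader still holds a ref; trip_attrs must stay valid here */
    zone_put(z);             /* last put: release frees both objects */
    return 0;
}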
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 61569a765d9e..76e03a7de9cc 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -675,7 +675,7 @@ static struct console univ8250_console = {
675 .device = uart_console_device, 675 .device = uart_console_device,
676 .setup = univ8250_console_setup, 676 .setup = univ8250_console_setup,
677 .match = univ8250_console_match, 677 .match = univ8250_console_match,
678 .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_CONSDEV, 678 .flags = CON_PRINTBUFFER | CON_ANYTIME,
679 .index = -1, 679 .index = -1,
680 .data = &serial8250_reg, 680 .data = &serial8250_reg,
681}; 681};
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index aa0166b6d450..116436b7fa52 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5642,17 +5642,15 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
5642static void serial8250_io_resume(struct pci_dev *dev) 5642static void serial8250_io_resume(struct pci_dev *dev)
5643{ 5643{
5644 struct serial_private *priv = pci_get_drvdata(dev); 5644 struct serial_private *priv = pci_get_drvdata(dev);
5645 const struct pciserial_board *board; 5645 struct serial_private *new;
5646 5646
5647 if (!priv) 5647 if (!priv)
5648 return; 5648 return;
5649 5649
5650 board = priv->board; 5650 new = pciserial_init_ports(dev, priv->board);
5651 kfree(priv); 5651 if (!IS_ERR(new)) {
5652 priv = pciserial_init_ports(dev, board); 5652 pci_set_drvdata(dev, new);
5653 5653 kfree(priv);
5654 if (!IS_ERR(priv)) {
5655 pci_set_drvdata(dev, priv);
5656 } 5654 }
5657} 5655}
5658 5656
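[Sketch] The serial8250_io_resume() fix reorders the recovery path so the old serial_private is freed only after pciserial_init_ports() has returned a usable replacement; on failure the original drvdata is left in place instead of dangling. A generic sketch of that swap-on-success pattern, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct state { int ports; };

static struct state *make_state(int ok)
{
    return ok ? calloc(1, sizeof(struct state)) : NULL;
}

static struct state *resume(struct state *old, int reinit_ok)
{
    struct state *fresh = make_state(reinit_ok);

    if (!fresh)
        return old;          /* init failed: keep the old state intact */
    free(old);               /* success: only now retire the old state */
    return fresh;
}

int main(void)
{
    struct state *s = make_state(1);

    s = resume(s, 0);        /* failed resume must not leave s dangling */
    s = resume(s, 1);
    free(s);
    return 0;
}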
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index fe4399b41df6..c13fec451d03 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1413,7 +1413,7 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
1413 * Enable previously disabled RX interrupts. 1413 * Enable previously disabled RX interrupts.
1414 */ 1414 */
1415 if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) { 1415 if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
1416 serial8250_clear_fifos(p); 1416 serial8250_clear_and_reinit_fifos(p);
1417 1417
1418 p->ier |= UART_IER_RLSI | UART_IER_RDI; 1418 p->ier |= UART_IER_RLSI | UART_IER_RDI;
1419 serial_port_out(&p->port, UART_IER, p->ier); 1419 serial_port_out(&p->port, UART_IER, p->ier);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 168b10cad47b..fabbe76203bb 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -481,6 +481,14 @@ static void atmel_stop_tx(struct uart_port *port)
481 /* disable PDC transmit */ 481 /* disable PDC transmit */
482 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); 482 atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
483 } 483 }
484
485 /*
486 * Disable the transmitter.
487 * This is mandatory when DMA is used, otherwise the DMA buffer
488 * is fully transmitted.
489 */
490 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
491
484 /* Disable interrupts */ 492 /* Disable interrupts */
485 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 493 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
486 494
@@ -513,6 +521,9 @@ static void atmel_start_tx(struct uart_port *port)
513 521
514 /* Enable interrupts */ 522 /* Enable interrupts */
515 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); 523 atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
524
525 /* re-enable the transmitter */
526 atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
516} 527}
517 528
518/* 529/*
@@ -798,6 +809,11 @@ static void atmel_complete_tx_dma(void *arg)
798 */ 809 */
799 if (!uart_circ_empty(xmit)) 810 if (!uart_circ_empty(xmit))
800 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 811 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
812 else if ((port->rs485.flags & SER_RS485_ENABLED) &&
813 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
814 /* DMA done, stop TX, start RX for RS485 */
815 atmel_start_rx(port);
816 }
801 817
802 spin_unlock_irqrestore(&port->lock, flags); 818 spin_unlock_irqrestore(&port->lock, flags);
803} 819}
@@ -900,12 +916,6 @@ static void atmel_tx_dma(struct uart_port *port)
900 desc->callback = atmel_complete_tx_dma; 916 desc->callback = atmel_complete_tx_dma;
901 desc->callback_param = atmel_port; 917 desc->callback_param = atmel_port;
902 atmel_port->cookie_tx = dmaengine_submit(desc); 918 atmel_port->cookie_tx = dmaengine_submit(desc);
903
904 } else {
905 if (port->rs485.flags & SER_RS485_ENABLED) {
906 /* DMA done, stop TX, start RX for RS485 */
907 atmel_start_rx(port);
908 }
909 } 919 }
910 920
911 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 921 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
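[Sketch] The atmel_serial hunks move the RS485 receiver turnaround out of atmel_tx_dma() submission and into atmel_complete_tx_dma(), so RX is re-enabled only once the DMA engine reports the buffer fully transmitted and no further data is queued. A toy model of that ordering (plain C, invented names):

#include <stdio.h>

struct port { int rs485; int rx_during_tx; };

static void start_rx(struct port *p)
{
    (void)p;
    printf("RX re-enabled\n");
}

/* runs when the DMA engine reports the TX buffer fully transmitted */
static void complete_tx_dma(struct port *p, int more_data_pending)
{
    if (more_data_pending)
        return;              /* keep transmitting */
    if (p->rs485 && !p->rx_during_tx)
        start_rx(p);         /* only now is the bus actually free */
}

int main(void)
{
    struct port p = { .rs485 = 1, .rx_during_tx = 0 };

    complete_tx_dma(&p, 1);  /* nothing happens: TX still busy */
    complete_tx_dma(&p, 0);  /* turnaround happens here */
    return 0;
}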
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 52bbd27e93ae..701c085bb19b 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -946,8 +946,8 @@ static const struct input_device_id sysrq_ids[] = {
946 { 946 {
947 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | 947 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
948 INPUT_DEVICE_ID_MATCH_KEYBIT, 948 INPUT_DEVICE_ID_MATCH_KEYBIT,
949 .evbit = { BIT_MASK(EV_KEY) }, 949 .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
950 .keybit = { BIT_MASK(KEY_LEFTALT) }, 950 .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
951 }, 951 },
952 { }, 952 { },
953}; 953};
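[Sketch] The sysrq fix indexes the bitmaps with BIT_WORD() because BIT_MASK(n) only yields the bit position within one long: writing it into slot 0 is correct only when the code fits the first word. That happens to hold for KEY_LEFTALT (56) on 64-bit longs but not on 32-bit builds, where the mask landed in the wrong word. A standalone demonstration, with the usual Linux macro definitions reproduced for the build:

#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))

#define KEY_LEFTALT     56   /* fits word 0 only on 64-bit longs */
#define KEY_RIGHTCTRL   97   /* needs word 1 on 64-bit, word 3 on 32-bit */

int main(void)
{
    unsigned long keybit[8] = { 0 };

    /* Broken pattern: always writes word 0, wherever the key belongs. */
    /* keybit[0] = BIT_MASK(KEY_RIGHTCTRL); */

    /* Fixed pattern: place the mask in the word the keycode maps to. */
    keybit[BIT_WORD(KEY_RIGHTCTRL)] = BIT_MASK(KEY_RIGHTCTRL);

    printf("word %zu = %#lx\n", BIT_WORD(KEY_RIGHTCTRL),
           keybit[BIT_WORD(KEY_RIGHTCTRL)]);
    return 0;
}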
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 0aa9e7d697a5..25dbd8c7aec7 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -239,6 +239,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
239 if (ifp->desc.bNumEndpoints >= num_ep) 239 if (ifp->desc.bNumEndpoints >= num_ep)
240 goto skip_to_next_endpoint_or_interface_descriptor; 240 goto skip_to_next_endpoint_or_interface_descriptor;
241 241
242 /* Check for duplicate endpoint addresses */
243 for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
244 if (ifp->endpoint[i].desc.bEndpointAddress ==
245 d->bEndpointAddress) {
246 dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
247 cfgno, inum, asnum, d->bEndpointAddress);
248 goto skip_to_next_endpoint_or_interface_descriptor;
249 }
250 }
251
242 endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; 252 endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
243 ++ifp->desc.bNumEndpoints; 253 ++ifp->desc.bNumEndpoints;
244 254
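[Sketch] The config.c hunk rejects an endpoint descriptor whose bEndpointAddress repeats one already accepted for the altsetting, so later code never operates on aliased endpoints. A standalone model of that scan-before-accept check (types and limits invented):

#include <stdio.h>

struct ep { unsigned char addr; };

static int add_endpoint(struct ep *eps, int *num, unsigned char addr)
{
    int i;

    for (i = 0; i < *num; i++) {
        if (eps[i].addr == addr) {
            fprintf(stderr,
                    "duplicate endpoint 0x%02X, skipping\n", addr);
            return -1;
        }
    }
    eps[(*num)++].addr = addr;
    return 0;
}

int main(void)
{
    struct ep eps[30];
    int num = 0;

    add_endpoint(eps, &num, 0x81);
    add_endpoint(eps, &num, 0x02);
    add_endpoint(eps, &num, 0x81);   /* rejected: already present */
    printf("%d endpoints accepted\n", num);
    return 0;
}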
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1fa5c0f29c64..a56c75e09786 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -103,8 +103,7 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
103 103
104static void hub_release(struct kref *kref); 104static void hub_release(struct kref *kref);
105static int usb_reset_and_verify_device(struct usb_device *udev); 105static int usb_reset_and_verify_device(struct usb_device *udev);
106static void hub_usb3_port_prepare_disable(struct usb_hub *hub, 106static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
107 struct usb_port *port_dev);
108 107
109static inline char *portspeed(struct usb_hub *hub, int portstatus) 108static inline char *portspeed(struct usb_hub *hub, int portstatus)
110{ 109{
@@ -903,34 +902,6 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
903} 902}
904 903
905/* 904/*
906 * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
907 * a connection with a plugged-in cable but will signal the host when the cable
908 * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
909 */
910static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
911{
912 struct usb_port *port_dev = hub->ports[port1 - 1];
913 struct usb_device *hdev = hub->hdev;
914 int ret = 0;
915
916 if (!hub->error) {
917 if (hub_is_superspeed(hub->hdev)) {
918 hub_usb3_port_prepare_disable(hub, port_dev);
919 ret = hub_set_port_link_state(hub, port_dev->portnum,
920 USB_SS_PORT_LS_U3);
921 } else {
922 ret = usb_clear_port_feature(hdev, port1,
923 USB_PORT_FEAT_ENABLE);
924 }
925 }
926 if (port_dev->child && set_state)
927 usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
928 if (ret && ret != -ENODEV)
929 dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
930 return ret;
931}
932
933/*
934 * Disable a port and mark a logical connect-change event, so that some 905 * Disable a port and mark a logical connect-change event, so that some
935 * time later hub_wq will disconnect() any existing usb_device on the port 906 * time later hub_wq will disconnect() any existing usb_device on the port
936 * and will re-enumerate if there actually is a device attached. 907 * and will re-enumerate if there actually is a device attached.
@@ -4162,6 +4133,34 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
4162 4133
4163#endif /* CONFIG_PM */ 4134#endif /* CONFIG_PM */
4164 4135
4136/*
4137 * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
4138 * a connection with a plugged-in cable but will signal the host when the cable
4139 * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
4140 */
4141static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
4142{
4143 struct usb_port *port_dev = hub->ports[port1 - 1];
4144 struct usb_device *hdev = hub->hdev;
4145 int ret = 0;
4146
4147 if (!hub->error) {
4148 if (hub_is_superspeed(hub->hdev)) {
4149 hub_usb3_port_prepare_disable(hub, port_dev);
4150 ret = hub_set_port_link_state(hub, port_dev->portnum,
4151 USB_SS_PORT_LS_U3);
4152 } else {
4153 ret = usb_clear_port_feature(hdev, port1,
4154 USB_PORT_FEAT_ENABLE);
4155 }
4156 }
4157 if (port_dev->child && set_state)
4158 usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
4159 if (ret && ret != -ENODEV)
4160 dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
4161 return ret;
4162}
4163
4165 4164
4166/* USB 2.0 spec, 7.1.7.3 / fig 7-29: 4165/* USB 2.0 spec, 7.1.7.3 / fig 7-29:
4167 * 4166 *
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d2e50a27140c..24f9f98968a5 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
37 /* CBM - Flash disk */ 37 /* CBM - Flash disk */
38 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, 38 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
39 39
40 /* WORLDE easy key (easykey.25) MIDI controller */
41 { USB_DEVICE(0x0218, 0x0401), .driver_info =
42 USB_QUIRK_CONFIG_INTF_STRINGS },
43
40 /* HP 5300/5370C scanner */ 44 /* HP 5300/5370C scanner */
41 { USB_DEVICE(0x03f0, 0x0701), .driver_info = 45 { USB_DEVICE(0x03f0, 0x0701), .driver_info =
42 USB_QUIRK_STRING_FETCH_255 }, 46 USB_QUIRK_STRING_FETCH_255 },
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 9548d3e03453..302b8f5f7d27 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -513,8 +513,8 @@ struct dwc2_core_params {
513 /* Gadget parameters */ 513 /* Gadget parameters */
514 bool g_dma; 514 bool g_dma;
515 bool g_dma_desc; 515 bool g_dma_desc;
516 u16 g_rx_fifo_size; 516 u32 g_rx_fifo_size;
517 u16 g_np_tx_fifo_size; 517 u32 g_np_tx_fifo_size;
518 u32 g_tx_fifo_size[MAX_EPS_CHANNELS]; 518 u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
519}; 519};
520 520
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index b95930f20d90..77c5fcf3a5bf 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3169,7 +3169,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3169 /* keep other bits untouched (so e.g. forced modes are not lost) */ 3169 /* keep other bits untouched (so e.g. forced modes are not lost) */
3170 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 3170 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
3171 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | 3171 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
3172 GUSBCFG_HNPCAP); 3172 GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
3173 3173
3174 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS && 3174 if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
3175 (hsotg->params.speed == DWC2_SPEED_PARAM_FULL || 3175 (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
@@ -3749,11 +3749,11 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
3749 __func__, epctrl, epctrl_reg); 3749 __func__, epctrl, epctrl_reg);
3750 3750
3751 /* Allocate DMA descriptor chain for non-ctrl endpoints */ 3751 /* Allocate DMA descriptor chain for non-ctrl endpoints */
3752 if (using_desc_dma(hsotg)) { 3752 if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
3753 hs_ep->desc_list = dma_alloc_coherent(hsotg->dev, 3753 hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
3754 MAX_DMA_DESC_NUM_GENERIC * 3754 MAX_DMA_DESC_NUM_GENERIC *
3755 sizeof(struct dwc2_dma_desc), 3755 sizeof(struct dwc2_dma_desc),
3756 &hs_ep->desc_list_dma, GFP_KERNEL); 3756 &hs_ep->desc_list_dma, GFP_ATOMIC);
3757 if (!hs_ep->desc_list) { 3757 if (!hs_ep->desc_list) {
3758 ret = -ENOMEM; 3758 ret = -ENOMEM;
3759 goto error2; 3759 goto error2;
@@ -3872,7 +3872,7 @@ error1:
3872 3872
3873error2: 3873error2:
3874 if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) { 3874 if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
3875 dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC * 3875 dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
3876 sizeof(struct dwc2_dma_desc), 3876 sizeof(struct dwc2_dma_desc),
3877 hs_ep->desc_list, hs_ep->desc_list_dma); 3877 hs_ep->desc_list, hs_ep->desc_list_dma);
3878 hs_ep->desc_list = NULL; 3878 hs_ep->desc_list = NULL;
@@ -3902,14 +3902,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
3902 return -EINVAL; 3902 return -EINVAL;
3903 } 3903 }
3904 3904
3905 /* Remove DMA memory allocated for non-control Endpoints */
3906 if (using_desc_dma(hsotg)) {
3907 dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
3908 sizeof(struct dwc2_dma_desc),
3909 hs_ep->desc_list, hs_ep->desc_list_dma);
3910 hs_ep->desc_list = NULL;
3911 }
3912
3913 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index); 3905 epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
3914 3906
3915 spin_lock_irqsave(&hsotg->lock, flags); 3907 spin_lock_irqsave(&hsotg->lock, flags);
@@ -4131,7 +4123,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
4131 /* keep other bits untouched (so e.g. forced modes are not lost) */ 4123 /* keep other bits untouched (so e.g. forced modes are not lost) */
4132 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 4124 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
4133 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | 4125 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
4134 GUSBCFG_HNPCAP); 4126 GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
4135 4127
4136 /* set the PLL on, remove the HNP/SRP and set the PHY */ 4128 /* set the PLL on, remove the HNP/SRP and set the PHY */
4137 trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; 4129 trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
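[Sketch] The gadget.c change allocates the DMA descriptor chain only on the first ep_enable (note the added !hs_ep->desc_list test) and switches to the managed dmam_alloc_coherent()/dmam_free_coherent() pair, so ep_disable no longer frees the chain and final teardown happens with the device. A plain-C stand-in for that allocate-once lifetime; no kernel APIs are used here:

#include <stdio.h>
#include <stdlib.h>

struct ep { void *desc_list; };

static int ep_enable(struct ep *ep)
{
    if (!ep->desc_list) {            /* first enable only */
        ep->desc_list = calloc(256, 16);
        if (!ep->desc_list)
            return -1;
        printf("allocated descriptor chain\n");
    }
    return 0;
}

static void ep_disable(struct ep *ep)
{
    (void)ep;    /* no free here: the chain is reused on the next enable */
}

int main(void)
{
    struct ep ep = { 0 };

    ep_enable(&ep);
    ep_disable(&ep);
    ep_enable(&ep);          /* reuses the existing chain */
    free(ep.desc_list);      /* models device-managed teardown */
    return 0;
}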
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 911c3b36ac06..46d0ad5105e4 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4367,6 +4367,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
4367 if (!HCD_HW_ACCESSIBLE(hcd)) 4367 if (!HCD_HW_ACCESSIBLE(hcd))
4368 goto unlock; 4368 goto unlock;
4369 4369
4370 if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
4371 goto unlock;
4372
4370 if (!hsotg->params.hibernation) 4373 if (!hsotg->params.hibernation)
4371 goto skip_power_saving; 4374 goto skip_power_saving;
4372 4375
@@ -4489,8 +4492,8 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
4489{ 4492{
4490#ifdef VERBOSE_DEBUG 4493#ifdef VERBOSE_DEBUG
4491 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 4494 struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
4492 char *pipetype; 4495 char *pipetype = NULL;
4493 char *speed; 4496 char *speed = NULL;
4494 4497
4495 dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb); 4498 dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
4496 dev_vdbg(hsotg->dev, " Device address: %d\n", 4499 dev_vdbg(hsotg->dev, " Device address: %d\n",
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index a786256535b6..bcd1e19b4076 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -247,8 +247,6 @@ MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
247static void dwc2_get_device_property(struct dwc2_hsotg *hsotg, 247static void dwc2_get_device_property(struct dwc2_hsotg *hsotg,
248 char *property, u8 size, u64 *value) 248 char *property, u8 size, u64 *value)
249{ 249{
250 u8 val8;
251 u16 val16;
252 u32 val32; 250 u32 val32;
253 251
254 switch (size) { 252 switch (size) {
@@ -256,17 +254,7 @@ static void dwc2_get_device_property(struct dwc2_hsotg *hsotg,
256 *value = device_property_read_bool(hsotg->dev, property); 254 *value = device_property_read_bool(hsotg->dev, property);
257 break; 255 break;
258 case 1: 256 case 1:
259 if (device_property_read_u8(hsotg->dev, property, &val8))
260 return;
261
262 *value = val8;
263 break;
264 case 2: 257 case 2:
265 if (device_property_read_u16(hsotg->dev, property, &val16))
266 return;
267
268 *value = val16;
269 break;
270 case 4: 258 case 4:
271 if (device_property_read_u32(hsotg->dev, property, &val32)) 259 if (device_property_read_u32(hsotg->dev, property, &val32))
272 return; 260 return;
@@ -397,16 +385,16 @@ static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param,
397} 385}
398 386
399/** 387/**
400 * dwc2_set_param_u16() - Set a u16 parameter 388 * dwc2_set_param_u32() - Set a u32 parameter
401 * 389 *
402 * See dwc2_set_param(). 390 * See dwc2_set_param().
403 */ 391 */
404static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param, 392static void dwc2_set_param_u32(struct dwc2_hsotg *hsotg, u32 *param,
405 bool lookup, char *property, u16 legacy, 393 bool lookup, char *property, u16 legacy,
406 u16 def, u16 min, u16 max) 394 u16 def, u16 min, u16 max)
407{ 395{
408 dwc2_set_param(hsotg, param, lookup, property, 396 dwc2_set_param(hsotg, param, lookup, property,
409 legacy, def, min, max, 2); 397 legacy, def, min, max, 4);
410} 398}
411 399
412/** 400/**
@@ -1100,13 +1088,13 @@ static void dwc2_set_gadget_dma(struct dwc2_hsotg *hsotg)
1100 /* Buffer DMA */ 1088 /* Buffer DMA */
1101 dwc2_set_param_bool(hsotg, &p->g_dma, 1089 dwc2_set_param_bool(hsotg, &p->g_dma,
1102 false, "gadget-dma", 1090 false, "gadget-dma",
1103 true, false, 1091 dma_capable, false,
1104 dma_capable); 1092 dma_capable);
1105 1093
1106 /* DMA Descriptor */ 1094 /* DMA Descriptor */
1107 dwc2_set_param_bool(hsotg, &p->g_dma_desc, false, 1095 dwc2_set_param_bool(hsotg, &p->g_dma_desc, false,
1108 "gadget-dma-desc", 1096 "gadget-dma-desc",
1109 p->g_dma, false, 1097 !!hw->dma_desc_enable, false,
1110 !!hw->dma_desc_enable); 1098 !!hw->dma_desc_enable);
1111} 1099}
1112 1100
@@ -1130,8 +1118,14 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
1130 1118
1131 dwc2_set_param_bool(hsotg, &p->host_dma, 1119 dwc2_set_param_bool(hsotg, &p->host_dma,
1132 false, "host-dma", 1120 false, "host-dma",
1133 true, false, 1121 dma_capable, false,
1134 dma_capable); 1122 dma_capable);
1123 dwc2_set_param_host_rx_fifo_size(hsotg,
1124 params->host_rx_fifo_size);
1125 dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
1126 params->host_nperio_tx_fifo_size);
1127 dwc2_set_param_host_perio_tx_fifo_size(hsotg,
1128 params->host_perio_tx_fifo_size);
1135 } 1129 }
1136 dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable); 1130 dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
1137 dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable); 1131 dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
@@ -1140,12 +1134,6 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
1140 params->host_support_fs_ls_low_power); 1134 params->host_support_fs_ls_low_power);
1141 dwc2_set_param_enable_dynamic_fifo(hsotg, 1135 dwc2_set_param_enable_dynamic_fifo(hsotg,
1142 params->enable_dynamic_fifo); 1136 params->enable_dynamic_fifo);
1143 dwc2_set_param_host_rx_fifo_size(hsotg,
1144 params->host_rx_fifo_size);
1145 dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
1146 params->host_nperio_tx_fifo_size);
1147 dwc2_set_param_host_perio_tx_fifo_size(hsotg,
1148 params->host_perio_tx_fifo_size);
1149 dwc2_set_param_max_transfer_size(hsotg, 1137 dwc2_set_param_max_transfer_size(hsotg,
1150 params->max_transfer_size); 1138 params->max_transfer_size);
1151 dwc2_set_param_max_packet_count(hsotg, 1139 dwc2_set_param_max_packet_count(hsotg,
@@ -1190,12 +1178,12 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
1190 * auto-detect if the hardware does not support the 1178 * auto-detect if the hardware does not support the
1191 * default. 1179 * default.
1192 */ 1180 */
1193 dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size, 1181 dwc2_set_param_u32(hsotg, &p->g_rx_fifo_size,
1194 true, "g-rx-fifo-size", 2048, 1182 true, "g-rx-fifo-size", 2048,
1195 hw->rx_fifo_size, 1183 hw->rx_fifo_size,
1196 16, hw->rx_fifo_size); 1184 16, hw->rx_fifo_size);
1197 1185
1198 dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size, 1186 dwc2_set_param_u32(hsotg, &p->g_np_tx_fifo_size,
1199 true, "g-np-tx-fifo-size", 1024, 1187 true, "g-np-tx-fifo-size", 1024,
1200 hw->dev_nperio_tx_fifo_size, 1188 hw->dev_nperio_tx_fifo_size,
1201 16, hw->dev_nperio_tx_fifo_size); 1189 16, hw->dev_nperio_tx_fifo_size);
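[Sketch] The renamed dwc2_set_param_u32() feeds each FIFO size through the common helper: an optional device-tree override, a default derived from the detected hardware, and bounds to keep the result valid. The sketch below compresses the helper's warn-and-fall-back behaviour into a plain clamp, which is an intentional simplification; all names and values are illustrative.

#include <stdio.h>

static unsigned set_param_u32(int have_dt, unsigned dt_val,
                              unsigned def, unsigned min, unsigned max)
{
    unsigned val = have_dt ? dt_val : def;    /* DT wins when present */

    if (val < min)
        val = min;
    if (val > max)
        val = max;
    return val;
}

int main(void)
{
    unsigned hw_rx_fifo = 525;    /* pretend autodetected FIFO depth */

    /* no DT property: fall back to the hardware-derived default */
    printf("%u\n", set_param_u32(0, 0, hw_rx_fifo, 16, hw_rx_fifo));
    /* DT asks for more than the FIFO holds: bounded to the maximum */
    printf("%u\n", set_param_u32(1, 2048, hw_rx_fifo, 16, hw_rx_fifo));
    return 0;
}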
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index de5a8570be04..14b760209680 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -45,9 +45,7 @@
45#define DWC3_XHCI_RESOURCES_NUM 2 45#define DWC3_XHCI_RESOURCES_NUM 2
46 46
47#define DWC3_SCRATCHBUF_SIZE 4096 /* each buffer is assumed to be 4KiB */ 47#define DWC3_SCRATCHBUF_SIZE 4096 /* each buffer is assumed to be 4KiB */
48#define DWC3_EVENT_SIZE 4 /* bytes */ 48#define DWC3_EVENT_BUFFERS_SIZE 4096
49#define DWC3_EVENT_MAX_NUM 64 /* 2 events/endpoint */
50#define DWC3_EVENT_BUFFERS_SIZE (DWC3_EVENT_SIZE * DWC3_EVENT_MAX_NUM)
51#define DWC3_EVENT_TYPE_MASK 0xfe 49#define DWC3_EVENT_TYPE_MASK 0xfe
52 50
53#define DWC3_EVENT_TYPE_DEV 0 51#define DWC3_EVENT_TYPE_DEV 0
@@ -311,9 +309,8 @@
311#define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */ 309#define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
312#define DWC3_DCFG_SUPERSPEED (4 << 0) 310#define DWC3_DCFG_SUPERSPEED (4 << 0)
313#define DWC3_DCFG_HIGHSPEED (0 << 0) 311#define DWC3_DCFG_HIGHSPEED (0 << 0)
314#define DWC3_DCFG_FULLSPEED2 (1 << 0) 312#define DWC3_DCFG_FULLSPEED (1 << 0)
315#define DWC3_DCFG_LOWSPEED (2 << 0) 313#define DWC3_DCFG_LOWSPEED (2 << 0)
316#define DWC3_DCFG_FULLSPEED1 (3 << 0)
317 314
318#define DWC3_DCFG_NUMP_SHIFT 17 315#define DWC3_DCFG_NUMP_SHIFT 17
319#define DWC3_DCFG_NUMP(n) (((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f) 316#define DWC3_DCFG_NUMP(n) (((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f)
@@ -405,9 +402,8 @@
405#define DWC3_DSTS_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */ 402#define DWC3_DSTS_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
406#define DWC3_DSTS_SUPERSPEED (4 << 0) 403#define DWC3_DSTS_SUPERSPEED (4 << 0)
407#define DWC3_DSTS_HIGHSPEED (0 << 0) 404#define DWC3_DSTS_HIGHSPEED (0 << 0)
408#define DWC3_DSTS_FULLSPEED2 (1 << 0) 405#define DWC3_DSTS_FULLSPEED (1 << 0)
409#define DWC3_DSTS_LOWSPEED (2 << 0) 406#define DWC3_DSTS_LOWSPEED (2 << 0)
410#define DWC3_DSTS_FULLSPEED1 (3 << 0)
411 407
412/* Device Generic Command Register */ 408/* Device Generic Command Register */
413#define DWC3_DGCMD_SET_LMP 0x01 409#define DWC3_DGCMD_SET_LMP 0x01
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index e27899bb5706..e956306d9b0f 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -138,7 +138,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
138 exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk"); 138 exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
139 if (IS_ERR(exynos->axius_clk)) { 139 if (IS_ERR(exynos->axius_clk)) {
140 dev_err(dev, "no AXI UpScaler clk specified\n"); 140 dev_err(dev, "no AXI UpScaler clk specified\n");
141 return -ENODEV; 141 ret = -ENODEV;
142 goto axius_clk_err;
142 } 143 }
143 clk_prepare_enable(exynos->axius_clk); 144 clk_prepare_enable(exynos->axius_clk);
144 } else { 145 } else {
@@ -196,6 +197,7 @@ err3:
196 regulator_disable(exynos->vdd33); 197 regulator_disable(exynos->vdd33);
197err2: 198err2:
198 clk_disable_unprepare(exynos->axius_clk); 199 clk_disable_unprepare(exynos->axius_clk);
200axius_clk_err:
199 clk_disable_unprepare(exynos->susp_clk); 201 clk_disable_unprepare(exynos->susp_clk);
200 clk_disable_unprepare(exynos->clk); 202 clk_disable_unprepare(exynos->clk);
201 return ret; 203 return ret;
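[Sketch] The dwc3-exynos fix is a classic goto-unwind correction: a devm_clk_get() failure for the AXI UpScaler clock must skip the clk_disable_unprepare() of a clock that was never enabled, so a new label is inserted between the existing ones. A compilable toy showing the label layering; the labels mirror the driver's, everything else is invented:

#include <stdio.h>

static int probe(int fail_axius, int fail_later)
{
    printf("susp_clk enabled\n");

    if (fail_axius)
        goto axius_clk_err;      /* axius clk was never enabled */
    printf("axius_clk enabled\n");

    if (fail_later)
        goto err2;               /* must undo both clocks */

    printf("probe ok\n");
    return 0;

err2:
    printf("axius_clk disabled\n");
axius_clk_err:                   /* falls through from err2 by design */
    printf("susp_clk disabled\n");
    return -1;
}

int main(void)
{
    probe(1, 0);                 /* early failure: one clock undone */
    probe(0, 1);                 /* late failure: both clocks undone */
    return 0;
}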
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 29e80cc9b634..eb1b9cb3f9d1 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/irq.h>
22#include <linux/interrupt.h> 23#include <linux/interrupt.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/platform_data/dwc3-omap.h> 25#include <linux/platform_data/dwc3-omap.h>
@@ -510,7 +511,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
510 511
511 /* check the DMA Status */ 512 /* check the DMA Status */
512 reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG); 513 reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
513 514 irq_set_status_flags(omap->irq, IRQ_NOAUTOEN);
514 ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt, 515 ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
515 dwc3_omap_interrupt_thread, IRQF_SHARED, 516 dwc3_omap_interrupt_thread, IRQF_SHARED,
516 "dwc3-omap", omap); 517 "dwc3-omap", omap);
@@ -531,7 +532,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
531 } 532 }
532 533
533 dwc3_omap_enable_irqs(omap); 534 dwc3_omap_enable_irqs(omap);
534 535 enable_irq(omap->irq);
535 return 0; 536 return 0;
536 537
537err2: 538err2:
@@ -552,6 +553,7 @@ static int dwc3_omap_remove(struct platform_device *pdev)
552 extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb); 553 extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
553 extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb); 554 extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
554 dwc3_omap_disable_irqs(omap); 555 dwc3_omap_disable_irqs(omap);
556 disable_irq(omap->irq);
555 of_platform_depopulate(omap->dev); 557 of_platform_depopulate(omap->dev);
556 pm_runtime_put_sync(&pdev->dev); 558 pm_runtime_put_sync(&pdev->dev);
557 pm_runtime_disable(&pdev->dev); 559 pm_runtime_disable(&pdev->dev);
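[Sketch] dwc3-omap now marks its line IRQ_NOAUTOEN before devm_request_threaded_irq() and calls enable_irq() only after dwc3_omap_enable_irqs() has programmed the controller, closing the window where a shared-IRQ handler could run against an unconfigured device. IRQ_NOAUTOEN and enable_irq() are the real kernel interfaces; the sketch below fakes the IRQ core with a function pointer purely to show the ordering:

#include <stdio.h>

static int hw_ready;

static void handler(void)
{
    printf(hw_ready ? "irq handled\n" : "BUG: irq before init\n");
}

static void (*irq_line)(void);
static int irq_enabled;

static void request_irq_noautoen(void (*h)(void)) { irq_line = h; }
static void enable_irq_line(void) { irq_enabled = 1; }

static void raise_irq(void)
{
    if (irq_enabled && irq_line)
        irq_line();
}

int main(void)
{
    request_irq_noautoen(handler);   /* registered but not yet enabled */
    raise_irq();                     /* spurious event: safely ignored */
    hw_ready = 1;                    /* ... program device interrupts ... */
    enable_irq_line();
    raise_irq();                     /* now delivered to a ready device */
    return 0;
}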
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 2b73339f286b..cce0a220b6b0 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -38,6 +38,7 @@
38#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa 38#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
39#define PCI_DEVICE_ID_INTEL_APL 0x5aaa 39#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
40#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 40#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
41#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
41 42
42#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" 43#define PCI_INTEL_BXT_DSM_UUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
43#define PCI_INTEL_BXT_FUNC_PMU_PWR 4 44#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
@@ -73,16 +74,6 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
73{ 74{
74 struct platform_device *dwc3 = dwc->dwc3; 75 struct platform_device *dwc3 = dwc->dwc3;
75 struct pci_dev *pdev = dwc->pci; 76 struct pci_dev *pdev = dwc->pci;
76 int ret;
77
78 struct property_entry sysdev_property[] = {
79 PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
80 { },
81 };
82
83 ret = platform_device_add_properties(dwc3, sysdev_property);
84 if (ret)
85 return ret;
86 77
87 if (pdev->vendor == PCI_VENDOR_ID_AMD && 78 if (pdev->vendor == PCI_VENDOR_ID_AMD &&
88 pdev->device == PCI_DEVICE_ID_AMD_NL_USB) { 79 pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
@@ -105,6 +96,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
105 PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"), 96 PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
106 PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"), 97 PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
107 PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"), 98 PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
99 PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
108 { }, 100 { },
109 }; 101 };
110 102
@@ -115,7 +107,8 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
115 int ret; 107 int ret;
116 108
117 struct property_entry properties[] = { 109 struct property_entry properties[] = {
118 PROPERTY_ENTRY_STRING("dr-mode", "peripheral"), 110 PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
111 PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
119 { } 112 { }
120 }; 113 };
121 114
@@ -167,6 +160,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
167 PROPERTY_ENTRY_BOOL("snps,usb3_lpm_capable"), 160 PROPERTY_ENTRY_BOOL("snps,usb3_lpm_capable"),
168 PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"), 161 PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
169 PROPERTY_ENTRY_BOOL("snps,dis_enblslpm_quirk"), 162 PROPERTY_ENTRY_BOOL("snps,dis_enblslpm_quirk"),
163 PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
170 { }, 164 { },
171 }; 165 };
172 166
@@ -274,6 +268,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
274 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), }, 268 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
275 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, 269 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
276 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, 270 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
271 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
277 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 272 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
278 { } /* Terminating Entry */ 273 { } /* Terminating Entry */
279}; 274};
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 4878d187c7d4..9bb1f8526f3e 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -39,18 +39,13 @@ static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
39static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, 39static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
40 struct dwc3_ep *dep, struct dwc3_request *req); 40 struct dwc3_ep *dep, struct dwc3_request *req);
41 41
42static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma, 42static void dwc3_ep0_prepare_one_trb(struct dwc3 *dwc, u8 epnum,
43 u32 len, u32 type, bool chain) 43 dma_addr_t buf_dma, u32 len, u32 type, bool chain)
44{ 44{
45 struct dwc3_gadget_ep_cmd_params params;
46 struct dwc3_trb *trb; 45 struct dwc3_trb *trb;
47 struct dwc3_ep *dep; 46 struct dwc3_ep *dep;
48 47
49 int ret;
50
51 dep = dwc->eps[epnum]; 48 dep = dwc->eps[epnum];
52 if (dep->flags & DWC3_EP_BUSY)
53 return 0;
54 49
55 trb = &dwc->ep0_trb[dep->trb_enqueue]; 50 trb = &dwc->ep0_trb[dep->trb_enqueue];
56 51
@@ -71,15 +66,23 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
71 trb->ctrl |= (DWC3_TRB_CTRL_IOC 66 trb->ctrl |= (DWC3_TRB_CTRL_IOC
72 | DWC3_TRB_CTRL_LST); 67 | DWC3_TRB_CTRL_LST);
73 68
74 if (chain) 69 trace_dwc3_prepare_trb(dep, trb);
70}
71
72static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum)
73{
74 struct dwc3_gadget_ep_cmd_params params;
75 struct dwc3_ep *dep;
76 int ret;
77
78 dep = dwc->eps[epnum];
79 if (dep->flags & DWC3_EP_BUSY)
75 return 0; 80 return 0;
76 81
77 memset(&params, 0, sizeof(params)); 82 memset(&params, 0, sizeof(params));
78 params.param0 = upper_32_bits(dwc->ep0_trb_addr); 83 params.param0 = upper_32_bits(dwc->ep0_trb_addr);
79 params.param1 = lower_32_bits(dwc->ep0_trb_addr); 84 params.param1 = lower_32_bits(dwc->ep0_trb_addr);
80 85
81 trace_dwc3_prepare_trb(dep, trb);
82
83 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params); 86 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
84 if (ret < 0) 87 if (ret < 0)
85 return ret; 88 return ret;
@@ -280,8 +283,9 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
280 283
281 complete(&dwc->ep0_in_setup); 284 complete(&dwc->ep0_in_setup);
282 285
283 ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8, 286 dwc3_ep0_prepare_one_trb(dwc, 0, dwc->ctrl_req_addr, 8,
284 DWC3_TRBCTL_CONTROL_SETUP, false); 287 DWC3_TRBCTL_CONTROL_SETUP, false);
288 ret = dwc3_ep0_start_trans(dwc, 0);
285 WARN_ON(ret < 0); 289 WARN_ON(ret < 0);
286} 290}
287 291
@@ -912,9 +916,9 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
912 916
913 dwc->ep0_next_event = DWC3_EP0_COMPLETE; 917 dwc->ep0_next_event = DWC3_EP0_COMPLETE;
914 918
915 ret = dwc3_ep0_start_trans(dwc, epnum, 919 dwc3_ep0_prepare_one_trb(dwc, epnum, dwc->ctrl_req_addr,
916 dwc->ctrl_req_addr, 0, 920 0, DWC3_TRBCTL_CONTROL_DATA, false);
917 DWC3_TRBCTL_CONTROL_DATA, false); 921 ret = dwc3_ep0_start_trans(dwc, epnum);
918 WARN_ON(ret < 0); 922 WARN_ON(ret < 0);
919 } 923 }
920 } 924 }
@@ -993,9 +997,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
993 req->direction = !!dep->number; 997 req->direction = !!dep->number;
994 998
995 if (req->request.length == 0) { 999 if (req->request.length == 0) {
996 ret = dwc3_ep0_start_trans(dwc, dep->number, 1000 dwc3_ep0_prepare_one_trb(dwc, dep->number,
997 dwc->ctrl_req_addr, 0, 1001 dwc->ctrl_req_addr, 0,
998 DWC3_TRBCTL_CONTROL_DATA, false); 1002 DWC3_TRBCTL_CONTROL_DATA, false);
1003 ret = dwc3_ep0_start_trans(dwc, dep->number);
999 } else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) 1004 } else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
1000 && (dep->number == 0)) { 1005 && (dep->number == 0)) {
1001 u32 transfer_size = 0; 1006 u32 transfer_size = 0;
@@ -1011,7 +1016,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
1011 if (req->request.length > DWC3_EP0_BOUNCE_SIZE) { 1016 if (req->request.length > DWC3_EP0_BOUNCE_SIZE) {
1012 transfer_size = ALIGN(req->request.length - maxpacket, 1017 transfer_size = ALIGN(req->request.length - maxpacket,
1013 maxpacket); 1018 maxpacket);
1014 ret = dwc3_ep0_start_trans(dwc, dep->number, 1019 dwc3_ep0_prepare_one_trb(dwc, dep->number,
1015 req->request.dma, 1020 req->request.dma,
1016 transfer_size, 1021 transfer_size,
1017 DWC3_TRBCTL_CONTROL_DATA, 1022 DWC3_TRBCTL_CONTROL_DATA,
@@ -1023,18 +1028,20 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
1023 1028
1024 dwc->ep0_bounced = true; 1029 dwc->ep0_bounced = true;
1025 1030
1026 ret = dwc3_ep0_start_trans(dwc, dep->number, 1031 dwc3_ep0_prepare_one_trb(dwc, dep->number,
1027 dwc->ep0_bounce_addr, transfer_size, 1032 dwc->ep0_bounce_addr, transfer_size,
1028 DWC3_TRBCTL_CONTROL_DATA, false); 1033 DWC3_TRBCTL_CONTROL_DATA, false);
1034 ret = dwc3_ep0_start_trans(dwc, dep->number);
1029 } else { 1035 } else {
1030 ret = usb_gadget_map_request_by_dev(dwc->sysdev, 1036 ret = usb_gadget_map_request_by_dev(dwc->sysdev,
1031 &req->request, dep->number); 1037 &req->request, dep->number);
1032 if (ret) 1038 if (ret)
1033 return; 1039 return;
1034 1040
1035 ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma, 1041 dwc3_ep0_prepare_one_trb(dwc, dep->number, req->request.dma,
1036 req->request.length, DWC3_TRBCTL_CONTROL_DATA, 1042 req->request.length, DWC3_TRBCTL_CONTROL_DATA,
1037 false); 1043 false);
1044 ret = dwc3_ep0_start_trans(dwc, dep->number);
1038 } 1045 }
1039 1046
1040 WARN_ON(ret < 0); 1047 WARN_ON(ret < 0);
@@ -1048,8 +1055,9 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
1048 type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3 1055 type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
1049 : DWC3_TRBCTL_CONTROL_STATUS2; 1056 : DWC3_TRBCTL_CONTROL_STATUS2;
1050 1057
1051 return dwc3_ep0_start_trans(dwc, dep->number, 1058 dwc3_ep0_prepare_one_trb(dwc, dep->number,
1052 dwc->ctrl_req_addr, 0, type, false); 1059 dwc->ctrl_req_addr, 0, type, false);
1060 return dwc3_ep0_start_trans(dwc, dep->number);
1053} 1061}
1054 1062
1055static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep) 1063static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
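
After the split above, every former dwc3_ep0_start_trans(dwc, epnum, dma, len,
type, chain) call in this file becomes the same two-step sequence, which
presumably leaves room to prepare more than one TRB before the Start Transfer
command is issued. Sketch of the call pattern, taken from the hunks above:

	dwc3_ep0_prepare_one_trb(dwc, dep->number, buf_dma, len,
			DWC3_TRBCTL_CONTROL_DATA, false);
	ret = dwc3_ep0_start_trans(dwc, dep->number);
	WARN_ON(ret < 0);
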
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index efddaf5d11d1..204c754cc647 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -180,11 +180,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
180 if (req->request.status == -EINPROGRESS) 180 if (req->request.status == -EINPROGRESS)
181 req->request.status = status; 181 req->request.status = status;
182 182
183 if (dwc->ep0_bounced && dep->number == 0) 183 if (dwc->ep0_bounced && dep->number <= 1)
184 dwc->ep0_bounced = false; 184 dwc->ep0_bounced = false;
185 else 185
186 usb_gadget_unmap_request_by_dev(dwc->sysdev, 186 usb_gadget_unmap_request_by_dev(dwc->sysdev,
187 &req->request, req->direction); 187 &req->request, req->direction);
188 188
189 trace_dwc3_gadget_giveback(req); 189 trace_dwc3_gadget_giveback(req);
190 190
@@ -1720,7 +1720,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
1720 reg |= DWC3_DCFG_LOWSPEED; 1720 reg |= DWC3_DCFG_LOWSPEED;
1721 break; 1721 break;
1722 case USB_SPEED_FULL: 1722 case USB_SPEED_FULL:
1723 reg |= DWC3_DCFG_FULLSPEED1; 1723 reg |= DWC3_DCFG_FULLSPEED;
1724 break; 1724 break;
1725 case USB_SPEED_HIGH: 1725 case USB_SPEED_HIGH:
1726 reg |= DWC3_DCFG_HIGHSPEED; 1726 reg |= DWC3_DCFG_HIGHSPEED;
@@ -2232,9 +2232,14 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
2232 2232
2233 dep = dwc->eps[epnum]; 2233 dep = dwc->eps[epnum];
2234 2234
2235 if (!(dep->flags & DWC3_EP_ENABLED) && 2235 if (!(dep->flags & DWC3_EP_ENABLED)) {
2236 !(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) 2236 if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
2237 return; 2237 return;
2238
2239 /* Handle only EPCMDCMPLT when EP disabled */
2240 if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
2241 return;
2242 }
2238 2243
2239 if (epnum == 0 || epnum == 1) { 2244 if (epnum == 0 || epnum == 1) {
2240 dwc3_ep0_interrupt(dwc, event); 2245 dwc3_ep0_interrupt(dwc, event);
@@ -2531,8 +2536,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2531 dwc->gadget.ep0->maxpacket = 64; 2536 dwc->gadget.ep0->maxpacket = 64;
2532 dwc->gadget.speed = USB_SPEED_HIGH; 2537 dwc->gadget.speed = USB_SPEED_HIGH;
2533 break; 2538 break;
2534 case DWC3_DSTS_FULLSPEED2: 2539 case DWC3_DSTS_FULLSPEED:
2535 case DWC3_DSTS_FULLSPEED1:
2536 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); 2540 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2537 dwc->gadget.ep0->maxpacket = 64; 2541 dwc->gadget.ep0->maxpacket = 64;
2538 dwc->gadget.speed = USB_SPEED_FULL; 2542 dwc->gadget.speed = USB_SPEED_FULL;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 41ab61f9b6e0..49d685ad0da9 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1694,9 +1694,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1694 value = min(w_length, (u16) 1); 1694 value = min(w_length, (u16) 1);
1695 break; 1695 break;
1696 1696
1697 /* function drivers must handle get/set altsetting; if there's 1697 /* function drivers must handle get/set altsetting */
1698 * no get() method, we know only altsetting zero works.
1699 */
1700 case USB_REQ_SET_INTERFACE: 1698 case USB_REQ_SET_INTERFACE:
1701 if (ctrl->bRequestType != USB_RECIP_INTERFACE) 1699 if (ctrl->bRequestType != USB_RECIP_INTERFACE)
1702 goto unknown; 1700 goto unknown;
@@ -1705,7 +1703,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1705 f = cdev->config->interface[intf]; 1703 f = cdev->config->interface[intf];
1706 if (!f) 1704 if (!f)
1707 break; 1705 break;
1708 if (w_value && !f->set_alt) 1706
1707 /*
1708 * If there's no get_alt() method, we know only altsetting zero
1709 * works. There is no need to check if set_alt() is not NULL
1710 * as we check this in usb_add_function().
1711 */
1712 if (w_value && !f->get_alt)
1709 break; 1713 break;
1710 value = f->set_alt(f, w_index, w_value); 1714 value = f->set_alt(f, w_index, w_value);
1711 if (value == USB_GADGET_DELAYED_STATUS) { 1715 if (value == USB_GADGET_DELAYED_STATUS) {
@@ -2143,7 +2147,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
2143 cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); 2147 cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
2144 if (!cdev->os_desc_req->buf) { 2148 if (!cdev->os_desc_req->buf) {
2145 ret = -ENOMEM; 2149 ret = -ENOMEM;
2146 kfree(cdev->os_desc_req); 2150 usb_ep_free_request(ep0, cdev->os_desc_req);
2147 goto end; 2151 goto end;
2148 } 2152 }
2149 cdev->os_desc_req->context = cdev; 2153 cdev->os_desc_req->context = cdev;
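
The composite.c error path above matters because cdev->os_desc_req comes from
usb_ep_alloc_request(), not kmalloc(): a bare kfree() would skip whatever
controller-private state the UDC driver attached to the request. Simplified
sketch of the correct pairing used by the function above:

	struct usb_request *req;

	req = usb_ep_alloc_request(ep0, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->buf = kmalloc(4096, GFP_KERNEL);
	if (!req->buf) {
		usb_ep_free_request(ep0, req);	/* not kfree(req) */
		return -ENOMEM;
	}
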
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index aab3fc1dbb94..fd80c1b9c823 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1806,7 +1806,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
1806 unsigned long flags; 1806 unsigned long flags;
1807 1807
1808 spin_lock_irqsave(&func->ffs->eps_lock, flags); 1808 spin_lock_irqsave(&func->ffs->eps_lock, flags);
1809 do { 1809 while (count--) {
1810 /* pending requests get nuked */ 1810 /* pending requests get nuked */
1811 if (likely(ep->ep)) 1811 if (likely(ep->ep))
1812 usb_ep_disable(ep->ep); 1812 usb_ep_disable(ep->ep);
@@ -1817,7 +1817,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
1817 __ffs_epfile_read_buffer_free(epfile); 1817 __ffs_epfile_read_buffer_free(epfile);
1818 ++epfile; 1818 ++epfile;
1819 } 1819 }
1820 } while (--count); 1820 }
1821 spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 1821 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1822} 1822}
1823 1823
@@ -1831,7 +1831,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
1831 int ret = 0; 1831 int ret = 0;
1832 1832
1833 spin_lock_irqsave(&func->ffs->eps_lock, flags); 1833 spin_lock_irqsave(&func->ffs->eps_lock, flags);
 1834 do { 1834 while (count--) {
1835 struct usb_endpoint_descriptor *ds; 1835 struct usb_endpoint_descriptor *ds;
1836 int desc_idx; 1836 int desc_idx;
1837 1837
@@ -1867,7 +1867,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
1867 1867
1868 ++ep; 1868 ++ep;
1869 ++epfile; 1869 ++epfile;
1870 } while (--count); 1870 }
1871 spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 1871 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
1872 1872
1873 return ret; 1873 return ret;
@@ -2091,8 +2091,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
2091 2091
2092 case FFS_STRING: 2092 case FFS_STRING:
2093 /* 2093 /*
2094 * Strings are indexed from 1 (0 is magic ;) reserved 2094 * Strings are indexed from 1 (0 is reserved
2095 * for languages list or some such) 2095 * for languages list)
2096 */ 2096 */
2097 if (*valuep > helper->ffs->strings_count) 2097 if (*valuep > helper->ffs->strings_count)
2098 helper->ffs->strings_count = *valuep; 2098 helper->ffs->strings_count = *valuep;
@@ -2252,7 +2252,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2252 2252
2253 if (len < sizeof(*d) || 2253 if (len < sizeof(*d) ||
2254 d->bFirstInterfaceNumber >= ffs->interfaces_count || 2254 d->bFirstInterfaceNumber >= ffs->interfaces_count ||
2255 !d->Reserved1) 2255 d->Reserved1)
2256 return -EINVAL; 2256 return -EINVAL;
2257 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) 2257 for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2258 if (d->Reserved2[i]) 2258 if (d->Reserved2[i])
@@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2269 if (len < sizeof(*d) || h->interface >= ffs->interfaces_count) 2269 if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
2270 return -EINVAL; 2270 return -EINVAL;
2271 length = le32_to_cpu(d->dwSize); 2271 length = le32_to_cpu(d->dwSize);
2272 if (len < length)
2273 return -EINVAL;
2272 type = le32_to_cpu(d->dwPropertyDataType); 2274 type = le32_to_cpu(d->dwPropertyDataType);
2273 if (type < USB_EXT_PROP_UNICODE || 2275 if (type < USB_EXT_PROP_UNICODE ||
2274 type > USB_EXT_PROP_UNICODE_MULTI) { 2276 type > USB_EXT_PROP_UNICODE_MULTI) {
@@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2277 return -EINVAL; 2279 return -EINVAL;
2278 } 2280 }
2279 pnl = le16_to_cpu(d->wPropertyNameLength); 2281 pnl = le16_to_cpu(d->wPropertyNameLength);
2282 if (length < 14 + pnl) {
2283 pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
2284 length, pnl, type);
2285 return -EINVAL;
2286 }
2280 pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl)); 2287 pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
2281 if (length != 14 + pnl + pdl) { 2288 if (length != 14 + pnl + pdl) {
2282 pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n", 2289 pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
@@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
2363 } 2370 }
2364 } 2371 }
2365 if (flags & (1 << i)) { 2372 if (flags & (1 << i)) {
2373 if (len < 4) {
2374 goto error;
2375 }
2366 os_descs_count = get_unaligned_le32(data); 2376 os_descs_count = get_unaligned_le32(data);
2367 data += 4; 2377 data += 4;
2368 len -= 4; 2378 len -= 4;
@@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
2435 2445
2436 ENTER(); 2446 ENTER();
2437 2447
2438 if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC || 2448 if (unlikely(len < 16 ||
2449 get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
2439 get_unaligned_le32(data + 4) != len)) 2450 get_unaligned_le32(data + 4) != len))
2440 goto error; 2451 goto error;
2441 str_count = get_unaligned_le32(data + 8); 2452 str_count = get_unaligned_le32(data + 8);
@@ -3448,12 +3459,12 @@ static void ffs_func_unbind(struct usb_configuration *c,
3448 3459
3449 /* cleanup after autoconfig */ 3460 /* cleanup after autoconfig */
3450 spin_lock_irqsave(&func->ffs->eps_lock, flags); 3461 spin_lock_irqsave(&func->ffs->eps_lock, flags);
3451 do { 3462 while (count--) {
3452 if (ep->ep && ep->req) 3463 if (ep->ep && ep->req)
3453 usb_ep_free_request(ep->ep, ep->req); 3464 usb_ep_free_request(ep->ep, ep->req);
3454 ep->req = NULL; 3465 ep->req = NULL;
3455 ++ep; 3466 ++ep;
3456 } while (--count); 3467 }
3457 spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 3468 spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
3458 kfree(func->eps); 3469 kfree(func->eps);
3459 func->eps = NULL; 3470 func->eps = NULL;
@@ -3666,6 +3677,7 @@ static void ffs_closed(struct ffs_data *ffs)
3666{ 3677{
3667 struct ffs_dev *ffs_obj; 3678 struct ffs_dev *ffs_obj;
3668 struct f_fs_opts *opts; 3679 struct f_fs_opts *opts;
3680 struct config_item *ci;
3669 3681
3670 ENTER(); 3682 ENTER();
3671 ffs_dev_lock(); 3683 ffs_dev_lock();
@@ -3689,8 +3701,11 @@ static void ffs_closed(struct ffs_data *ffs)
3689 || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount)) 3701 || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
3690 goto done; 3702 goto done;
3691 3703
3692 unregister_gadget_item(ffs_obj->opts-> 3704 ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
3693 func_inst.group.cg_item.ci_parent->ci_parent); 3705 ffs_dev_unlock();
3706
3707 unregister_gadget_item(ci);
3708 return;
3694done: 3709done:
3695 ffs_dev_unlock(); 3710 ffs_dev_unlock();
3696} 3711}
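
The loop rewrites in f_fs.c above fix the degenerate case: with
do { } while (--count) the body runs once even when count is 0, and the
decrement of the unsigned counter then wraps, walking far past the end of the
endpoint array. Illustrative fragment:

	unsigned int count = 0;	/* no endpoints */

	do {
		/* body runs once anyway ... */
	} while (--count);	/* ... and 0 - 1 wraps to UINT_MAX */

	while (count--) {
		/* never entered when count == 0 */
	}
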
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 3151d2a0fe59..5f8139b8e601 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -593,7 +593,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
593 } 593 }
594 status = usb_ep_enable(hidg->out_ep); 594 status = usb_ep_enable(hidg->out_ep);
595 if (status < 0) { 595 if (status < 0) {
596 ERROR(cdev, "Enable IN endpoint FAILED!\n"); 596 ERROR(cdev, "Enable OUT endpoint FAILED!\n");
597 goto fail; 597 goto fail;
598 } 598 }
599 hidg->out_ep->driver_data = hidg; 599 hidg->out_ep->driver_data = hidg;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index e8f4102d19df..6bde4396927c 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1126,7 +1126,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1126 /* data and/or status stage for control request */ 1126 /* data and/or status stage for control request */
1127 } else if (dev->state == STATE_DEV_SETUP) { 1127 } else if (dev->state == STATE_DEV_SETUP) {
1128 1128
1129 /* IN DATA+STATUS caller makes len <= wLength */ 1129 len = min_t(size_t, len, dev->setup_wLength);
1130 if (dev->setup_in) { 1130 if (dev->setup_in) {
1131 retval = setup_req (dev->gadget->ep0, dev->req, len); 1131 retval = setup_req (dev->gadget->ep0, dev->req, len);
1132 if (retval == 0) { 1132 if (retval == 0) {
@@ -1734,10 +1734,12 @@ static struct usb_gadget_driver gadgetfs_driver = {
1734 * such as configuration notifications. 1734 * such as configuration notifications.
1735 */ 1735 */
1736 1736
1737static int is_valid_config (struct usb_config_descriptor *config) 1737static int is_valid_config(struct usb_config_descriptor *config,
1738 unsigned int total)
1738{ 1739{
1739 return config->bDescriptorType == USB_DT_CONFIG 1740 return config->bDescriptorType == USB_DT_CONFIG
1740 && config->bLength == USB_DT_CONFIG_SIZE 1741 && config->bLength == USB_DT_CONFIG_SIZE
1742 && total >= USB_DT_CONFIG_SIZE
1741 && config->bConfigurationValue != 0 1743 && config->bConfigurationValue != 0
1742 && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0 1744 && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1743 && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0; 1745 && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
@@ -1762,7 +1764,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1762 } 1764 }
1763 spin_unlock_irq(&dev->lock); 1765 spin_unlock_irq(&dev->lock);
1764 1766
1765 if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) 1767 if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
1768 (len > PAGE_SIZE * 4))
1766 return -EINVAL; 1769 return -EINVAL;
1767 1770
1768 /* we might need to change message format someday */ 1771 /* we might need to change message format someday */
@@ -1786,7 +1789,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1786 /* full or low speed config */ 1789 /* full or low speed config */
1787 dev->config = (void *) kbuf; 1790 dev->config = (void *) kbuf;
1788 total = le16_to_cpu(dev->config->wTotalLength); 1791 total = le16_to_cpu(dev->config->wTotalLength);
1789 if (!is_valid_config (dev->config) || total >= length) 1792 if (!is_valid_config(dev->config, total) ||
1793 total > length - USB_DT_DEVICE_SIZE)
1790 goto fail; 1794 goto fail;
1791 kbuf += total; 1795 kbuf += total;
1792 length -= total; 1796 length -= total;
@@ -1795,10 +1799,13 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1795 if (kbuf [1] == USB_DT_CONFIG) { 1799 if (kbuf [1] == USB_DT_CONFIG) {
1796 dev->hs_config = (void *) kbuf; 1800 dev->hs_config = (void *) kbuf;
1797 total = le16_to_cpu(dev->hs_config->wTotalLength); 1801 total = le16_to_cpu(dev->hs_config->wTotalLength);
1798 if (!is_valid_config (dev->hs_config) || total >= length) 1802 if (!is_valid_config(dev->hs_config, total) ||
1803 total > length - USB_DT_DEVICE_SIZE)
1799 goto fail; 1804 goto fail;
1800 kbuf += total; 1805 kbuf += total;
1801 length -= total; 1806 length -= total;
1807 } else {
1808 dev->hs_config = NULL;
1802 } 1809 }
1803 1810
1804 /* could support multiple configs, using another encoding! */ 1811 /* could support multiple configs, using another encoding! */
@@ -1811,7 +1818,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1811 || dev->dev->bDescriptorType != USB_DT_DEVICE 1818 || dev->dev->bDescriptorType != USB_DT_DEVICE
1812 || dev->dev->bNumConfigurations != 1) 1819 || dev->dev->bNumConfigurations != 1)
1813 goto fail; 1820 goto fail;
1814 dev->dev->bNumConfigurations = 1;
1815 dev->dev->bcdUSB = cpu_to_le16 (0x0200); 1821 dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1816 1822
1817 /* triggers gadgetfs_bind(); then we can enumerate. */ 1823 /* triggers gadgetfs_bind(); then we can enumerate. */
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index f3212db9bc37..12c7687216e6 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
1978 dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret); 1978 dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
1979 goto err; 1979 goto err;
1980 } 1980 }
1981 ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index); 1981 sprintf(ep->name, "ep%d", ep->index);
1982 ep->ep.name = ep->name;
1982 1983
1983 ep->ep_regs = udc->regs + USBA_EPT_BASE(i); 1984 ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
1984 ep->dma_regs = udc->regs + USBA_DMA_BASE(i); 1985 ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index 3e1c9d589dfa..b03b2ebfc53a 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -280,6 +280,7 @@ struct usba_ep {
280 void __iomem *ep_regs; 280 void __iomem *ep_regs;
281 void __iomem *dma_regs; 281 void __iomem *dma_regs;
282 void __iomem *fifo; 282 void __iomem *fifo;
283 char name[8];
283 struct usb_ep ep; 284 struct usb_ep ep;
284 struct usba_udc *udc; 285 struct usba_udc *udc;
285 286
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 9483489080f6..0402177f93cd 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1317,7 +1317,11 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
1317 if (!ret) 1317 if (!ret)
1318 break; 1318 break;
1319 } 1319 }
1320 if (!ret && !udc->driver) 1320 if (ret)
1321 ret = -ENODEV;
1322 else if (udc->driver)
1323 ret = -EBUSY;
1324 else
1321 goto found; 1325 goto found;
1322 } else { 1326 } else {
1323 list_for_each_entry(udc, &udc_list, list) { 1327 list_for_each_entry(udc, &udc_list, list) {
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 02b14e91ae6c..c60abe3a68f9 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -330,7 +330,7 @@ static void nuke(struct dummy *dum, struct dummy_ep *ep)
330/* caller must hold lock */ 330/* caller must hold lock */
331static void stop_activity(struct dummy *dum) 331static void stop_activity(struct dummy *dum)
332{ 332{
333 struct dummy_ep *ep; 333 int i;
334 334
335 /* prevent any more requests */ 335 /* prevent any more requests */
336 dum->address = 0; 336 dum->address = 0;
@@ -338,8 +338,8 @@ static void stop_activity(struct dummy *dum)
338 /* The timer is left running so that outstanding URBs can fail */ 338 /* The timer is left running so that outstanding URBs can fail */
339 339
340 /* nuke any pending requests first, so driver i/o is quiesced */ 340 /* nuke any pending requests first, so driver i/o is quiesced */
341 list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list) 341 for (i = 0; i < DUMMY_ENDPOINTS; ++i)
342 nuke(dum, ep); 342 nuke(dum, &dum->ep[i]);
343 343
344 /* driver now does any non-usb quiescing necessary */ 344 /* driver now does any non-usb quiescing necessary */
345} 345}
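
stop_activity() above switches from walking gadget.ep_list to indexing the
backing array; the likely reason (not spelled out in the diff itself) is that
UDC drivers publish ep0 separately and unlink it from ep_list, so a list walk
never visits ep0 and its pending requests would survive. Sketch of that
convention and of the fixed loop:

	/* ep0 is exposed via gadget->ep0 and kept off ep_list */
	dum->gadget.ep0 = &dum->ep[0].ep;
	list_del_init(&dum->ep[0].ep.ep_list);

	/* the array, unlike the list, does include ep0 */
	for (i = 0; i < DUMMY_ENDPOINTS; ++i)
		nuke(dum, &dum->ep[i]);
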
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index be9e63836881..414e3c376dbb 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -43,7 +43,6 @@ struct at91_usbh_data {
43 struct gpio_desc *overcurrent_pin[AT91_MAX_USBH_PORTS]; 43 struct gpio_desc *overcurrent_pin[AT91_MAX_USBH_PORTS];
44 u8 ports; /* number of ports on root hub */ 44 u8 ports; /* number of ports on root hub */
45 u8 overcurrent_supported; 45 u8 overcurrent_supported;
46 u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
47 u8 overcurrent_status[AT91_MAX_USBH_PORTS]; 46 u8 overcurrent_status[AT91_MAX_USBH_PORTS];
48 u8 overcurrent_changed[AT91_MAX_USBH_PORTS]; 47 u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
49}; 48};
@@ -266,8 +265,7 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
266 if (!valid_port(port)) 265 if (!valid_port(port))
267 return; 266 return;
268 267
269 gpiod_set_value(pdata->vbus_pin[port], 268 gpiod_set_value(pdata->vbus_pin[port], enable);
270 pdata->vbus_pin_active_low[port] ^ enable);
271} 269}
272 270
273static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port) 271static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
@@ -275,8 +273,7 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
275 if (!valid_port(port)) 273 if (!valid_port(port))
276 return -EINVAL; 274 return -EINVAL;
277 275
278 return gpiod_get_value(pdata->vbus_pin[port]) ^ 276 return gpiod_get_value(pdata->vbus_pin[port]);
279 pdata->vbus_pin_active_low[port];
280} 277}
281 278
282/* 279/*
@@ -533,18 +530,17 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
533 pdata->ports = ports; 530 pdata->ports = ports;
534 531
535 at91_for_each_port(i) { 532 at91_for_each_port(i) {
536 pdata->vbus_pin[i] = devm_gpiod_get_optional(&pdev->dev, 533 if (i >= pdata->ports)
537 "atmel,vbus-gpio", 534 break;
538 GPIOD_IN); 535
536 pdata->vbus_pin[i] =
537 devm_gpiod_get_index_optional(&pdev->dev, "atmel,vbus",
538 i, GPIOD_OUT_HIGH);
539 if (IS_ERR(pdata->vbus_pin[i])) { 539 if (IS_ERR(pdata->vbus_pin[i])) {
540 err = PTR_ERR(pdata->vbus_pin[i]); 540 err = PTR_ERR(pdata->vbus_pin[i]);
541 dev_err(&pdev->dev, "unable to claim gpio \"vbus\": %d\n", err); 541 dev_err(&pdev->dev, "unable to claim gpio \"vbus\": %d\n", err);
542 continue; 542 continue;
543 } 543 }
544
545 pdata->vbus_pin_active_low[i] = gpiod_get_value(pdata->vbus_pin[i]);
546
547 ohci_at91_usb_set_power(pdata, i, 1);
548 } 544 }
549 545
550 at91_for_each_port(i) { 546 at91_for_each_port(i) {
@@ -552,8 +548,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
552 break; 548 break;
553 549
554 pdata->overcurrent_pin[i] = 550 pdata->overcurrent_pin[i] =
555 devm_gpiod_get_optional(&pdev->dev, 551 devm_gpiod_get_index_optional(&pdev->dev, "atmel,oc",
556 "atmel,oc-gpio", GPIOD_IN); 552 i, GPIOD_IN);
557 if (IS_ERR(pdata->overcurrent_pin[i])) { 553 if (IS_ERR(pdata->overcurrent_pin[i])) {
558 err = PTR_ERR(pdata->overcurrent_pin[i]); 554 err = PTR_ERR(pdata->overcurrent_pin[i]);
559 dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err); 555 dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
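
The ohci-at91 conversion above leans on gpiolib applying the polarity recorded
in the device tree (GPIO_ACTIVE_LOW and friends), which is why the cached
vbus_pin_active_low[] values and the XORs disappear. Sketch of the per-port
lookup and logical-value usage, error handling trimmed:

	struct gpio_desc *vbus;

	vbus = devm_gpiod_get_index_optional(&pdev->dev, "atmel,vbus",
					port, GPIOD_OUT_HIGH);
	if (IS_ERR(vbus))
		return PTR_ERR(vbus);

	/* logical values; gpiolib inverts them for active-low lines */
	gpiod_set_value(vbus, 1);	/* power on */
	gpiod_set_value(vbus, 0);	/* power off */
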
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 321de2e0161b..8414ed2a02de 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -979,6 +979,40 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
979 xhci->devs[slot_id] = NULL; 979 xhci->devs[slot_id] = NULL;
980} 980}
981 981
982/*
983 * Free a virt_device structure.
984 * If the virt_device added a tt_info (a hub) and has children pointing to
985 * that tt_info, then free the child first. Recursive.
986 * We can't rely on udev at this point to find child-parent relationships.
987 */
988void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
989{
990 struct xhci_virt_device *vdev;
991 struct list_head *tt_list_head;
992 struct xhci_tt_bw_info *tt_info, *next;
993 int i;
994
995 vdev = xhci->devs[slot_id];
996 if (!vdev)
997 return;
998
999 tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
1000 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
 1001 /* is this a hub device that added a tt_info to the tts list? */
1002 if (tt_info->slot_id == slot_id) {
1003 /* are any devices using this tt_info? */
1004 for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
1005 vdev = xhci->devs[i];
1006 if (vdev && (vdev->tt_info == tt_info))
1007 xhci_free_virt_devices_depth_first(
1008 xhci, i);
1009 }
1010 }
1011 }
1012 /* we are now at a leaf device */
1013 xhci_free_virt_device(xhci, slot_id);
1014}
1015
982int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, 1016int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
983 struct usb_device *udev, gfp_t flags) 1017 struct usb_device *udev, gfp_t flags)
984{ 1018{
@@ -1795,7 +1829,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1795 int size; 1829 int size;
1796 int i, j, num_ports; 1830 int i, j, num_ports;
1797 1831
1798 del_timer_sync(&xhci->cmd_timer); 1832 cancel_delayed_work_sync(&xhci->cmd_timer);
1799 1833
1800 /* Free the Event Ring Segment Table and the actual Event Ring */ 1834 /* Free the Event Ring Segment Table and the actual Event Ring */
1801 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 1835 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -1828,8 +1862,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1828 } 1862 }
1829 } 1863 }
1830 1864
1831 for (i = 1; i < MAX_HC_SLOTS; ++i) 1865 for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
1832 xhci_free_virt_device(xhci, i); 1866 xhci_free_virt_devices_depth_first(xhci, i);
1833 1867
1834 dma_pool_destroy(xhci->segment_pool); 1868 dma_pool_destroy(xhci->segment_pool);
1835 xhci->segment_pool = NULL; 1869 xhci->segment_pool = NULL;
@@ -2342,9 +2376,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
2342 2376
2343 INIT_LIST_HEAD(&xhci->cmd_list); 2377 INIT_LIST_HEAD(&xhci->cmd_list);
2344 2378
2345 /* init command timeout timer */ 2379 /* init command timeout work */
2346 setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout, 2380 INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
2347 (unsigned long)xhci); 2381 init_completion(&xhci->cmd_ring_stop_completion);
2348 2382
2349 page_size = readl(&xhci->op_regs->page_size); 2383 page_size = readl(&xhci->op_regs->page_size);
2350 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 2384 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 1094ebd2838f..bac961cd24ad 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -579,8 +579,10 @@ static int xhci_mtk_probe(struct platform_device *pdev)
579 goto disable_ldos; 579 goto disable_ldos;
580 580
581 irq = platform_get_irq(pdev, 0); 581 irq = platform_get_irq(pdev, 0);
582 if (irq < 0) 582 if (irq < 0) {
583 ret = irq;
583 goto disable_clk; 584 goto disable_clk;
585 }
584 586
585 /* Initialize dma_mask and coherent_dma_mask to 32-bits */ 587 /* Initialize dma_mask and coherent_dma_mask to 32-bits */
586 ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); 588 ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e96ae80d107e..954abfd5014d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -165,7 +165,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
165 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || 165 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
166 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || 166 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
167 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || 167 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
168 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) { 168 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
169 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) {
169 xhci->quirks |= XHCI_PME_STUCK_QUIRK; 170 xhci->quirks |= XHCI_PME_STUCK_QUIRK;
170 } 171 }
171 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 172 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index ddfab301e366..e5834dd9bcde 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -165,7 +165,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
165 return -ENODEV; 165 return -ENODEV;
166 166
167 /* Try to set 64-bit DMA first */ 167 /* Try to set 64-bit DMA first */
168 if (WARN_ON(!pdev->dev.dma_mask)) 168 if (!pdev->dev.dma_mask)
169 /* Platform did not initialize dma_mask */ 169 /* Platform did not initialize dma_mask */
170 ret = dma_coerce_mask_and_coherent(&pdev->dev, 170 ret = dma_coerce_mask_and_coherent(&pdev->dev,
171 DMA_BIT_MASK(64)); 171 DMA_BIT_MASK(64));
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bdf6b13d9b67..e32029a31ca4 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -279,23 +279,76 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
279 readl(&xhci->dba->doorbell[0]); 279 readl(&xhci->dba->doorbell[0]);
280} 280}
281 281
282static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) 282static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
283{
284 return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
285}
286
287static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
288{
289 return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
290 cmd_list);
291}
292
293/*
294 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
295 * If there are other commands waiting then restart the ring and kick the timer.
296 * This must be called with command ring stopped and xhci->lock held.
297 */
298static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
299 struct xhci_command *cur_cmd)
300{
301 struct xhci_command *i_cmd;
302 u32 cycle_state;
303
304 /* Turn all aborted commands in list to no-ops, then restart */
305 list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
306
307 if (i_cmd->status != COMP_CMD_ABORT)
308 continue;
309
310 i_cmd->status = COMP_CMD_STOP;
311
312 xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
313 i_cmd->command_trb);
314 /* get cycle state from the original cmd trb */
315 cycle_state = le32_to_cpu(
316 i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
317 /* modify the command trb to no-op command */
318 i_cmd->command_trb->generic.field[0] = 0;
319 i_cmd->command_trb->generic.field[1] = 0;
320 i_cmd->command_trb->generic.field[2] = 0;
321 i_cmd->command_trb->generic.field[3] = cpu_to_le32(
322 TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
323
324 /*
325 * caller waiting for completion is called when command
326 * completion event is received for these no-op commands
327 */
328 }
329
330 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
331
332 /* ring command ring doorbell to restart the command ring */
333 if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
334 !(xhci->xhc_state & XHCI_STATE_DYING)) {
335 xhci->current_cmd = cur_cmd;
336 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
337 xhci_ring_cmd_db(xhci);
338 }
339}
340
 341/* Must be called with xhci->lock held, releases and re-acquires the lock */
342static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
283{ 343{
284 u64 temp_64; 344 u64 temp_64;
285 int ret; 345 int ret;
286 346
287 xhci_dbg(xhci, "Abort command ring\n"); 347 xhci_dbg(xhci, "Abort command ring\n");
288 348
289 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); 349 reinit_completion(&xhci->cmd_ring_stop_completion);
290 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
291 350
292 /* 351 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
293 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
294 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
 295 * but the completion event is never sent. Use the cmd timeout timer to
296 * handle those cases. Use twice the time to cover the bit polling retry
297 */
298 mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
299 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, 352 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
300 &xhci->op_regs->cmd_ring); 353 &xhci->op_regs->cmd_ring);
301 354
@@ -315,17 +368,30 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
315 udelay(1000); 368 udelay(1000);
316 ret = xhci_handshake(&xhci->op_regs->cmd_ring, 369 ret = xhci_handshake(&xhci->op_regs->cmd_ring,
317 CMD_RING_RUNNING, 0, 3 * 1000 * 1000); 370 CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
318 if (ret == 0) 371 if (ret < 0) {
 319 return 0; 372 xhci_err(xhci, "Stopping the command ring failed, "
320 373 "maybe the host is dead\n");
321 xhci_err(xhci, "Stopped the command ring failed, " 374 xhci->xhc_state |= XHCI_STATE_DYING;
322 "maybe the host is dead\n"); 375 xhci_halt(xhci);
323 del_timer(&xhci->cmd_timer); 376 return -ESHUTDOWN;
324 xhci->xhc_state |= XHCI_STATE_DYING; 377 }
325 xhci_halt(xhci); 378 }
326 return -ESHUTDOWN; 379 /*
380 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
381 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
 382 * but the completion event is never sent. Wait 2 secs (arbitrary
383 * number) to handle those cases after negation of CMD_RING_RUNNING.
384 */
385 spin_unlock_irqrestore(&xhci->lock, flags);
386 ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
387 msecs_to_jiffies(2000));
388 spin_lock_irqsave(&xhci->lock, flags);
389 if (!ret) {
390 xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
391 xhci_cleanup_command_queue(xhci);
392 } else {
393 xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
327 } 394 }
328
329 return 0; 395 return 0;
330} 396}
331 397
@@ -847,17 +913,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
847 spin_lock_irqsave(&xhci->lock, flags); 913 spin_lock_irqsave(&xhci->lock, flags);
848 914
849 ep->stop_cmds_pending--; 915 ep->stop_cmds_pending--;
850 if (xhci->xhc_state & XHCI_STATE_REMOVING) {
851 spin_unlock_irqrestore(&xhci->lock, flags);
852 return;
853 }
854 if (xhci->xhc_state & XHCI_STATE_DYING) {
855 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
856 "Stop EP timer ran, but another timer marked "
857 "xHCI as DYING, exiting.");
858 spin_unlock_irqrestore(&xhci->lock, flags);
859 return;
860 }
861 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { 916 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
862 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 917 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
863 "Stop EP timer ran, but no command pending, " 918 "Stop EP timer ran, but no command pending, "
@@ -1207,101 +1262,62 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
1207 xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT); 1262 xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
1208} 1263}
1209 1264
1210/* 1265void xhci_handle_command_timeout(struct work_struct *work)
1211 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
1212 * If there are other commands waiting then restart the ring and kick the timer.
1213 * This must be called with command ring stopped and xhci->lock held.
1214 */
1215static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
1216 struct xhci_command *cur_cmd)
1217{
1218 struct xhci_command *i_cmd, *tmp_cmd;
1219 u32 cycle_state;
1220
1221 /* Turn all aborted commands in list to no-ops, then restart */
1222 list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
1223 cmd_list) {
1224
1225 if (i_cmd->status != COMP_CMD_ABORT)
1226 continue;
1227
1228 i_cmd->status = COMP_CMD_STOP;
1229
1230 xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
1231 i_cmd->command_trb);
1232 /* get cycle state from the original cmd trb */
1233 cycle_state = le32_to_cpu(
1234 i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
1235 /* modify the command trb to no-op command */
1236 i_cmd->command_trb->generic.field[0] = 0;
1237 i_cmd->command_trb->generic.field[1] = 0;
1238 i_cmd->command_trb->generic.field[2] = 0;
1239 i_cmd->command_trb->generic.field[3] = cpu_to_le32(
1240 TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
1241
1242 /*
1243 * caller waiting for completion is called when command
1244 * completion event is received for these no-op commands
1245 */
1246 }
1247
1248 xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
1249
1250 /* ring command ring doorbell to restart the command ring */
1251 if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
1252 !(xhci->xhc_state & XHCI_STATE_DYING)) {
1253 xhci->current_cmd = cur_cmd;
1254 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
1255 xhci_ring_cmd_db(xhci);
1256 }
1257 return;
1258}
1259
1260
1261void xhci_handle_command_timeout(unsigned long data)
1262{ 1266{
1263 struct xhci_hcd *xhci; 1267 struct xhci_hcd *xhci;
1264 int ret; 1268 int ret;
1265 unsigned long flags; 1269 unsigned long flags;
1266 u64 hw_ring_state; 1270 u64 hw_ring_state;
1267 bool second_timeout = false;
1268 xhci = (struct xhci_hcd *) data;
1269 1271
1270 /* mark this command to be cancelled */ 1272 xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
1273
1271 spin_lock_irqsave(&xhci->lock, flags); 1274 spin_lock_irqsave(&xhci->lock, flags);
1272 if (xhci->current_cmd) { 1275
1273 if (xhci->current_cmd->status == COMP_CMD_ABORT) 1276 /*
1274 second_timeout = true; 1277 * If timeout work is pending, or current_cmd is NULL, it means we
1275 xhci->current_cmd->status = COMP_CMD_ABORT; 1278 * raced with command completion. Command is handled so just return.
1279 */
1280 if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
1281 spin_unlock_irqrestore(&xhci->lock, flags);
1282 return;
1276 } 1283 }
1284 /* mark this command to be cancelled */
1285 xhci->current_cmd->status = COMP_CMD_ABORT;
1277 1286
1278 /* Make sure command ring is running before aborting it */ 1287 /* Make sure command ring is running before aborting it */
1279 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); 1288 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
1280 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && 1289 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
1281 (hw_ring_state & CMD_RING_RUNNING)) { 1290 (hw_ring_state & CMD_RING_RUNNING)) {
1282 spin_unlock_irqrestore(&xhci->lock, flags); 1291 /* Prevent new doorbell, and start command abort */
1292 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
1283 xhci_dbg(xhci, "Command timeout\n"); 1293 xhci_dbg(xhci, "Command timeout\n");
1284 ret = xhci_abort_cmd_ring(xhci); 1294 ret = xhci_abort_cmd_ring(xhci, flags);
1285 if (unlikely(ret == -ESHUTDOWN)) { 1295 if (unlikely(ret == -ESHUTDOWN)) {
1286 xhci_err(xhci, "Abort command ring failed\n"); 1296 xhci_err(xhci, "Abort command ring failed\n");
1287 xhci_cleanup_command_queue(xhci); 1297 xhci_cleanup_command_queue(xhci);
1298 spin_unlock_irqrestore(&xhci->lock, flags);
1288 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); 1299 usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
1289 xhci_dbg(xhci, "xHCI host controller is dead.\n"); 1300 xhci_dbg(xhci, "xHCI host controller is dead.\n");
1301
1302 return;
1290 } 1303 }
1291 return; 1304
1305 goto time_out_completed;
1292 } 1306 }
1293 1307
1294 /* command ring failed to restart, or host removed. Bail out */ 1308 /* host removed. Bail out */
1295 if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) { 1309 if (xhci->xhc_state & XHCI_STATE_REMOVING) {
1296 spin_unlock_irqrestore(&xhci->lock, flags); 1310 xhci_dbg(xhci, "host removed, ring start fail?\n");
1297 xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
1298 xhci_cleanup_command_queue(xhci); 1311 xhci_cleanup_command_queue(xhci);
1299 return; 1312
1313 goto time_out_completed;
1300 } 1314 }
1301 1315
1302 /* command timeout on stopped ring, ring can't be aborted */ 1316 /* command timeout on stopped ring, ring can't be aborted */
1303 xhci_dbg(xhci, "Command timeout on stopped ring\n"); 1317 xhci_dbg(xhci, "Command timeout on stopped ring\n");
1304 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); 1318 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
1319
1320time_out_completed:
1305 spin_unlock_irqrestore(&xhci->lock, flags); 1321 spin_unlock_irqrestore(&xhci->lock, flags);
1306 return; 1322 return;
1307} 1323}
@@ -1333,7 +1349,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1333 1349
1334 cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); 1350 cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
1335 1351
1336 del_timer(&xhci->cmd_timer); 1352 cancel_delayed_work(&xhci->cmd_timer);
1337 1353
1338 trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); 1354 trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
1339 1355
@@ -1341,7 +1357,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1341 1357
1342 /* If CMD ring stopped we own the trbs between enqueue and dequeue */ 1358 /* If CMD ring stopped we own the trbs between enqueue and dequeue */
1343 if (cmd_comp_code == COMP_CMD_STOP) { 1359 if (cmd_comp_code == COMP_CMD_STOP) {
1344 xhci_handle_stopped_cmd_ring(xhci, cmd); 1360 complete_all(&xhci->cmd_ring_stop_completion);
1345 return; 1361 return;
1346 } 1362 }
1347 1363
@@ -1359,8 +1375,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1359 */ 1375 */
1360 if (cmd_comp_code == COMP_CMD_ABORT) { 1376 if (cmd_comp_code == COMP_CMD_ABORT) {
1361 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; 1377 xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1362 if (cmd->status == COMP_CMD_ABORT) 1378 if (cmd->status == COMP_CMD_ABORT) {
1379 if (xhci->current_cmd == cmd)
1380 xhci->current_cmd = NULL;
1363 goto event_handled; 1381 goto event_handled;
1382 }
1364 } 1383 }
1365 1384
1366 cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); 1385 cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
@@ -1421,7 +1440,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1421 if (cmd->cmd_list.next != &xhci->cmd_list) { 1440 if (cmd->cmd_list.next != &xhci->cmd_list) {
1422 xhci->current_cmd = list_entry(cmd->cmd_list.next, 1441 xhci->current_cmd = list_entry(cmd->cmd_list.next,
1423 struct xhci_command, cmd_list); 1442 struct xhci_command, cmd_list);
1424 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT); 1443 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
1444 } else if (xhci->current_cmd == cmd) {
1445 xhci->current_cmd = NULL;
1425 } 1446 }
1426 1447
1427event_handled: 1448event_handled:
@@ -1939,8 +1960,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1939 struct xhci_ep_ctx *ep_ctx; 1960 struct xhci_ep_ctx *ep_ctx;
1940 u32 trb_comp_code; 1961 u32 trb_comp_code;
1941 u32 remaining, requested; 1962 u32 remaining, requested;
1942 bool on_data_stage; 1963 u32 trb_type;
1943 1964
1965 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
1944 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); 1966 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1945 xdev = xhci->devs[slot_id]; 1967 xdev = xhci->devs[slot_id];
1946 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; 1968 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
@@ -1950,14 +1972,11 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1950 requested = td->urb->transfer_buffer_length; 1972 requested = td->urb->transfer_buffer_length;
1951 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); 1973 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1952 1974
1953 /* not setup (dequeue), or status stage means we are at data stage */
1954 on_data_stage = (ep_trb != ep_ring->dequeue && ep_trb != td->last_trb);
1955
1956 switch (trb_comp_code) { 1975 switch (trb_comp_code) {
1957 case COMP_SUCCESS: 1976 case COMP_SUCCESS:
1958 if (ep_trb != td->last_trb) { 1977 if (trb_type != TRB_STATUS) {
1959 xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n", 1978 xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
1960 on_data_stage ? "data" : "setup"); 1979 (trb_type == TRB_DATA) ? "data" : "setup");
1961 *status = -ESHUTDOWN; 1980 *status = -ESHUTDOWN;
1962 break; 1981 break;
1963 } 1982 }
@@ -1967,15 +1986,25 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1967 *status = 0; 1986 *status = 0;
1968 break; 1987 break;
1969 case COMP_STOP_SHORT: 1988 case COMP_STOP_SHORT:
1970 if (on_data_stage) 1989 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
1971 td->urb->actual_length = remaining; 1990 td->urb->actual_length = remaining;
1972 else 1991 else
1973 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n"); 1992 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
1974 goto finish_td; 1993 goto finish_td;
1975 case COMP_STOP: 1994 case COMP_STOP:
1976 if (on_data_stage) 1995 switch (trb_type) {
1996 case TRB_SETUP:
1997 td->urb->actual_length = 0;
1998 goto finish_td;
1999 case TRB_DATA:
2000 case TRB_NORMAL:
1977 td->urb->actual_length = requested - remaining; 2001 td->urb->actual_length = requested - remaining;
1978 goto finish_td; 2002 goto finish_td;
2003 default:
2004 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
2005 trb_type);
2006 goto finish_td;
2007 }
1979 case COMP_STOP_INVAL: 2008 case COMP_STOP_INVAL:
1980 goto finish_td; 2009 goto finish_td;
1981 default: 2010 default:
@@ -1987,7 +2016,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1987 /* else fall through */ 2016 /* else fall through */
1988 case COMP_STALL: 2017 case COMP_STALL:
1989 /* Did we transfer part of the data (middle) phase? */ 2018 /* Did we transfer part of the data (middle) phase? */
1990 if (on_data_stage) 2019 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
1991 td->urb->actual_length = requested - remaining; 2020 td->urb->actual_length = requested - remaining;
1992 else if (!td->urb_length_set) 2021 else if (!td->urb_length_set)
1993 td->urb->actual_length = 0; 2022 td->urb->actual_length = 0;
@@ -1995,14 +2024,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1995 } 2024 }
1996 2025
1997 /* stopped at setup stage, no data transferred */ 2026 /* stopped at setup stage, no data transferred */
1998 if (ep_trb == ep_ring->dequeue) 2027 if (trb_type == TRB_SETUP)
1999 goto finish_td; 2028 goto finish_td;
2000 2029
2001 /* 2030 /*
2002 * if on data stage then update the actual_length of the URB and flag it 2031 * if on data stage then update the actual_length of the URB and flag it
2003 * as set, so it won't be overwritten in the event for the last TRB. 2032 * as set, so it won't be overwritten in the event for the last TRB.
2004 */ 2033 */
2005 if (on_data_stage) { 2034 if (trb_type == TRB_DATA ||
2035 trb_type == TRB_NORMAL) {
2006 td->urb_length_set = true; 2036 td->urb_length_set = true;
2007 td->urb->actual_length = requested - remaining; 2037 td->urb->actual_length = requested - remaining;
2008 xhci_dbg(xhci, "Waiting for status stage event\n"); 2038 xhci_dbg(xhci, "Waiting for status stage event\n");
@@ -3790,9 +3820,9 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
3790 3820
3791 /* if there are no other commands queued we start the timeout timer */ 3821 /* if there are no other commands queued we start the timeout timer */
3792 if (xhci->cmd_list.next == &cmd->cmd_list && 3822 if (xhci->cmd_list.next == &cmd->cmd_list &&
3793 !timer_pending(&xhci->cmd_timer)) { 3823 !delayed_work_pending(&xhci->cmd_timer)) {
3794 xhci->current_cmd = cmd; 3824 xhci->current_cmd = cmd;
3795 mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT); 3825 xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
3796 } 3826 }
3797 3827
3798 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3, 3828 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
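
The xhci-ring.c changes above hinge on moving the command timeout from a
timer_list to a delayed_work: work items run in process context, so the
handler may drop xhci->lock and sleep in wait_for_completion_timeout(), which
the old softirq-context timer callback could not. The replacements map roughly
one-to-one (sketch assembled from the hunks above):

	/* setup, replacing setup_timer() */
	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
	init_completion(&xhci->cmd_ring_stop_completion);

	/* arm or re-arm, replacing mod_timer() */
	mod_delayed_work(system_wq, &xhci->cmd_timer,
			XHCI_CMD_DEFAULT_TIMEOUT);

	/* disarm, replacing del_timer() / del_timer_sync() */
	cancel_delayed_work(&xhci->cmd_timer);
	cancel_delayed_work_sync(&xhci->cmd_timer);
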
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1cd56417cbec..9a0ec116654a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1534,19 +1534,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1534 xhci_urb_free_priv(urb_priv); 1534 xhci_urb_free_priv(urb_priv);
1535 return ret; 1535 return ret;
1536 } 1536 }
1537 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
1538 (xhci->xhc_state & XHCI_STATE_HALTED)) {
1539 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1540 "Ep 0x%x: URB %p to be canceled on "
1541 "non-responsive xHCI host.",
1542 urb->ep->desc.bEndpointAddress, urb);
1543 /* Let the stop endpoint command watchdog timer (which set this
1544 * state) finish cleaning up the endpoint TD lists. We must
1545 * have caught it in the middle of dropping a lock and giving
1546 * back an URB.
1547 */
1548 goto done;
1549 }
1550 1537
1551 ep_index = xhci_get_endpoint_index(&urb->ep->desc); 1538 ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1552 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; 1539 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
@@ -3787,8 +3774,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3787 3774
3788 mutex_lock(&xhci->mutex); 3775 mutex_lock(&xhci->mutex);
3789 3776
3790 if (xhci->xhc_state) /* dying, removing or halted */ 3777 if (xhci->xhc_state) { /* dying, removing or halted */
3778 ret = -ESHUTDOWN;
3791 goto out; 3779 goto out;
3780 }
3792 3781
3793 if (!udev->slot_id) { 3782 if (!udev->slot_id) {
3794 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3783 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8ccc11a974b8..2d7b6374b58d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1568,7 +1568,8 @@ struct xhci_hcd {
1568#define CMD_RING_STATE_STOPPED (1 << 2) 1568#define CMD_RING_STATE_STOPPED (1 << 2)
1569 struct list_head cmd_list; 1569 struct list_head cmd_list;
1570 unsigned int cmd_ring_reserved_trbs; 1570 unsigned int cmd_ring_reserved_trbs;
1571 struct timer_list cmd_timer; 1571 struct delayed_work cmd_timer;
1572 struct completion cmd_ring_stop_completion;
1572 struct xhci_command *current_cmd; 1573 struct xhci_command *current_cmd;
1573 struct xhci_ring *event_ring; 1574 struct xhci_ring *event_ring;
1574 struct xhci_erst erst; 1575 struct xhci_erst erst;
@@ -1934,7 +1935,7 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
1934 unsigned int slot_id, unsigned int ep_index, 1935 unsigned int slot_id, unsigned int ep_index,
1935 struct xhci_dequeue_state *deq_state); 1936 struct xhci_dequeue_state *deq_state);
1936void xhci_stop_endpoint_command_watchdog(unsigned long arg); 1937void xhci_stop_endpoint_command_watchdog(unsigned long arg);
1937void xhci_handle_command_timeout(unsigned long data); 1938void xhci_handle_command_timeout(struct work_struct *work);
1938 1939
1939void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, 1940void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
1940 unsigned int ep_index, unsigned int stream_id); 1941 unsigned int ep_index, unsigned int stream_id);
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 310238c6b5cd..896798071817 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -469,6 +469,7 @@ static const struct musb_platform_ops bfin_ops = {
 	.init		= bfin_musb_init,
 	.exit		= bfin_musb_exit,
 
+	.fifo_offset	= bfin_fifo_offset,
 	.readb		= bfin_readb,
 	.writeb		= bfin_writeb,
 	.readw		= bfin_readw,
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 9e226468a13e..772f15821242 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -594,11 +594,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
 				| MUSB_PORT_STAT_RESUME;
 		musb->rh_timer = jiffies
 				 + msecs_to_jiffies(USB_RESUME_TIMEOUT);
-		musb->need_finish_resume = 1;
-
 		musb->xceiv->otg->state = OTG_STATE_A_HOST;
 		musb->is_active = 1;
 		musb_host_resume_root_hub(musb);
+		schedule_delayed_work(&musb->finish_resume_work,
+				      msecs_to_jiffies(USB_RESUME_TIMEOUT));
 		break;
 	case OTG_STATE_B_WAIT_ACON:
 		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
@@ -1925,6 +1925,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
 static void musb_irq_work(struct work_struct *data)
 {
 	struct musb *musb = container_of(data, struct musb, irq_work.work);
+	int error;
+
+	error = pm_runtime_get_sync(musb->controller);
+	if (error < 0) {
+		dev_err(musb->controller, "Could not enable: %i\n", error);
+
+		return;
+	}
 
 	musb_pm_runtime_check_session(musb);
 
@@ -1932,6 +1940,9 @@ static void musb_irq_work(struct work_struct *data)
 		musb->xceiv_old_state = musb->xceiv->otg->state;
 		sysfs_notify(&musb->controller->kobj, NULL, "mode");
 	}
+
+	pm_runtime_mark_last_busy(musb->controller);
+	pm_runtime_put_autosuspend(musb->controller);
 }
 
 static void musb_recover_from_babble(struct musb *musb)
@@ -2050,6 +2061,7 @@ struct musb_pending_work {
 	struct list_head node;
 };
 
+#ifdef CONFIG_PM
 /*
  * Called from musb_runtime_resume(), musb_resume(), and
  * musb_queue_resume_work(). Callers must take musb->lock.
@@ -2077,6 +2089,7 @@ static int musb_run_resume_work(struct musb *musb)
 
 	return error;
 }
+#endif
 
 /*
  * Called to run work if device is active or else queue the work to happen
@@ -2708,11 +2721,6 @@ static int musb_resume(struct device *dev)
 	mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
 	if ((devctl & mask) != (musb->context.devctl & mask))
 		musb->port1_status = 0;
-	if (musb->need_finish_resume) {
-		musb->need_finish_resume = 0;
-		schedule_delayed_work(&musb->finish_resume_work,
-				      msecs_to_jiffies(USB_RESUME_TIMEOUT));
-	}
 
 	/*
 	 * The USB HUB code expects the device to be in RPM_ACTIVE once it came
@@ -2764,12 +2772,6 @@ static int musb_runtime_resume(struct device *dev)
 
 	musb_restore_context(musb);
 
-	if (musb->need_finish_resume) {
-		musb->need_finish_resume = 0;
-		schedule_delayed_work(&musb->finish_resume_work,
-				      msecs_to_jiffies(USB_RESUME_TIMEOUT));
-	}
-
 	spin_lock_irqsave(&musb->lock, flags);
 	error = musb_run_resume_work(musb);
 	if (error)
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index a611e2f67bdc..ce5a18c98c6d 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -216,6 +216,7 @@ struct musb_platform_ops {
 	void	(*pre_root_reset_end)(struct musb *musb);
 	void	(*post_root_reset_end)(struct musb *musb);
 	int	(*phy_callback)(enum musb_vbus_id_status status);
+	void	(*clear_ep_rxintr)(struct musb *musb, int epnum);
 };
 
 /*
@@ -409,7 +410,6 @@ struct musb {
 
 	/* is_suspended means USB B_PERIPHERAL suspend */
 	unsigned		is_suspended:1;
-	unsigned		need_finish_resume :1;
 
 	/* may_wakeup means remote wakeup is enabled */
 	unsigned		may_wakeup:1;
@@ -626,6 +626,12 @@ static inline void musb_platform_post_root_reset_end(struct musb *musb)
 		musb->ops->post_root_reset_end(musb);
 }
 
+static inline void musb_platform_clear_ep_rxintr(struct musb *musb, int epnum)
+{
+	if (musb->ops->clear_ep_rxintr)
+		musb->ops->clear_ep_rxintr(musb, epnum);
+}
+
 /*
  * gets the "dr_mode" property from DT and converts it into musb_mode
  * if the property is not found or not recognized returns MUSB_OTG
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 4fef50e5c8c1..dd70c88419d2 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -114,6 +114,7 @@ static int musb_regdump_show(struct seq_file *s, void *unused)
 	unsigned		i;
 
 	seq_printf(s, "MUSB (M)HDRC Register Dump\n");
+	pm_runtime_get_sync(musb->controller);
 
 	for (i = 0; i < ARRAY_SIZE(musb_regmap); i++) {
 		switch (musb_regmap[i].size) {
@@ -132,6 +133,8 @@ static int musb_regdump_show(struct seq_file *s, void *unused)
 		}
 	}
 
+	pm_runtime_mark_last_busy(musb->controller);
+	pm_runtime_put_autosuspend(musb->controller);
 	return 0;
 }
 
@@ -145,7 +148,10 @@ static int musb_test_mode_show(struct seq_file *s, void *unused)
 	struct musb		*musb = s->private;
 	unsigned		test;
 
+	pm_runtime_get_sync(musb->controller);
 	test = musb_readb(musb->mregs, MUSB_TESTMODE);
+	pm_runtime_mark_last_busy(musb->controller);
+	pm_runtime_put_autosuspend(musb->controller);
 
 	if (test & MUSB_TEST_FORCE_HOST)
 		seq_printf(s, "force host\n");
@@ -194,11 +200,12 @@ static ssize_t musb_test_mode_write(struct file *file,
 	u8			test;
 	char			buf[18];
 
+	pm_runtime_get_sync(musb->controller);
 	test = musb_readb(musb->mregs, MUSB_TESTMODE);
 	if (test) {
 		dev_err(musb->controller, "Error: test mode is already set. "
 			"Please do USB Bus Reset to start a new test.\n");
-		return count;
+		goto ret;
 	}
 
 	memset(buf, 0x00, sizeof(buf));
@@ -234,6 +241,9 @@ static ssize_t musb_test_mode_write(struct file *file,
 
 	musb_writeb(musb->mregs, MUSB_TESTMODE, test);
 
+ret:
+	pm_runtime_mark_last_busy(musb->controller);
+	pm_runtime_put_autosuspend(musb->controller);
 	return count;
 }
 
@@ -254,8 +264,13 @@ static int musb_softconnect_show(struct seq_file *s, void *unused)
 	switch (musb->xceiv->otg->state) {
 	case OTG_STATE_A_HOST:
 	case OTG_STATE_A_WAIT_BCON:
+		pm_runtime_get_sync(musb->controller);
+
 		reg = musb_readb(musb->mregs, MUSB_DEVCTL);
 		connect = reg & MUSB_DEVCTL_SESSION ? 1 : 0;
+
+		pm_runtime_mark_last_busy(musb->controller);
+		pm_runtime_put_autosuspend(musb->controller);
 		break;
 	default:
 		connect = -1;
@@ -284,6 +299,7 @@ static ssize_t musb_softconnect_write(struct file *file,
 	if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
 		return -EFAULT;
 
+	pm_runtime_get_sync(musb->controller);
 	if (!strncmp(buf, "0", 1)) {
 		switch (musb->xceiv->otg->state) {
 		case OTG_STATE_A_HOST:
@@ -314,6 +330,8 @@ static ssize_t musb_softconnect_write(struct file *file,
 		}
 	}
 
+	pm_runtime_mark_last_busy(musb->controller);
+	pm_runtime_put_autosuspend(musb->controller);
 	return count;
 }
 
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index feae1561b9ab..9f125e179acd 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -267,6 +267,17 @@ static void otg_timer(unsigned long _musb)
 	pm_runtime_put_autosuspend(dev);
 }
 
+void dsps_musb_clear_ep_rxintr(struct musb *musb, int epnum)
+{
+	u32 epintr;
+	struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
+	const struct dsps_musb_wrapper *wrp = glue->wrp;
+
+	/* musb->lock might already been held */
+	epintr = (1 << epnum) << wrp->rxep_shift;
+	musb_writel(musb->ctrl_base, wrp->epintr_status, epintr);
+}
+
 static irqreturn_t dsps_interrupt(int irq, void *hci)
 {
 	struct musb *musb = hci;
@@ -622,6 +633,7 @@ static struct musb_platform_ops dsps_ops = {
 
 	.set_mode	= dsps_musb_set_mode,
 	.recover	= dsps_musb_recover,
+	.clear_ep_rxintr = dsps_musb_clear_ep_rxintr,
};
 
 static u64 musb_dmamask = DMA_BIT_MASK(32);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index f6cdbad00dac..ac3a4952abb4 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2374,12 +2374,11 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
 	int			is_in = usb_pipein(urb->pipe);
 	int			status = 0;
 	u16			csr;
+	struct dma_channel	*dma = NULL;
 
 	musb_ep_select(regs, hw_end);
 
 	if (is_dma_capable()) {
-		struct dma_channel	*dma;
-
 		dma = is_in ? ep->rx_channel : ep->tx_channel;
 		if (dma) {
 			status = ep->musb->dma_controller->channel_abort(dma);
@@ -2395,10 +2394,9 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
 		/* giveback saves bulk toggle */
 		csr = musb_h_flush_rxfifo(ep, 0);
 
-		/* REVISIT we still get an irq; should likely clear the
-		 * endpoint's irq status here to avoid bogus irqs.
-		 * clearing that status is platform-specific...
-		 */
+		/* clear the endpoint's irq status here to avoid bogus irqs */
+		if (is_dma_capable() && dma)
+			musb_platform_clear_ep_rxintr(musb, ep->epnum);
 	} else if (ep->epnum) {
 		musb_h_tx_flush_fifo(ep);
 		csr = musb_readw(epio, MUSB_TXCSR);
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index f7b13fd25257..a3dcbd55e436 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -157,5 +157,5 @@ struct musb_dma_controller {
 	void __iomem			*base;
 	u8				channel_count;
 	u8				used_channels;
-	u8				irq;
+	int				irq;
 };
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 2597b83a8ae2..95aa5233726c 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -95,6 +95,7 @@ struct ch341_private {
 	unsigned baud_rate; /* set baud rate */
 	u8 line_control; /* set line control value RTS/DTR */
 	u8 line_status; /* active status of modem control inputs */
+	u8 lcr;
 };
 
 static void ch341_set_termios(struct tty_struct *tty,
@@ -112,6 +113,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request,
 	r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
 			    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
 			    value, index, NULL, 0, DEFAULT_TIMEOUT);
+	if (r < 0)
+		dev_err(&dev->dev, "failed to send control message: %d\n", r);
 
 	return r;
 }
@@ -129,11 +132,24 @@ static int ch341_control_in(struct usb_device *dev,
 	r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
 			    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
 			    value, index, buf, bufsize, DEFAULT_TIMEOUT);
-	return r;
+	if (r < bufsize) {
+		if (r >= 0) {
+			dev_err(&dev->dev,
+				"short control message received (%d < %u)\n",
+				r, bufsize);
+			r = -EIO;
+		}
+
+		dev_err(&dev->dev, "failed to receive control message: %d\n",
+			r);
+		return r;
+	}
+
+	return 0;
 }
 
-static int ch341_init_set_baudrate(struct usb_device *dev,
-				   struct ch341_private *priv, unsigned ctrl)
+static int ch341_set_baudrate_lcr(struct usb_device *dev,
+				  struct ch341_private *priv, u8 lcr)
 {
 	short a;
 	int r;
@@ -156,9 +172,19 @@ static int ch341_init_set_baudrate(struct usb_device *dev,
 	factor = 0x10000 - factor;
 	a = (factor & 0xff00) | divisor;
 
-	/* 0x9c is "enable SFR_UART Control register and timer" */
-	r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT,
-			      0x9c | (ctrl << 8), a | 0x80);
+	/*
+	 * CH341A buffers data until a full endpoint-size packet (32 bytes)
+	 * has been received unless bit 7 is set.
+	 */
+	a |= BIT(7);
+
+	r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a);
+	if (r)
+		return r;
+
+	r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, lcr);
+	if (r)
+		return r;
 
 	return r;
 }
@@ -170,9 +196,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control)
 
 static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
 {
+	const unsigned int size = 2;
 	char *buffer;
 	int r;
-	const unsigned size = 8;
 	unsigned long flags;
 
 	buffer = kmalloc(size, GFP_KERNEL);
@@ -183,14 +209,9 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
 	if (r < 0)
 		goto out;
 
-	/* setup the private status if available */
-	if (r == 2) {
-		r = 0;
-		spin_lock_irqsave(&priv->lock, flags);
-		priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
-		spin_unlock_irqrestore(&priv->lock, flags);
-	} else
-		r = -EPROTO;
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 out:	kfree(buffer);
 	return r;
@@ -200,9 +221,9 @@ out: kfree(buffer);
 
 static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
 {
+	const unsigned int size = 2;
 	char *buffer;
 	int r;
-	const unsigned size = 8;
 
 	buffer = kmalloc(size, GFP_KERNEL);
 	if (!buffer)
@@ -232,7 +253,7 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
 	if (r < 0)
 		goto out;
 
-	r = ch341_init_set_baudrate(dev, priv, 0);
+	r = ch341_set_baudrate_lcr(dev, priv, priv->lcr);
 	if (r < 0)
 		goto out;
 
@@ -258,7 +279,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
 
 	spin_lock_init(&priv->lock);
 	priv->baud_rate = DEFAULT_BAUD_RATE;
-	priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
 
 	r = ch341_configure(port->serial->dev, priv);
 	if (r < 0)
@@ -320,7 +340,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
 
 	r = ch341_configure(serial->dev, priv);
 	if (r)
-		goto out;
+		return r;
 
 	if (tty)
 		ch341_set_termios(tty, port, NULL);
@@ -330,12 +350,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
 	if (r) {
 		dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n",
 			__func__, r);
-		goto out;
+		return r;
 	}
 
 	r = usb_serial_generic_open(tty, port);
+	if (r)
+		goto err_kill_interrupt_urb;
+
+	return 0;
+
+err_kill_interrupt_urb:
+	usb_kill_urb(port->interrupt_in_urb);
 
-out:	return r;
+	return r;
 }
 
 /* Old_termios contains the original termios settings and
@@ -356,7 +383,6 @@ static void ch341_set_termios(struct tty_struct *tty,
 
 	baud_rate = tty_get_baud_rate(tty);
 
-	priv->baud_rate = baud_rate;
 	ctrl = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX;
 
 	switch (C_CSIZE(tty)) {
@@ -386,22 +412,25 @@ static void ch341_set_termios(struct tty_struct *tty,
 		ctrl |= CH341_LCR_STOP_BITS_2;
 
 	if (baud_rate) {
-		spin_lock_irqsave(&priv->lock, flags);
-		priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		r = ch341_init_set_baudrate(port->serial->dev, priv, ctrl);
+		priv->baud_rate = baud_rate;
+
+		r = ch341_set_baudrate_lcr(port->serial->dev, priv, ctrl);
 		if (r < 0 && old_termios) {
 			priv->baud_rate = tty_termios_baud_rate(old_termios);
 			tty_termios_copy_hw(&tty->termios, old_termios);
+		} else if (r == 0) {
+			priv->lcr = ctrl;
 		}
-	} else {
-		spin_lock_irqsave(&priv->lock, flags);
-		priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
-		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
-	ch341_set_handshake(port->serial->dev, priv->line_control);
+	spin_lock_irqsave(&priv->lock, flags);
+	if (C_BAUD(tty) == B0)
+		priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
+	else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+		priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
+	ch341_set_handshake(port->serial->dev, priv->line_control);
 }
 
 static void ch341_break_ctl(struct tty_struct *tty, int break_state)
@@ -576,14 +605,23 @@ static int ch341_tiocmget(struct tty_struct *tty)
 
 static int ch341_reset_resume(struct usb_serial *serial)
 {
-	struct ch341_private *priv;
-
-	priv = usb_get_serial_port_data(serial->port[0]);
+	struct usb_serial_port *port = serial->port[0];
+	struct ch341_private *priv = usb_get_serial_port_data(port);
+	int ret;
 
 	/* reconfigure ch341 serial port after bus-reset */
 	ch341_configure(serial->dev, priv);
 
-	return 0;
+	if (tty_port_initialized(&port->port)) {
+		ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+		if (ret) {
+			dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	return usb_serial_generic_resume(serial);
 }
 
 static struct usb_serial_driver ch341_device = {
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 5f17a3b9916d..80260b08398b 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -50,6 +50,7 @@
 #define CYBERJACK_PRODUCT_ID	0x0100
 
 /* Function prototypes */
+static int cyberjack_attach(struct usb_serial *serial);
 static int cyberjack_port_probe(struct usb_serial_port *port);
 static int cyberjack_port_remove(struct usb_serial_port *port);
 static int cyberjack_open(struct tty_struct *tty,
@@ -77,6 +78,7 @@ static struct usb_serial_driver cyberjack_device = {
 	.description =		"Reiner SCT Cyberjack USB card reader",
 	.id_table =		id_table,
 	.num_ports =		1,
+	.attach =		cyberjack_attach,
 	.port_probe =		cyberjack_port_probe,
 	.port_remove =		cyberjack_port_remove,
 	.open =			cyberjack_open,
@@ -100,6 +102,14 @@ struct cyberjack_private {
 	short		wrsent;		/* Data already sent */
 };
 
+static int cyberjack_attach(struct usb_serial *serial)
+{
+	if (serial->num_bulk_out < serial->num_ports)
+		return -ENODEV;
+
+	return 0;
+}
+
 static int cyberjack_port_probe(struct usb_serial_port *port)
 {
 	struct cyberjack_private *priv;
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
index 8282a6a18fee..22f23a429a95 100644
--- a/drivers/usb/serial/f81534.c
+++ b/drivers/usb/serial/f81534.c
@@ -1237,6 +1237,7 @@ static int f81534_attach(struct usb_serial *serial)
 static int f81534_port_probe(struct usb_serial_port *port)
 {
 	struct f81534_port_private *port_priv;
+	int ret;
 
 	port_priv = devm_kzalloc(&port->dev, sizeof(*port_priv), GFP_KERNEL);
 	if (!port_priv)
@@ -1246,10 +1247,11 @@ static int f81534_port_probe(struct usb_serial_port *port)
 	mutex_init(&port_priv->mcr_mutex);
 
 	/* Assign logic-to-phy mapping */
-	port_priv->phy_num = f81534_logic_to_phy_port(port->serial, port);
-	if (port_priv->phy_num < 0 || port_priv->phy_num >= F81534_NUM_PORT)
-		return -ENODEV;
+	ret = f81534_logic_to_phy_port(port->serial, port);
+	if (ret < 0)
+		return ret;
 
+	port_priv->phy_num = ret;
 	usb_set_serial_port_data(port, port_priv);
 	dev_dbg(&port->dev, "%s: port_number: %d, phy_num: %d\n", __func__,
 		port->port_number, port_priv->phy_num);
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 97cabf803c2f..b2f2e87aed94 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -1043,6 +1043,7 @@ static int garmin_write_bulk(struct usb_serial_port *port,
 			"%s - usb_submit_urb(write bulk) failed with status = %d\n",
 			__func__, status);
 		count = status;
+		kfree(buffer);
 	}
 
 	/* we are done with this urb, so let the host driver
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index dcc0c58aaad5..d50e5773483f 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2751,6 +2751,11 @@ static int edge_startup(struct usb_serial *serial)
 					EDGE_COMPATIBILITY_MASK1,
 					EDGE_COMPATIBILITY_MASK2 };
 
+	if (serial->num_bulk_in < 1 || serial->num_interrupt_in < 1) {
+		dev_err(&serial->interface->dev, "missing endpoints\n");
+		return -ENODEV;
+	}
+
 	dev = serial->dev;
 
 	/* create our private serial structure */
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index c339163698eb..9a0db2965fbb 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1499,8 +1499,7 @@ static int do_boot_mode(struct edgeport_serial *serial,
 
 	dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__);
 
-	/* return an error on purpose */
-	return -ENODEV;
+	return 1;
 }
 
 stayinbootmode:
@@ -1508,7 +1507,7 @@ stayinbootmode:
 	dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__);
 	serial->product_info.TiMode = TI_MODE_BOOT;
 
-	return 0;
+	return 1;
 }
 
 static int ti_do_config(struct edgeport_port *port, int feature, int on)
@@ -2546,6 +2545,13 @@ static int edge_startup(struct usb_serial *serial)
 	int status;
 	u16 product_id;
 
+	/* Make sure we have the required endpoints when in download mode. */
+	if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) {
+		if (serial->num_bulk_in < serial->num_ports ||
+				serial->num_bulk_out < serial->num_ports)
+			return -ENODEV;
+	}
+
 	/* create our private serial structure */
 	edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
 	if (!edge_serial)
@@ -2553,14 +2559,18 @@ static int edge_startup(struct usb_serial *serial)
 
 	mutex_init(&edge_serial->es_lock);
 	edge_serial->serial = serial;
+	INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work);
 	usb_set_serial_data(serial, edge_serial);
 
 	status = download_fw(edge_serial);
-	if (status) {
+	if (status < 0) {
 		kfree(edge_serial);
 		return status;
 	}
 
+	if (status > 0)
+		return 1; /* bind but do not register any ports */
+
 	product_id = le16_to_cpu(
 			edge_serial->serial->dev->descriptor.idProduct);
 
@@ -2572,7 +2582,6 @@ static int edge_startup(struct usb_serial *serial)
 		}
 	}
 
-	INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work);
 	edge_heartbeat_schedule(edge_serial);
 
 	return 0;
@@ -2580,6 +2589,9 @@ static int edge_startup(struct usb_serial *serial)
 
 static void edge_disconnect(struct usb_serial *serial)
 {
+	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+
+	cancel_delayed_work_sync(&edge_serial->heartbeat_work);
 }
 
 static void edge_release(struct usb_serial *serial)
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 344b4eea4bd5..d57fb5199218 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -68,6 +68,16 @@ struct iuu_private {
 	u32 clk;
 };
 
+static int iuu_attach(struct usb_serial *serial)
+{
+	unsigned char num_ports = serial->num_ports;
+
+	if (serial->num_bulk_in < num_ports || serial->num_bulk_out < num_ports)
+		return -ENODEV;
+
+	return 0;
+}
+
 static int iuu_port_probe(struct usb_serial_port *port)
 {
 	struct iuu_private *priv;
@@ -1196,6 +1206,7 @@ static struct usb_serial_driver iuu_device = {
 	.tiocmset = iuu_tiocmset,
 	.set_termios = iuu_set_termios,
 	.init_termios = iuu_init_termios,
+	.attach = iuu_attach,
 	.port_probe = iuu_port_probe,
 	.port_remove = iuu_port_remove,
 };
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index e49ad0c63ad8..83523fcf6fb9 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -699,6 +699,19 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
 MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
 #endif
 
+static int keyspan_pda_attach(struct usb_serial *serial)
+{
+	unsigned char num_ports = serial->num_ports;
+
+	if (serial->num_bulk_out < num_ports ||
+			serial->num_interrupt_in < num_ports) {
+		dev_err(&serial->interface->dev, "missing endpoints\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 static int keyspan_pda_port_probe(struct usb_serial_port *port)
 {
 
@@ -776,6 +789,7 @@ static struct usb_serial_driver keyspan_pda_device = {
 	.break_ctl =		keyspan_pda_break_ctl,
 	.tiocmget =		keyspan_pda_tiocmget,
 	.tiocmset =		keyspan_pda_tiocmset,
+	.attach =		keyspan_pda_attach,
 	.port_probe =		keyspan_pda_port_probe,
 	.port_remove =		keyspan_pda_port_remove,
 };
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 0ee190fc1bf8..6cb45757818f 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -192,10 +192,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
 			     status_buf, KLSI_STATUSBUF_LEN,
 			     10000
 			     );
-	if (rc < 0)
-		dev_err(&port->dev, "Reading line status failed (error = %d)\n",
-			rc);
-	else {
+	if (rc != KLSI_STATUSBUF_LEN) {
+		dev_err(&port->dev, "reading line status failed: %d\n", rc);
+		if (rc >= 0)
+			rc = -EIO;
+	} else {
 		status = get_unaligned_le16(status_buf);
 
 		dev_info(&port->serial->dev->dev, "read status %x %x\n",
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 2363654cafc9..813035f51fe7 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -51,6 +51,7 @@
 
 
 /* Function prototypes */
+static int kobil_attach(struct usb_serial *serial);
 static int kobil_port_probe(struct usb_serial_port *probe);
 static int kobil_port_remove(struct usb_serial_port *probe);
 static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
@@ -86,6 +87,7 @@ static struct usb_serial_driver kobil_device = {
 	.description =		"KOBIL USB smart card terminal",
 	.id_table =		id_table,
 	.num_ports =		1,
+	.attach =		kobil_attach,
 	.port_probe =		kobil_port_probe,
 	.port_remove =		kobil_port_remove,
 	.ioctl =		kobil_ioctl,
@@ -113,6 +115,16 @@ struct kobil_private {
 };
 
 
+static int kobil_attach(struct usb_serial *serial)
+{
+	if (serial->num_interrupt_out < serial->num_ports) {
+		dev_err(&serial->interface->dev, "missing interrupt-out endpoint\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 static int kobil_port_probe(struct usb_serial_port *port)
 {
 	struct usb_serial *serial = port->serial;
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index d52caa03679c..91bc170b408a 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -65,8 +65,6 @@ struct moschip_port {
 	struct urb		*write_urb_pool[NUM_URBS];
 };
 
-static struct usb_serial_driver moschip7720_2port_driver;
-
 #define USB_VENDOR_ID_MOSCHIP		0x9710
 #define MOSCHIP_DEVICE_ID_7720		0x7720
 #define MOSCHIP_DEVICE_ID_7715		0x7715
@@ -970,25 +968,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
 		tty_port_tty_wakeup(&mos7720_port->port->port);
 }
 
-/*
- * mos77xx_probe
- *	this function installs the appropriate read interrupt endpoint callback
- *	depending on whether the device is a 7720 or 7715, thus avoiding costly
- *	run-time checks in the high-frequency callback routine itself.
- */
-static int mos77xx_probe(struct usb_serial *serial,
-			 const struct usb_device_id *id)
-{
-	if (id->idProduct == MOSCHIP_DEVICE_ID_7715)
-		moschip7720_2port_driver.read_int_callback =
-			mos7715_interrupt_callback;
-	else
-		moschip7720_2port_driver.read_int_callback =
-			mos7720_interrupt_callback;
-
-	return 0;
-}
-
 static int mos77xx_calc_num_ports(struct usb_serial *serial)
 {
 	u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
@@ -1917,6 +1896,11 @@ static int mos7720_startup(struct usb_serial *serial)
 	u16 product;
 	int ret_val;
 
+	if (serial->num_bulk_in < 2 || serial->num_bulk_out < 2) {
+		dev_err(&serial->interface->dev, "missing bulk endpoints\n");
+		return -ENODEV;
+	}
+
 	product = le16_to_cpu(serial->dev->descriptor.idProduct);
 	dev = serial->dev;
 
@@ -1941,19 +1925,18 @@ static int mos7720_startup(struct usb_serial *serial)
 				tmp->interrupt_in_endpointAddress;
 		serial->port[1]->interrupt_in_urb = NULL;
 		serial->port[1]->interrupt_in_buffer = NULL;
+
+		if (serial->port[0]->interrupt_in_urb) {
+			struct urb *urb = serial->port[0]->interrupt_in_urb;
+
+			urb->complete = mos7715_interrupt_callback;
+		}
 	}
 
 	/* setting configuration feature to one */
 	usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
 			(__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
 
-	/* start the interrupt urb */
-	ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
-	if (ret_val)
-		dev_err(&dev->dev,
-			"%s - Error %d submitting control urb\n",
-			__func__, ret_val);
-
 #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
 	if (product == MOSCHIP_DEVICE_ID_7715) {
 		ret_val = mos7715_parport_init(serial);
@@ -1961,6 +1944,13 @@ static int mos7720_startup(struct usb_serial *serial)
 			return ret_val;
 	}
 #endif
+	/* start the interrupt urb */
+	ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
+	if (ret_val) {
+		dev_err(&dev->dev, "failed to submit interrupt urb: %d\n",
+			ret_val);
+	}
+
 	/* LSR For Port 1 */
 	read_mos_reg(serial, 0, MOS7720_LSR, &data);
 	dev_dbg(&dev->dev, "LSR:%x\n", data);
@@ -1970,6 +1960,8 @@ static int mos7720_startup(struct usb_serial *serial)
 
 static void mos7720_release(struct usb_serial *serial)
 {
+	usb_kill_urb(serial->port[0]->interrupt_in_urb);
+
 #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
 	/* close the parallel port */
 
@@ -2019,11 +2011,6 @@ static int mos7720_port_probe(struct usb_serial_port *port)
 	if (!mos7720_port)
 		return -ENOMEM;
 
-	/* Initialize all port interrupt end point to port 0 int endpoint.
-	 * Our device has only one interrupt endpoint common to all ports.
-	 */
-	port->interrupt_in_endpointAddress =
-		port->serial->port[0]->interrupt_in_endpointAddress;
 	mos7720_port->port = port;
 
 	usb_set_serial_port_data(port, mos7720_port);
@@ -2053,7 +2040,6 @@ static struct usb_serial_driver moschip7720_2port_driver = {
 	.close			= mos7720_close,
 	.throttle		= mos7720_throttle,
 	.unthrottle		= mos7720_unthrottle,
-	.probe			= mos77xx_probe,
 	.attach			= mos7720_startup,
 	.release		= mos7720_release,
 	.port_probe		= mos7720_port_probe,
@@ -2067,7 +2053,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
 	.chars_in_buffer	= mos7720_chars_in_buffer,
 	.break_ctl		= mos7720_break,
 	.read_bulk_callback	= mos7720_bulk_in_callback,
-	.read_int_callback	= NULL  /* dynamically assigned in probe() */
+	.read_int_callback	= mos7720_interrupt_callback,
 };
 
 static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 9a220b8e810f..ea27fb23967a 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -214,7 +214,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
 
 struct moschip_port {
 	int port_num;		/*Actual port number in the device(1,2,etc) */
-	struct urb *write_urb;	/* write URB for this port */
 	struct urb *read_urb;	/* read URB for this port */
 	__u8 shadowLCR;		/* last LCR value received */
 	__u8 shadowMCR;		/* last MCR value received */
@@ -1037,9 +1036,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
 				serial,
 				serial->port[0]->interrupt_in_urb->interval);
 
-		/* start interrupt read for mos7840 *
-		 * will continue as long as mos7840 is connected */
-
+		/* start interrupt read for mos7840 */
 		response =
 		    usb_submit_urb(serial->port[0]->interrupt_in_urb,
 				   GFP_KERNEL);
@@ -1186,7 +1183,6 @@ static void mos7840_close(struct usb_serial_port *port)
 		}
 	}
 
-	usb_kill_urb(mos7840_port->write_urb);
 	usb_kill_urb(mos7840_port->read_urb);
 	mos7840_port->read_urb_busy = false;
 
@@ -1199,12 +1195,6 @@ static void mos7840_close(struct usb_serial_port *port)
 		}
 	}
 
-	if (mos7840_port->write_urb) {
-		/* if this urb had a transfer buffer already (old tx) free it */
-		kfree(mos7840_port->write_urb->transfer_buffer);
-		usb_free_urb(mos7840_port->write_urb);
-	}
-
 	Data = 0x0;
 	mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
 
@@ -2113,6 +2103,17 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
 	return mos7840_num_ports;
 }
 
+static int mos7840_attach(struct usb_serial *serial)
+{
+	if (serial->num_bulk_in < serial->num_ports ||
+			serial->num_bulk_out < serial->num_ports) {
+		dev_err(&serial->interface->dev, "missing endpoints\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 static int mos7840_port_probe(struct usb_serial_port *port)
 {
 	struct usb_serial *serial = port->serial;
@@ -2388,6 +2389,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
 	.tiocmset = mos7840_tiocmset,
 	.tiocmiwait = usb_serial_generic_tiocmiwait,
 	.get_icount = usb_serial_generic_get_icount,
+	.attach = mos7840_attach,
 	.port_probe = mos7840_port_probe,
 	.port_remove = mos7840_port_remove,
 	.read_bulk_callback = mos7840_bulk_in_callback,
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index f6c6900bccf0..a180b17d2432 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -38,6 +38,7 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
 		const unsigned char *buf, int count);
 static int  omninet_write_room(struct tty_struct *tty);
 static void omninet_disconnect(struct usb_serial *serial);
+static int omninet_attach(struct usb_serial *serial);
 static int omninet_port_probe(struct usb_serial_port *port);
 static int omninet_port_remove(struct usb_serial_port *port);
 
@@ -56,6 +57,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
 	.description =		"ZyXEL - omni.net lcd plus usb",
 	.id_table =		id_table,
 	.num_ports =		1,
+	.attach =		omninet_attach,
 	.port_probe =		omninet_port_probe,
 	.port_remove =		omninet_port_remove,
 	.open =			omninet_open,
@@ -104,6 +106,17 @@ struct omninet_data {
 	__u8	od_outseq;	/* Sequence number for bulk_out URBs */
 };
 
+static int omninet_attach(struct usb_serial *serial)
+{
+	/* The second bulk-out endpoint is used for writing. */
+	if (serial->num_bulk_out < 2) {
+		dev_err(&serial->interface->dev, "missing endpoints\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 static int omninet_port_probe(struct usb_serial_port *port)
 {
 	struct omninet_data *od;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7ce31a4c7e7f..42cc72e54c05 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) },	/* HP lt2523 (Novatel E371) */
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index a4b88bc038b6..b8bf52bf7a94 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -134,6 +134,7 @@ static int oti6858_chars_in_buffer(struct tty_struct *tty);
 static int oti6858_tiocmget(struct tty_struct *tty);
 static int oti6858_tiocmset(struct tty_struct *tty,
 				unsigned int set, unsigned int clear);
+static int oti6858_attach(struct usb_serial *serial);
 static int oti6858_port_probe(struct usb_serial_port *port);
 static int oti6858_port_remove(struct usb_serial_port *port);
 
@@ -158,6 +159,7 @@ static struct usb_serial_driver oti6858_device = {
 	.write_bulk_callback =	oti6858_write_bulk_callback,
 	.write_room =		oti6858_write_room,
 	.chars_in_buffer =	oti6858_chars_in_buffer,
+	.attach =		oti6858_attach,
 	.port_probe =		oti6858_port_probe,
 	.port_remove =		oti6858_port_remove,
 };
@@ -324,6 +326,20 @@ static void send_data(struct work_struct *work)
 	usb_serial_port_softint(port);
 }
 
+static int oti6858_attach(struct usb_serial *serial)
+{
+	unsigned char num_ports = serial->num_ports;
+
+	if (serial->num_bulk_in < num_ports ||
+			serial->num_bulk_out < num_ports ||
+			serial->num_interrupt_in < num_ports) {
+		dev_err(&serial->interface->dev, "missing endpoints\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 static int oti6858_port_probe(struct usb_serial_port *port)
 {
 	struct oti6858_private *priv;
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index ae682e4eeaef..1db4b61bdf7b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
 	{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
 	{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
 	{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
@@ -220,9 +221,17 @@ static int pl2303_probe(struct usb_serial *serial,
 static int pl2303_startup(struct usb_serial *serial)
 {
 	struct pl2303_serial_private *spriv;
+	unsigned char num_ports = serial->num_ports;
 	enum pl2303_type type = TYPE_01;
 	unsigned char *buf;
 
+	if (serial->num_bulk_in < num_ports ||
+			serial->num_bulk_out < num_ports ||
+			serial->num_interrupt_in < num_ports) {
+		dev_err(&serial->interface->dev, "missing endpoints\n");
+		return -ENODEV;
+	}
+
 	spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
 	if (!spriv)
 		return -ENOMEM;
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index e3b7af8adfb7..09d9be88209e 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -27,6 +27,7 @@
 #define ATEN_VENDOR_ID		0x0557
 #define ATEN_VENDOR_ID2		0x0547
 #define ATEN_PRODUCT_ID		0x2008
+#define ATEN_PRODUCT_ID2	0x2118
 
 #define IODATA_VENDOR_ID	0x04bb
 #define IODATA_PRODUCT_ID	0x0a03
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 1bc6089b9008..696458db7e3c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
 	{USB_DEVICE(0x1410, 0xa021)},	/* Novatel Gobi 3000 Composite */
 	{USB_DEVICE(0x413c, 0x8193)},	/* Dell Gobi 3000 QDL */
 	{USB_DEVICE(0x413c, 0x8194)},	/* Dell Gobi 3000 Composite */
+	{USB_DEVICE(0x413c, 0x81a6)},	/* Dell DW5570 QDL (MC8805) */
 	{USB_DEVICE(0x1199, 0x68a4)},	/* Sierra Wireless QDL */
 	{USB_DEVICE(0x1199, 0x68a5)},	/* Sierra Wireless Modem */
 	{USB_DEVICE(0x1199, 0x68a8)},	/* Sierra Wireless QDL */
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 659cb8606bd9..5709cc93b083 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -408,16 +408,12 @@ static void qt2_close(struct usb_serial_port *port)
 {
 	struct usb_serial *serial;
 	struct qt2_port_private *port_priv;
-	unsigned long flags;
 	int i;
 
 	serial = port->serial;
 	port_priv = usb_get_serial_port_data(port);
 
-	spin_lock_irqsave(&port_priv->urb_lock, flags);
 	usb_kill_urb(port_priv->write_urb);
-	port_priv->urb_in_use = false;
-	spin_unlock_irqrestore(&port_priv->urb_lock, flags);
 
 	/* flush the port transmit buffer */
 	i = usb_control_msg(serial->dev,
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index ef0dbf0703c5..475e6c31b266 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -154,6 +154,19 @@ static int spcp8x5_probe(struct usb_serial *serial,
 	return 0;
 }
 
+static int spcp8x5_attach(struct usb_serial *serial)
+{
+	unsigned char num_ports = serial->num_ports;
+
+	if (serial->num_bulk_in < num_ports ||
+	    serial->num_bulk_out < num_ports) {
+		dev_err(&serial->interface->dev, "missing endpoints\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 static int spcp8x5_port_probe(struct usb_serial_port *port)
 {
 	const struct usb_device_id *id = usb_get_serial_data(port->serial);
@@ -477,6 +490,7 @@ static struct usb_serial_driver spcp8x5_device = {
 	.tiocmget		= spcp8x5_tiocmget,
 	.tiocmset		= spcp8x5_tiocmset,
 	.probe			= spcp8x5_probe,
+	.attach			= spcp8x5_attach,
 	.port_probe		= spcp8x5_port_probe,
 	.port_remove		= spcp8x5_port_remove,
 };
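The spcp8x5 change above follows the same pattern as the other USB-serial fixes in this series: validate at attach or startup time that the interface actually exposes the endpoints each port will use, before any port can be opened. A minimal sketch of that shared shape, where "example_attach" is an illustrative name rather than anything from the patches:

static int example_attach(struct usb_serial *serial)
{
	/* Each port needs one bulk-in and one bulk-out endpoint;
	 * refuse to bind to interfaces that cannot provide them. */
	if (serial->num_bulk_in < serial->num_ports ||
	    serial->num_bulk_out < serial->num_ports) {
		dev_err(&serial->interface->dev, "missing endpoints\n");
		return -ENODEV;
	}

	return 0;
}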
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 8db9d071d940..64b85b8dedf3 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -579,6 +579,13 @@ static int ti_startup(struct usb_serial *serial)
 		goto free_tdev;
 	}
 
+	if (serial->num_bulk_in < serial->num_ports ||
+	    serial->num_bulk_out < serial->num_ports) {
+		dev_err(&serial->interface->dev, "missing endpoints\n");
+		status = -ENODEV;
+		goto free_tdev;
+	}
+
 	return 0;
 
 free_tdev:
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index af3c7eecff91..16cc18369111 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2109,6 +2109,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_BROKEN_FUA ),
 
+/* Reported-by George Cherian <george.cherian@cavium.com> */
+UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
+		"JMicron",
+		"JMS56x",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NO_REPORT_OPCODES),
+
 /*
  * Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
  * and Mac USB Dock USB-SCSI */
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 79451f7ef1b7..062c205f0046 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -216,7 +216,6 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
 	struct scatterlist sg[4], sg_dst;
 	void *dst_buf;
 	size_t dst_size;
-	const u8 bzero[16] = { 0 };
 	u8 iv[crypto_skcipher_ivsize(tfm_cbc)];
 	size_t zero_padding;
 
@@ -261,7 +260,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
 	sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1));
 	sg_set_buf(&sg[2], b, blen);
 	/* 0 if well behaved :) */
-	sg_set_buf(&sg[3], bzero, zero_padding);
+	sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0);
 	sg_init_one(&sg_dst, dst_buf, dst_size);
 
 	skcipher_request_set_tfm(req, tfm_cbc);
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index be1ee89ee917..36d75c367d22 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -27,6 +27,45 @@ static LIST_HEAD(parent_list);
 static DEFINE_MUTEX(parent_list_lock);
 static struct class_compat *mdev_bus_compat_class;
 
+static LIST_HEAD(mdev_list);
+static DEFINE_MUTEX(mdev_list_lock);
+
+struct device *mdev_parent_dev(struct mdev_device *mdev)
+{
+	return mdev->parent->dev;
+}
+EXPORT_SYMBOL(mdev_parent_dev);
+
+void *mdev_get_drvdata(struct mdev_device *mdev)
+{
+	return mdev->driver_data;
+}
+EXPORT_SYMBOL(mdev_get_drvdata);
+
+void mdev_set_drvdata(struct mdev_device *mdev, void *data)
+{
+	mdev->driver_data = data;
+}
+EXPORT_SYMBOL(mdev_set_drvdata);
+
+struct device *mdev_dev(struct mdev_device *mdev)
+{
+	return &mdev->dev;
+}
+EXPORT_SYMBOL(mdev_dev);
+
+struct mdev_device *mdev_from_dev(struct device *dev)
+{
+	return dev_is_mdev(dev) ? to_mdev_device(dev) : NULL;
+}
+EXPORT_SYMBOL(mdev_from_dev);
+
+uuid_le mdev_uuid(struct mdev_device *mdev)
+{
+	return mdev->uuid;
+}
+EXPORT_SYMBOL(mdev_uuid);
+
 static int _find_mdev_device(struct device *dev, void *data)
 {
 	struct mdev_device *mdev;
@@ -42,7 +81,7 @@ static int _find_mdev_device(struct device *dev, void *data)
 	return 0;
 }
 
-static bool mdev_device_exist(struct parent_device *parent, uuid_le uuid)
+static bool mdev_device_exist(struct mdev_parent *parent, uuid_le uuid)
 {
 	struct device *dev;
 
@@ -56,9 +95,9 @@ static bool mdev_device_exist(struct parent_device *parent, uuid_le uuid)
 }
 
 /* Should be called holding parent_list_lock */
-static struct parent_device *__find_parent_device(struct device *dev)
+static struct mdev_parent *__find_parent_device(struct device *dev)
 {
-	struct parent_device *parent;
+	struct mdev_parent *parent;
 
 	list_for_each_entry(parent, &parent_list, next) {
 		if (parent->dev == dev)
@@ -69,8 +108,8 @@ static struct parent_device *__find_parent_device(struct device *dev)
 
 static void mdev_release_parent(struct kref *kref)
 {
-	struct parent_device *parent = container_of(kref, struct parent_device,
-						    ref);
+	struct mdev_parent *parent = container_of(kref, struct mdev_parent,
+						  ref);
 	struct device *dev = parent->dev;
 
 	kfree(parent);
@@ -78,7 +117,7 @@ static void mdev_release_parent(struct kref *kref)
 }
 
 static
-inline struct parent_device *mdev_get_parent(struct parent_device *parent)
+inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
 {
 	if (parent)
 		kref_get(&parent->ref);
@@ -86,7 +125,7 @@ inline struct parent_device *mdev_get_parent(struct parent_device *parent)
 	return parent;
 }
 
-static inline void mdev_put_parent(struct parent_device *parent)
+static inline void mdev_put_parent(struct mdev_parent *parent)
 {
 	if (parent)
 		kref_put(&parent->ref, mdev_release_parent);
@@ -95,7 +134,7 @@ static inline void mdev_put_parent(struct parent_device *parent)
 static int mdev_device_create_ops(struct kobject *kobj,
 				  struct mdev_device *mdev)
 {
-	struct parent_device *parent = mdev->parent;
+	struct mdev_parent *parent = mdev->parent;
 	int ret;
 
 	ret = parent->ops->create(kobj, mdev);
@@ -122,7 +161,7 @@ static int mdev_device_create_ops(struct kobject *kobj,
  */
 static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
 {
-	struct parent_device *parent = mdev->parent;
+	struct mdev_parent *parent = mdev->parent;
 	int ret;
 
 	/*
@@ -153,10 +192,10 @@ static int mdev_device_remove_cb(struct device *dev, void *data)
  * Add device to list of registered parent devices.
  * Returns a negative value on error, otherwise 0.
  */
-int mdev_register_device(struct device *dev, const struct parent_ops *ops)
+int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
 {
 	int ret;
-	struct parent_device *parent;
+	struct mdev_parent *parent;
 
 	/* check for mandatory ops */
 	if (!ops || !ops->create || !ops->remove || !ops->supported_type_groups)
@@ -229,7 +268,7 @@ EXPORT_SYMBOL(mdev_register_device);
 
 void mdev_unregister_device(struct device *dev)
 {
-	struct parent_device *parent;
+	struct mdev_parent *parent;
 	bool force_remove = true;
 
 	mutex_lock(&parent_list_lock);
@@ -266,7 +305,7 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
 {
 	int ret;
 	struct mdev_device *mdev;
-	struct parent_device *parent;
+	struct mdev_parent *parent;
 	struct mdev_type *type = to_mdev_type(kobj);
 
 	parent = mdev_get_parent(type->parent);
@@ -316,6 +355,11 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
 	dev_dbg(&mdev->dev, "MDEV: created\n");
 
 	mutex_unlock(&parent->lock);
+
+	mutex_lock(&mdev_list_lock);
+	list_add(&mdev->next, &mdev_list);
+	mutex_unlock(&mdev_list_lock);
+
 	return ret;
 
 create_failed:
@@ -329,12 +373,30 @@ create_err:
 
 int mdev_device_remove(struct device *dev, bool force_remove)
 {
-	struct mdev_device *mdev;
-	struct parent_device *parent;
+	struct mdev_device *mdev, *tmp;
+	struct mdev_parent *parent;
 	struct mdev_type *type;
 	int ret;
+	bool found = false;
 
 	mdev = to_mdev_device(dev);
+
+	mutex_lock(&mdev_list_lock);
+	list_for_each_entry(tmp, &mdev_list, next) {
+		if (tmp == mdev) {
+			found = true;
+			break;
+		}
+	}
+
+	if (found)
+		list_del(&mdev->next);
+
+	mutex_unlock(&mdev_list_lock);
+
+	if (!found)
+		return -ENODEV;
+
 	type = to_mdev_type(mdev->type_kobj);
 	parent = mdev->parent;
 	mutex_lock(&parent->lock);
@@ -342,6 +404,11 @@ int mdev_device_remove(struct device *dev, bool force_remove)
 	ret = mdev_device_remove_ops(mdev, force_remove);
 	if (ret) {
 		mutex_unlock(&parent->lock);
+
+		mutex_lock(&mdev_list_lock);
+		list_add(&mdev->next, &mdev_list);
+		mutex_unlock(&mdev_list_lock);
+
 		return ret;
 	}
 
@@ -349,7 +416,8 @@ int mdev_device_remove(struct device *dev, bool force_remove)
 	device_unregister(dev);
 	mutex_unlock(&parent->lock);
 	mdev_put_parent(parent);
-	return ret;
+
+	return 0;
 }
 
 static int __init mdev_init(void)
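The accessors exported above (mdev_get_drvdata(), mdev_set_drvdata(), mdev_dev(), mdev_uuid()) exist so vendor drivers no longer reach into struct mdev_device now that it has moved into mdev_private.h. A hypothetical vendor create callback might use them like this — "my_state" and "my_create" are illustrative names, not part of the patch:

static int my_create(struct kobject *kobj, struct mdev_device *mdev)
{
	uuid_le uuid = mdev_uuid(mdev);
	struct my_state *state;	/* hypothetical per-device state */

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	/* Stash driver state on the mdev via the new accessor ... */
	mdev_set_drvdata(mdev, state);
	/* ... and log through the generic struct device handle. */
	dev_info(mdev_dev(mdev), "created mdev %pUl\n", uuid.b);

	return 0;
}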
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index d35097cbf3d7..a9cefd70a705 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -16,10 +16,33 @@
 int mdev_bus_register(void);
 void mdev_bus_unregister(void);
 
+struct mdev_parent {
+	struct device *dev;
+	const struct mdev_parent_ops *ops;
+	struct kref ref;
+	struct mutex lock;
+	struct list_head next;
+	struct kset *mdev_types_kset;
+	struct list_head type_list;
+};
+
+struct mdev_device {
+	struct device dev;
+	struct mdev_parent *parent;
+	uuid_le uuid;
+	void *driver_data;
+	struct kref ref;
+	struct list_head next;
+	struct kobject *type_kobj;
+};
+
+#define to_mdev_device(dev) container_of(dev, struct mdev_device, dev)
+#define dev_is_mdev(d) ((d)->bus == &mdev_bus_type)
+
 struct mdev_type {
 	struct kobject kobj;
 	struct kobject *devices_kobj;
-	struct parent_device *parent;
+	struct mdev_parent *parent;
 	struct list_head next;
 	struct attribute_group *group;
 };
@@ -29,8 +52,8 @@ struct mdev_type {
 #define to_mdev_type(_kobj)	\
 	container_of(_kobj, struct mdev_type, kobj)
 
-int parent_create_sysfs_files(struct parent_device *parent);
-void parent_remove_sysfs_files(struct parent_device *parent);
+int parent_create_sysfs_files(struct mdev_parent *parent);
+void parent_remove_sysfs_files(struct mdev_parent *parent);
 
 int mdev_create_sysfs_files(struct device *dev, struct mdev_type *type);
 void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type);
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index 1a53deb2ee10..802df210929b 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -92,7 +92,7 @@ static struct kobj_type mdev_type_ktype = {
 	.release = mdev_type_release,
 };
 
-struct mdev_type *add_mdev_supported_type(struct parent_device *parent,
+struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
 					  struct attribute_group *group)
 {
 	struct mdev_type *type;
@@ -158,7 +158,7 @@ static void remove_mdev_supported_type(struct mdev_type *type)
 	kobject_put(&type->kobj);
 }
 
-static int add_mdev_supported_type_groups(struct parent_device *parent)
+static int add_mdev_supported_type_groups(struct mdev_parent *parent)
 {
 	int i;
 
@@ -183,7 +183,7 @@ static int add_mdev_supported_type_groups(struct parent_device *parent)
 }
 
 /* mdev sysfs functions */
-void parent_remove_sysfs_files(struct parent_device *parent)
+void parent_remove_sysfs_files(struct mdev_parent *parent)
 {
 	struct mdev_type *type, *tmp;
 
@@ -196,7 +196,7 @@ void parent_remove_sysfs_files(struct parent_device *parent)
 	kset_unregister(parent->mdev_types_kset);
 }
 
-int parent_create_sysfs_files(struct parent_device *parent)
+int parent_create_sysfs_files(struct mdev_parent *parent)
 {
 	int ret;
 
diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c
index ffc36758cb84..fa848a701b8b 100644
--- a/drivers/vfio/mdev/vfio_mdev.c
+++ b/drivers/vfio/mdev/vfio_mdev.c
@@ -27,7 +27,7 @@
 static int vfio_mdev_open(void *device_data)
 {
 	struct mdev_device *mdev = device_data;
-	struct parent_device *parent = mdev->parent;
+	struct mdev_parent *parent = mdev->parent;
 	int ret;
 
 	if (unlikely(!parent->ops->open))
@@ -46,7 +46,7 @@ static int vfio_mdev_open(void *device_data)
 static void vfio_mdev_release(void *device_data)
 {
 	struct mdev_device *mdev = device_data;
-	struct parent_device *parent = mdev->parent;
+	struct mdev_parent *parent = mdev->parent;
 
 	if (likely(parent->ops->release))
 		parent->ops->release(mdev);
@@ -58,7 +58,7 @@ static long vfio_mdev_unlocked_ioctl(void *device_data,
 			unsigned int cmd, unsigned long arg)
 {
 	struct mdev_device *mdev = device_data;
-	struct parent_device *parent = mdev->parent;
+	struct mdev_parent *parent = mdev->parent;
 
 	if (unlikely(!parent->ops->ioctl))
 		return -EINVAL;
@@ -70,7 +70,7 @@ static ssize_t vfio_mdev_read(void *device_data, char __user *buf,
 			      size_t count, loff_t *ppos)
 {
 	struct mdev_device *mdev = device_data;
-	struct parent_device *parent = mdev->parent;
+	struct mdev_parent *parent = mdev->parent;
 
 	if (unlikely(!parent->ops->read))
 		return -EINVAL;
@@ -82,7 +82,7 @@ static ssize_t vfio_mdev_write(void *device_data, const char __user *buf,
 			       size_t count, loff_t *ppos)
 {
 	struct mdev_device *mdev = device_data;
-	struct parent_device *parent = mdev->parent;
+	struct mdev_parent *parent = mdev->parent;
 
 	if (unlikely(!parent->ops->write))
 		return -EINVAL;
@@ -93,7 +93,7 @@ static ssize_t vfio_mdev_write(void *device_data, const char __user *buf,
 static int vfio_mdev_mmap(void *device_data, struct vm_area_struct *vma)
 {
 	struct mdev_device *mdev = device_data;
-	struct parent_device *parent = mdev->parent;
+	struct mdev_parent *parent = mdev->parent;
 
 	if (unlikely(!parent->ops->mmap))
 		return -EINVAL;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index dcd7c2a99618..324c52e3a1a4 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1142,6 +1142,10 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
1142 return ret; 1142 return ret;
1143 1143
1144 vdev->barmap[index] = pci_iomap(pdev, index, 0); 1144 vdev->barmap[index] = pci_iomap(pdev, index, 0);
1145 if (!vdev->barmap[index]) {
1146 pci_release_selected_regions(pdev, 1 << index);
1147 return -ENOMEM;
1148 }
1145 } 1149 }
1146 1150
1147 vma->vm_private_data = vdev; 1151 vma->vm_private_data = vdev;
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 5ffd1d9ad4bd..357243d76f10 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -193,7 +193,10 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
193 if (!vdev->has_vga) 193 if (!vdev->has_vga)
194 return -EINVAL; 194 return -EINVAL;
195 195
196 switch (pos) { 196 if (pos > 0xbfffful)
197 return -EINVAL;
198
199 switch ((u32)pos) {
197 case 0xa0000 ... 0xbffff: 200 case 0xa0000 ... 0xbffff:
198 count = min(count, (size_t)(0xc0000 - pos)); 201 count = min(count, (size_t)(0xc0000 - pos));
199 iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1); 202 iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index c8823578a1b2..59b3f62a2d64 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data,
 	mutex_lock(&container->lock);
 
 	ret = tce_iommu_create_default_window(container);
-	if (ret)
-		return ret;
-
-	ret = tce_iommu_create_window(container, create.page_shift,
-			create.window_size, create.levels,
-			&create.start_addr);
+	if (!ret)
+		ret = tce_iommu_create_window(container,
+				create.page_shift,
+				create.window_size, create.levels,
+				&create.start_addr);
 
 	mutex_unlock(&container->lock);
 
@@ -1246,6 +1245,8 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
 static long tce_iommu_take_ownership_ddw(struct tce_container *container,
 		struct iommu_table_group *table_group)
 {
+	long i, ret = 0;
+
 	if (!table_group->ops->create_table || !table_group->ops->set_window ||
 			!table_group->ops->release_ownership) {
 		WARN_ON_ONCE(1);
@@ -1254,7 +1255,27 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
 
 	table_group->ops->take_ownership(table_group);
 
+	/* Set all windows to the new group */
+	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+		struct iommu_table *tbl = container->tables[i];
+
+		if (!tbl)
+			continue;
+
+		ret = table_group->ops->set_window(table_group, i, tbl);
+		if (ret)
+			goto release_exit;
+	}
+
 	return 0;
+
+release_exit:
+	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
+		table_group->ops->unset_window(table_group, i);
+
+	table_group->ops->release_ownership(table_group);
+
+	return ret;
 }
 
 static int tce_iommu_attach_group(void *iommu_data,
@@ -1270,6 +1291,10 @@ static int tce_iommu_attach_group(void *iommu_data,
 	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
 	   iommu_group_id(iommu_group), iommu_group); */
 	table_group = iommu_group_get_iommudata(iommu_group);
+	if (!table_group) {
+		ret = -ENODEV;
+		goto unlock_exit;
+	}
 
 	if (tce_groups_attached(container) && (!table_group->ops ||
 			!table_group->ops->take_ownership ||
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index f3726ba12aa6..b3cc33fa6d26 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -36,7 +36,6 @@
 #include <linux/uaccess.h>
 #include <linux/vfio.h>
 #include <linux/workqueue.h>
-#include <linux/pid_namespace.h>
 #include <linux/mdev.h>
 #include <linux/notifier.h>
 
@@ -268,28 +267,38 @@ static void vfio_lock_acct(struct task_struct *task, long npage)
 {
 	struct vwork *vwork;
 	struct mm_struct *mm;
+	bool is_current;
 
 	if (!npage)
 		return;
 
-	mm = get_task_mm(task);
+	is_current = (task->mm == current->mm);
+
+	mm = is_current ? task->mm : get_task_mm(task);
 	if (!mm)
-		return; /* process exited or nothing to do */
+		return; /* process exited */
 
 	if (down_write_trylock(&mm->mmap_sem)) {
 		mm->locked_vm += npage;
 		up_write(&mm->mmap_sem);
-		mmput(mm);
+		if (!is_current)
+			mmput(mm);
 		return;
 	}
 
+	if (is_current) {
+		mm = get_task_mm(task);
+		if (!mm)
+			return;
+	}
+
 	/*
 	 * Couldn't get mmap_sem lock, so must setup to update
 	 * mm->locked_vm later. If locked_vm were atomic, we
 	 * wouldn't need this silliness
 	 */
 	vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
-	if (!vwork) {
+	if (WARN_ON(!vwork)) {
 		mmput(mm);
 		return;
 	}
@@ -393,77 +402,71 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 				  long npage, unsigned long *pfn_base)
 {
-	unsigned long limit;
-	bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
-				   CAP_IPC_LOCK);
-	struct mm_struct *mm;
-	long ret, i = 0, lock_acct = 0;
+	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	bool lock_cap = capable(CAP_IPC_LOCK);
+	long ret, pinned = 0, lock_acct = 0;
 	bool rsvd;
 	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
 
-	mm = get_task_mm(dma->task);
-	if (!mm)
+	/* This code path is only user initiated */
+	if (!current->mm)
 		return -ENODEV;
 
-	ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
+	ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
 	if (ret)
-		goto pin_pg_remote_exit;
+		return ret;
 
+	pinned++;
 	rsvd = is_invalid_reserved_pfn(*pfn_base);
-	limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
 	/*
 	 * Reserved pages aren't counted against the user, externally pinned
 	 * pages are already counted against the user.
 	 */
 	if (!rsvd && !vfio_find_vpfn(dma, iova)) {
-		if (!lock_cap && mm->locked_vm + 1 > limit) {
+		if (!lock_cap && current->mm->locked_vm + 1 > limit) {
 			put_pfn(*pfn_base, dma->prot);
 			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
 				limit << PAGE_SHIFT);
-			ret = -ENOMEM;
-			goto pin_pg_remote_exit;
+			return -ENOMEM;
 		}
 		lock_acct++;
 	}
 
-	i++;
-	if (likely(!disable_hugepages)) {
-		/* Lock all the consecutive pages from pfn_base */
-		for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; i < npage;
-		     i++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
-			unsigned long pfn = 0;
+	if (unlikely(disable_hugepages))
+		goto out;
 
-			ret = vaddr_get_pfn(mm, vaddr, dma->prot, &pfn);
-			if (ret)
-				break;
+	/* Lock all the consecutive pages from pfn_base */
+	for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
+	     pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
+		unsigned long pfn = 0;
 
-			if (pfn != *pfn_base + i ||
-			    rsvd != is_invalid_reserved_pfn(pfn)) {
+		ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
+		if (ret)
+			break;
+
+		if (pfn != *pfn_base + pinned ||
+		    rsvd != is_invalid_reserved_pfn(pfn)) {
+			put_pfn(pfn, dma->prot);
+			break;
+		}
+
+		if (!rsvd && !vfio_find_vpfn(dma, iova)) {
+			if (!lock_cap &&
+			    current->mm->locked_vm + lock_acct + 1 > limit) {
 				put_pfn(pfn, dma->prot);
+				pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
+					__func__, limit << PAGE_SHIFT);
 				break;
 			}
-
-			if (!rsvd && !vfio_find_vpfn(dma, iova)) {
-				if (!lock_cap &&
-				    mm->locked_vm + lock_acct + 1 > limit) {
-					put_pfn(pfn, dma->prot);
-					pr_warn("%s: RLIMIT_MEMLOCK (%ld) "
-						"exceeded\n", __func__,
-						limit << PAGE_SHIFT);
-					break;
-				}
-				lock_acct++;
-			}
+			lock_acct++;
 		}
 	}
 
-	vfio_lock_acct(dma->task, lock_acct);
-	ret = i;
+out:
+	vfio_lock_acct(current, lock_acct);
 
-pin_pg_remote_exit:
-	mmput(mm);
-	return ret;
+	return pinned;
 }
 
 static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
@@ -473,10 +476,10 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
 	long unlocked = 0, locked = 0;
 	long i;
 
-	for (i = 0; i < npage; i++) {
+	for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
 		if (put_pfn(pfn++, dma->prot)) {
 			unlocked++;
-			if (vfio_find_vpfn(dma, iova + (i << PAGE_SHIFT)))
+			if (vfio_find_vpfn(dma, iova))
 				locked++;
 		}
 	}
@@ -491,8 +494,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
 				  unsigned long *pfn_base, bool do_accounting)
 {
 	unsigned long limit;
-	bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
-				   CAP_IPC_LOCK);
+	bool lock_cap = has_capability(dma->task, CAP_IPC_LOCK);
 	struct mm_struct *mm;
 	int ret;
 	bool rsvd;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 253310cdaaca..fd6c8b66f06f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -843,7 +843,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 	struct iov_iter out_iter, in_iter, prot_iter, data_iter;
 	u64 tag;
 	u32 exp_data_len, data_direction;
-	unsigned out, in;
+	unsigned int out = 0, in = 0;
 	int head, ret, prot_bytes;
 	size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
 	size_t out_size, in_size;
@@ -2087,7 +2087,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
 	NULL,
 };
 
-static struct target_core_fabric_ops vhost_scsi_ops = {
+static const struct target_core_fabric_ops vhost_scsi_ops = {
 	.module			= THIS_MODULE,
 	.name			= "vhost",
 	.get_fabric_name	= vhost_scsi_get_fabric_name,
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d6432603880c..8f99fe08de02 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
 
 static void vhost_init_is_le(struct vhost_virtqueue *vq)
 {
-	if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
-		vq->is_le = true;
+	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
+		|| virtio_legacy_is_little_endian();
 }
 #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
 
 static void vhost_reset_is_le(struct vhost_virtqueue *vq)
 {
-	vq->is_le = virtio_legacy_is_little_endian();
+	vhost_init_is_le(vq);
 }
 
 struct vhost_flush_struct {
@@ -1714,10 +1714,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
 	int r;
 	bool is_le = vq->is_le;
 
-	if (!vq->private_data) {
-		vhost_reset_is_le(vq);
+	if (!vq->private_data)
 		return 0;
-	}
 
 	vhost_init_is_le(vq);
 
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bbbf588540ed..ce5e63d2c66a 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
 
 static int vhost_vsock_start(struct vhost_vsock *vsock)
 {
+	struct vhost_virtqueue *vq;
 	size_t i;
 	int ret;
 
@@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
 		goto err;
 
 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-		struct vhost_virtqueue *vq = &vsock->vqs[i];
+		vq = &vsock->vqs[i];
 
 		mutex_lock(&vq->mutex);
 
 		if (!vhost_vq_access_ok(vq)) {
 			ret = -EFAULT;
-			mutex_unlock(&vq->mutex);
 			goto err_vq;
 		}
 
 		if (!vq->private_data) {
 			vq->private_data = vsock;
-			vhost_vq_init_access(vq);
+			ret = vhost_vq_init_access(vq);
+			if (ret)
+				goto err_vq;
 		}
 
 		mutex_unlock(&vq->mutex);
@@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
 	return 0;
 
 err_vq:
+	vq->private_data = NULL;
+	mutex_unlock(&vq->mutex);
+
 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-		struct vhost_virtqueue *vq = &vsock->vqs[i];
+		vq = &vsock->vqs[i];
 
 		mutex_lock(&vq->mutex);
 		vq->private_data = NULL;
diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
index 2d3b691f3fc4..038ac6934fe9 100644
--- a/drivers/video/fbdev/cobalt_lcdfb.c
+++ b/drivers/video/fbdev/cobalt_lcdfb.c
@@ -308,6 +308,11 @@ static int cobalt_lcdfb_probe(struct platform_device *dev)
 	info->screen_size = resource_size(res);
 	info->screen_base = devm_ioremap(&dev->dev, res->start,
 					 info->screen_size);
+	if (!info->screen_base) {
+		framebuffer_release(info);
+		return -ENOMEM;
+	}
+
 	info->fbops = &cobalt_lcd_fbops;
 	info->fix = cobalt_lcdfb_fix;
 	info->fix.smem_start = res->start;
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index f89245b8ba8e..68a113594808 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
 
 int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 {
-	int tooff = 0, fromoff = 0;
-	int size;
+	unsigned int tooff = 0, fromoff = 0;
+	size_t size;
 
 	if (to->start > from->start)
 		fromoff = to->start - from->start;
 	else
 		tooff = from->start - to->start;
-	size = to->len - tooff;
-	if (size > (int) (from->len - fromoff))
-		size = from->len - fromoff;
-	if (size <= 0)
+	if (fromoff >= from->len || tooff >= to->len)
+		return -EINVAL;
+
+	size = min_t(size_t, to->len - tooff, from->len - fromoff);
+	if (size == 0)
 		return -EINVAL;
 	size *= sizeof(u16);
 
@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 
 int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
 {
-	int tooff = 0, fromoff = 0;
-	int size;
+	unsigned int tooff = 0, fromoff = 0;
+	size_t size;
 
 	if (to->start > from->start)
 		fromoff = to->start - from->start;
 	else
 		tooff = from->start - to->start;
-	size = to->len - tooff;
-	if (size > (int) (from->len - fromoff))
-		size = from->len - fromoff;
-	if (size <= 0)
+	if (fromoff >= from->len || tooff >= to->len)
+		return -EINVAL;
+
+	size = min_t(size_t, to->len - tooff, from->len - fromoff);
+	if (size == 0)
 		return -EINVAL;
 	size *= sizeof(u16);
 
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index d47a2fcef818..c71fde5fe835 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -59,6 +59,7 @@
 #define pr_fmt(fmt) "virtio-mmio: " fmt
 
 #include <linux/acpi.h>
+#include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	struct virtio_mmio_device *vm_dev;
 	struct resource *mem;
 	unsigned long magic;
+	int rc;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!mem)
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 	}
 	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
-	if (vm_dev->version == 1)
+	if (vm_dev->version == 1) {
 		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
 
+		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+		/*
+		 * In the legacy case, ensure our coherently-allocated virtio
+		 * ring will be at an address expressable as a 32-bit PFN.
+		 */
+		if (!rc)
+			dma_set_coherent_mask(&pdev->dev,
+					      DMA_BIT_MASK(32 + PAGE_SHIFT));
+	} else {
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	}
+	if (rc)
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc)
+		dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
 	platform_set_drvdata(pdev, vm_dev);
 
 	return register_virtio_device(&vm_dev->vdev);
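The virtio-mmio hunk above encodes a common probe-time idiom: try the widest DMA mask first and progressively fall back. Stripped of the legacy-ring special case, the fallback reads roughly like this ("example_dma_setup" is an illustrative name, not from the patch):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/* Sketch: prefer 64-bit DMA, fall back to 32-bit, and warn but
 * continue if neither mask can be set, mirroring the handling above. */
static void example_dma_setup(struct platform_device *pdev)
{
	int rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_warn(&pdev->dev, "no usable DMA mask, continuing\n");
}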
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 6b5ee896af63..7cc51223db1c 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -464,7 +464,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
 	vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
 	pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
 
-	*pci_base = (dma_addr_t)vme_base + pci_offset;
+	*pci_base = (dma_addr_t)*vme_base + pci_offset;
 	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
 
 	*enabled = 0;
diff --git a/drivers/xen/arm-device.c b/drivers/xen/arm-device.c
index 778acf80aacb..85dd20e05726 100644
--- a/drivers/xen/arm-device.c
+++ b/drivers/xen/arm-device.c
@@ -58,9 +58,13 @@ static int xen_map_device_mmio(const struct resource *resources,
 	xen_pfn_t *gpfns;
 	xen_ulong_t *idxs;
 	int *errs;
-	struct xen_add_to_physmap_range xatp;
 
 	for (i = 0; i < count; i++) {
+		struct xen_add_to_physmap_range xatp = {
+			.domid = DOMID_SELF,
+			.space = XENMAPSPACE_dev_mmio
+		};
+
 		r = &resources[i];
 		nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);
 		if ((resource_type(r) != IORESOURCE_MEM) || (nr == 0))
@@ -87,9 +91,7 @@ static int xen_map_device_mmio(const struct resource *resources,
 			idxs[j] = XEN_PFN_DOWN(r->start) + j;
 		}
 
-		xatp.domid = DOMID_SELF;
 		xatp.size = nr;
-		xatp.space = XENMAPSPACE_dev_mmio;
 
 		set_xen_guest_handle(xatp.gpfns, gpfns);
 		set_xen_guest_handle(xatp.idxs, idxs);
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index c03f9c86c7e3..3c41470c7fc4 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -369,8 +369,7 @@ static void evtchn_fifo_resume(void)
369 } 369 }
370 370
371 ret = init_control_block(cpu, control_block); 371 ret = init_control_block(cpu, control_block);
372 if (ret < 0) 372 BUG_ON(ret < 0);
373 BUG();
374 } 373 }
375 374
376 /* 375 /*
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index e8c7f09d01be..6890897a6f30 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -125,7 +125,7 @@ static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
 	while (*new) {
 		struct user_evtchn *this;
 
-		this = container_of(*new, struct user_evtchn, node);
+		this = rb_entry(*new, struct user_evtchn, node);
 
 		parent = *new;
 		if (this->port < evtchn->port)
@@ -157,7 +157,7 @@ static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
 	while (node) {
 		struct user_evtchn *evtchn;
 
-		evtchn = container_of(node, struct user_evtchn, node);
+		evtchn = rb_entry(node, struct user_evtchn, node);
 
 		if (evtchn->port < port)
 			node = node->rb_left;
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 112ce422dc22..2a165cc8a43c 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -42,6 +42,7 @@
 static unsigned long platform_mmio;
 static unsigned long platform_mmio_alloc;
 static unsigned long platform_mmiolen;
+static uint64_t callback_via;
 
 static unsigned long alloc_xen_mmio(unsigned long len)
 {
@@ -54,6 +55,51 @@ static unsigned long alloc_xen_mmio(unsigned long len)
 	return addr;
 }
 
+static uint64_t get_callback_via(struct pci_dev *pdev)
+{
+	u8 pin;
+	int irq;
+
+	irq = pdev->irq;
+	if (irq < 16)
+		return irq; /* ISA IRQ */
+
+	pin = pdev->pin;
+
+	/* We don't know the GSI. Specify the PCI INTx line instead. */
+	return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
+		((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+		((uint64_t)pdev->bus->number << 16) |
+		((uint64_t)(pdev->devfn & 0xff) << 8) |
+		((uint64_t)(pin - 1) & 3);
+}
+
+static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
+{
+	xen_hvm_evtchn_do_upcall();
+	return IRQ_HANDLED;
+}
+
+static int xen_allocate_irq(struct pci_dev *pdev)
+{
+	return request_irq(pdev->irq, do_hvm_evtchn_intr,
+			IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+			"xen-platform-pci", pdev);
+}
+
+static int platform_pci_resume(struct pci_dev *pdev)
+{
+	int err;
+	if (!xen_pv_domain())
+		return 0;
+	err = xen_set_callback_via(callback_via);
+	if (err) {
+		dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+		return err;
+	}
+	return 0;
+}
+
 static int platform_pci_probe(struct pci_dev *pdev,
 			      const struct pci_device_id *ent)
 {
@@ -92,6 +138,28 @@ static int platform_pci_probe(struct pci_dev *pdev,
 	platform_mmio = mmio_addr;
 	platform_mmiolen = mmio_len;
 
+	/*
+	 * Xen HVM guests always use the vector callback mechanism.
+	 * L1 Dom0 in a nested Xen environment is a PV guest inside in an
+	 * HVM environment. It needs the platform-pci driver to get
+	 * notifications from L0 Xen, but it cannot use the vector callback
+	 * as it is not exported by L1 Xen.
+	 */
+	if (xen_pv_domain()) {
+		ret = xen_allocate_irq(pdev);
+		if (ret) {
+			dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
+			goto out;
+		}
+		callback_via = get_callback_via(pdev);
+		ret = xen_set_callback_via(callback_via);
+		if (ret) {
+			dev_warn(&pdev->dev, "Unable to set the evtchn callback "
+					"err=%d\n", ret);
+			goto out;
+		}
+	}
+
 	max_nr_gframes = gnttab_max_grant_frames();
 	grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
 	ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -123,6 +191,9 @@ static struct pci_driver platform_driver = {
 	.name = DRV_NAME,
 	.probe = platform_pci_probe,
 	.id_table = platform_pci_tbl,
+#ifdef CONFIG_PM
+	.resume_early = platform_pci_resume,
+#endif
 };
 
 builtin_pci_driver(platform_driver);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 478fb91e3df2..f8afc6dcc29f 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -275,6 +275,10 @@ retry:
 		rc = 0;
 	} else
 		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+
+	if (!rc)
+		swiotlb_set_max_segment(PAGE_SIZE);
+
 	return rc;
 error:
 	if (repeat--) {
@@ -392,7 +396,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size) &&
 	    !range_straddles_page_boundary(phys, size) &&
 	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-	    !swiotlb_force) {
+	    (swiotlb_force != SWIOTLB_FORCE)) {
 		/* we are not interested in the dma_addr returned by
 		 * xen_dma_map_page, only in the potential cache flushes executed
 		 * by the function. */
@@ -410,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (map == SWIOTLB_MAP_ERROR)
 		return DMA_ERROR_CODE;
 
+	dev_addr = xen_phys_to_bus(map);
 	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
 					dev_addr, map & ~PAGE_MASK, size, dir, attrs);
-	dev_addr = xen_phys_to_bus(map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -552,7 +556,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 		phys_addr_t paddr = sg_phys(sg);
 		dma_addr_t dev_addr = xen_phys_to_bus(paddr);
 
-		if (swiotlb_force ||
+		if (swiotlb_force == SWIOTLB_FORCE ||
 		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
 		    !dma_capable(hwdev, dev_addr, sg->length) ||
 		    range_straddles_page_boundary(paddr, sg->length)) {
@@ -571,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 				sg_dma_len(sgl) = 0;
 				return 0;
 			}
+			dev_addr = xen_phys_to_bus(map);
 			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
 						dev_addr,
 						map & ~PAGE_MASK,
 						sg->length,
 						dir,
 						attrs);
-			sg->dma_address = xen_phys_to_bus(map);
+			sg->dma_address = dev_addr;
 		} else {
 			/* we are not interested in the dma_addr returned by
 			 * xen_dma_map_page, only in the potential cache flushes executed
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
index e74f9c1fbd80..867a2e425208 100644
--- a/drivers/xen/xenbus/xenbus_comms.h
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -42,7 +42,6 @@ int xb_write(const void *data, unsigned len);
 int xb_read(void *data, unsigned len);
 int xb_data_to_read(void);
 int xb_wait_for_data_to_read(void);
-int xs_input_avail(void);
 extern struct xenstore_domain_interface *xen_store_interface;
 extern int xen_store_evtchn;
 extern enum xenstore_init xen_store_domain_type;
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 6c0ead4be784..79130b310247 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -302,6 +302,29 @@ static void watch_fired(struct xenbus_watch *watch,
 	mutex_unlock(&adap->dev_data->reply_mutex);
 }
 
+static int xenbus_command_reply(struct xenbus_file_priv *u,
+				unsigned int msg_type, const char *reply)
+{
+	struct {
+		struct xsd_sockmsg hdr;
+		const char body[16];
+	} msg;
+	int rc;
+
+	msg.hdr = u->u.msg;
+	msg.hdr.type = msg_type;
+	msg.hdr.len = strlen(reply) + 1;
+	if (msg.hdr.len > sizeof(msg.body))
+		return -E2BIG;
+
+	mutex_lock(&u->reply_mutex);
+	rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
+	wake_up(&u->read_waitq);
+	mutex_unlock(&u->reply_mutex);
+
+	return rc;
+}
+
 static int xenbus_write_transaction(unsigned msg_type,
 				    struct xenbus_file_priv *u)
 {
@@ -316,12 +339,12 @@ static int xenbus_write_transaction(unsigned msg_type,
 		rc = -ENOMEM;
 		goto out;
 	}
-	} else if (msg_type == XS_TRANSACTION_END) {
+	} else if (u->u.msg.tx_id != 0) {
 		list_for_each_entry(trans, &u->transactions, list)
 			if (trans->handle.id == u->u.msg.tx_id)
 				break;
 		if (&trans->list == &u->transactions)
-			return -ESRCH;
+			return xenbus_command_reply(u, XS_ERROR, "ENOENT");
 	}
 
 	reply = xenbus_dev_request_and_reply(&u->u.msg);
@@ -372,12 +395,12 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
 	path = u->u.buffer + sizeof(u->u.msg);
 	token = memchr(path, 0, u->u.msg.len);
 	if (token == NULL) {
-		rc = -EILSEQ;
+		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
 		goto out;
 	}
 	token++;
 	if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
-		rc = -EILSEQ;
+		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
 		goto out;
 	}
 
@@ -411,23 +434,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
 	}
 
 	/* Success. Synthesize a reply to say all is OK. */
-	{
-		struct {
-			struct xsd_sockmsg hdr;
-			char body[3];
-		} __packed reply = {
-			{
-				.type = msg_type,
-				.len = sizeof(reply.body)
-			},
-			"OK"
-		};
-
-		mutex_lock(&u->reply_mutex);
-		rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
-		wake_up(&u->read_waitq);
-		mutex_unlock(&u->reply_mutex);
-	}
+	rc = xenbus_command_reply(u, msg_type, "OK");
 
 out:
 	return rc;
diff --git a/fs/Kconfig b/fs/Kconfig
index c2a377cdda2b..83eab52fb3f6 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -38,6 +38,7 @@ config FS_DAX
38 bool "Direct Access (DAX) support" 38 bool "Direct Access (DAX) support"
39 depends on MMU 39 depends on MMU
40 depends on !(ARM || MIPS || SPARC) 40 depends on !(ARM || MIPS || SPARC)
41 select FS_IOMAP
41 help 42 help
42 Direct Access (DAX) can be used on memory-backed block devices. 43 Direct Access (DAX) can be used on memory-backed block devices.
43 If the block device supports DAX and the filesystem supports DAX, 44 If the block device supports DAX and the filesystem supports DAX,
diff --git a/fs/aio.c b/fs/aio.c
index 4ab67e8cb776..873b4ca82ccb 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1085,7 +1085,8 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2)
 		 * Tell lockdep we inherited freeze protection from submission
 		 * thread.
 		 */
-		__sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+		if (S_ISREG(file_inode(file)->i_mode))
+			__sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
 		file_end_write(file);
 	}
 
@@ -1525,7 +1526,8 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
 		 * by telling it the lock got released so that it doesn't
 		 * complain about held lock when we return to userspace.
 		 */
-		__sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+		if (S_ISREG(file_inode(file)->i_mode))
+			__sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
 	}
 	kfree(iovec);
 	return ret;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 29a02daf08a9..422370293cfd 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2298,6 +2298,7 @@ static int elf_core_dump(struct coredump_params *cprm)
2298 goto end_coredump; 2298 goto end_coredump;
2299 } 2299 }
2300 } 2300 }
2301 dump_truncate(cprm);
2301 2302
2302 if (!elf_core_write_extra_data(cprm)) 2303 if (!elf_core_write_extra_data(cprm))
2303 goto end_coredump; 2304 goto end_coredump;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 6254cee8f8f3..3c47614a4b32 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -328,9 +328,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
328 struct file *file = iocb->ki_filp; 328 struct file *file = iocb->ki_filp;
329 struct inode *inode = bdev_file_inode(file); 329 struct inode *inode = bdev_file_inode(file);
330 struct block_device *bdev = I_BDEV(inode); 330 struct block_device *bdev = I_BDEV(inode);
331 struct blk_plug plug;
331 struct blkdev_dio *dio; 332 struct blkdev_dio *dio;
332 struct bio *bio; 333 struct bio *bio;
333 bool is_read = (iov_iter_rw(iter) == READ); 334 bool is_read = (iov_iter_rw(iter) == READ), is_sync;
334 loff_t pos = iocb->ki_pos; 335 loff_t pos = iocb->ki_pos;
335 blk_qc_t qc = BLK_QC_T_NONE; 336 blk_qc_t qc = BLK_QC_T_NONE;
336 int ret; 337 int ret;
@@ -343,7 +344,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
343 bio_get(bio); /* extra ref for the completion handler */ 344 bio_get(bio); /* extra ref for the completion handler */
344 345
345 dio = container_of(bio, struct blkdev_dio, bio); 346 dio = container_of(bio, struct blkdev_dio, bio);
346 dio->is_sync = is_sync_kiocb(iocb); 347 dio->is_sync = is_sync = is_sync_kiocb(iocb);
347 if (dio->is_sync) 348 if (dio->is_sync)
348 dio->waiter = current; 349 dio->waiter = current;
349 else 350 else
@@ -353,6 +354,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
353 dio->multi_bio = false; 354 dio->multi_bio = false;
354 dio->should_dirty = is_read && (iter->type == ITER_IOVEC); 355 dio->should_dirty = is_read && (iter->type == ITER_IOVEC);
355 356
357 blk_start_plug(&plug);
356 for (;;) { 358 for (;;) {
357 bio->bi_bdev = bdev; 359 bio->bi_bdev = bdev;
358 bio->bi_iter.bi_sector = pos >> 9; 360 bio->bi_iter.bi_sector = pos >> 9;
@@ -394,8 +396,9 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
394 submit_bio(bio); 396 submit_bio(bio);
395 bio = bio_alloc(GFP_KERNEL, nr_pages); 397 bio = bio_alloc(GFP_KERNEL, nr_pages);
396 } 398 }
399 blk_finish_plug(&plug);
397 400
398 if (!dio->is_sync) 401 if (!is_sync)
399 return -EIOCBQUEUED; 402 return -EIOCBQUEUED;
400 403
401 for (;;) { 404 for (;;) {
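
[Editor's note] Two independent fixes share this hunk. First, the submission loop is now bracketed by a block plug, which holds submitted bios in a per-task list so the block layer can merge and dispatch them as a batch. Second, is_sync is latched into a local before submission: once the final bio is queued, the completion handler may free dio, so dio->is_sync must not be re-read afterwards. A minimal sketch of the plugging pattern; the helper is illustrative:

    static void submit_bio_batch(struct bio **bios, int nr)
    {
            struct blk_plug plug;
            int i;

            blk_start_plug(&plug);          /* collect bios per-task... */
            for (i = 0; i < nr; i++)
                    submit_bio(bios[i]);
            blk_finish_plug(&plug);         /* ...and dispatch as one batch */
    }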
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 63d197724519..ff0b0be92d61 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
273 unsigned long flags; 273 unsigned long flags;
274 274
275 while (1) { 275 while (1) {
276 void *wtag;
277
276 spin_lock_irqsave(lock, flags); 278 spin_lock_irqsave(lock, flags);
277 if (list_empty(list)) 279 if (list_empty(list))
278 break; 280 break;
@@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
299 spin_unlock_irqrestore(lock, flags); 301 spin_unlock_irqrestore(lock, flags);
300 302
301 /* 303 /*
302 * we don't want to call the ordered free functions 304 * We don't want to call the ordered free functions with the
303 * with the lock held though 305 * lock held though. Save the work as tag for the trace event,
306 * because the callback could free the structure.
304 */ 307 */
308 wtag = work;
305 work->ordered_free(work); 309 work->ordered_free(work);
306 trace_btrfs_all_work_done(work); 310 trace_btrfs_all_work_done(wq->fs_info, wtag);
307 } 311 }
308 spin_unlock_irqrestore(lock, flags); 312 spin_unlock_irqrestore(lock, flags);
309} 313}
@@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
311static void normal_work_helper(struct btrfs_work *work) 315static void normal_work_helper(struct btrfs_work *work)
312{ 316{
313 struct __btrfs_workqueue *wq; 317 struct __btrfs_workqueue *wq;
318 void *wtag;
314 int need_order = 0; 319 int need_order = 0;
315 320
316 /* 321 /*
@@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work)
324 if (work->ordered_func) 329 if (work->ordered_func)
325 need_order = 1; 330 need_order = 1;
326 wq = work->wq; 331 wq = work->wq;
332 /* Safe for tracepoints in case work gets freed by the callback */
333 wtag = work;
327 334
328 trace_btrfs_work_sched(work); 335 trace_btrfs_work_sched(work);
329 thresh_exec_hook(wq); 336 thresh_exec_hook(wq);
@@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work)
333 run_ordered_work(wq); 340 run_ordered_work(wq);
334 } 341 }
335 if (!need_order) 342 if (!need_order)
336 trace_btrfs_all_work_done(work); 343 trace_btrfs_all_work_done(wq->fs_info, wtag);
337} 344}
338 345
339void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, 346void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
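
[Editor's note] The tracepoint only needs the pointer's value as a tag, never the pointee, so the pointer is copied into a void * before ordered_free() runs; dereferencing work after the callback would be a use-after-free. A condensed illustration of the idiom (wrapper name is mine):

    static void run_free_and_trace(struct btrfs_fs_info *fs_info,
                                   struct btrfs_work *work)
    {
            void *wtag = work;              /* value only, never dereferenced */

            work->ordered_free(work);       /* may free 'work' */
            /* trace with the saved cookie, not the dangling pointer */
            trace_btrfs_all_work_done(fs_info, wtag);
    }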
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 7f390849343b..c4444d6f439f 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1024,6 +1024,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
1024 unsigned long buf_offset; 1024 unsigned long buf_offset;
1025 unsigned long current_buf_start; 1025 unsigned long current_buf_start;
1026 unsigned long start_byte; 1026 unsigned long start_byte;
1027 unsigned long prev_start_byte;
1027 unsigned long working_bytes = total_out - buf_start; 1028 unsigned long working_bytes = total_out - buf_start;
1028 unsigned long bytes; 1029 unsigned long bytes;
1029 char *kaddr; 1030 char *kaddr;
@@ -1071,26 +1072,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
1071 if (!bio->bi_iter.bi_size) 1072 if (!bio->bi_iter.bi_size)
1072 return 0; 1073 return 0;
1073 bvec = bio_iter_iovec(bio, bio->bi_iter); 1074 bvec = bio_iter_iovec(bio, bio->bi_iter);
1074 1075 prev_start_byte = start_byte;
1075 start_byte = page_offset(bvec.bv_page) - disk_start; 1076 start_byte = page_offset(bvec.bv_page) - disk_start;
1076 1077
1077 /* 1078 /*
1078 * make sure our new page is covered by this 1079 * We need to make sure we're only adjusting
 1079 * working buffer	 1080 * our offset into the compression working buffer when
1081 * we're switching pages. Otherwise we can incorrectly
1082 * keep copying when we were actually done.
1080 */ 1083 */
1081 if (total_out <= start_byte) 1084 if (start_byte != prev_start_byte) {
1082 return 1; 1085 /*
1086 * make sure our new page is covered by this
1087 * working buffer
1088 */
1089 if (total_out <= start_byte)
1090 return 1;
1083 1091
1084 /* 1092 /*
1085 * the next page in the biovec might not be adjacent 1093 * the next page in the biovec might not be adjacent
1086 * to the last page, but it might still be found 1094 * to the last page, but it might still be found
1087 * inside this working buffer. bump our offset pointer 1095 * inside this working buffer. bump our offset pointer
1088 */ 1096 */
1089 if (total_out > start_byte && 1097 if (total_out > start_byte &&
1090 current_buf_start < start_byte) { 1098 current_buf_start < start_byte) {
1091 buf_offset = start_byte - buf_start; 1099 buf_offset = start_byte - buf_start;
1092 working_bytes = total_out - start_byte; 1100 working_bytes = total_out - start_byte;
1093 current_buf_start = buf_start + buf_offset; 1101 current_buf_start = buf_start + buf_offset;
1102 }
1094 } 1103 }
1095 } 1104 }
1096 1105
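
[Editor's note] The bug being fixed: consecutive bvecs can land in the same page, for example two 2k segments of one 4k page, in which case start_byte is unchanged and the working-buffer offset must not be re-derived, or the copy loop rewinds and keeps copying after it is actually done. Tracking prev_start_byte reduces the decision to one comparison; a minimal sketch, with the predicate factored out for clarity (helper name is mine):

    static bool switched_page(unsigned long start_byte,
                              unsigned long prev_start_byte)
    {
            /*
             * Same page -> same start_byte: leave the offset into the
             * decompression working buffer alone. Only a real page
             * switch may re-check coverage and re-derive the offset.
             */
            return start_byte != prev_start_byte;
    }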
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e97302f437a1..dcd2e798767e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2522,11 +2522,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2522 if (ref && ref->seq && 2522 if (ref && ref->seq &&
2523 btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) { 2523 btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2524 spin_unlock(&locked_ref->lock); 2524 spin_unlock(&locked_ref->lock);
2525 btrfs_delayed_ref_unlock(locked_ref);
2526 spin_lock(&delayed_refs->lock); 2525 spin_lock(&delayed_refs->lock);
2527 locked_ref->processing = 0; 2526 locked_ref->processing = 0;
2528 delayed_refs->num_heads_ready++; 2527 delayed_refs->num_heads_ready++;
2529 spin_unlock(&delayed_refs->lock); 2528 spin_unlock(&delayed_refs->lock);
2529 btrfs_delayed_ref_unlock(locked_ref);
2530 locked_ref = NULL; 2530 locked_ref = NULL;
2531 cond_resched(); 2531 cond_resched();
2532 count++; 2532 count++;
@@ -2572,7 +2572,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2572 */ 2572 */
2573 if (must_insert_reserved) 2573 if (must_insert_reserved)
2574 locked_ref->must_insert_reserved = 1; 2574 locked_ref->must_insert_reserved = 1;
2575 spin_lock(&delayed_refs->lock);
2575 locked_ref->processing = 0; 2576 locked_ref->processing = 0;
2577 delayed_refs->num_heads_ready++;
2578 spin_unlock(&delayed_refs->lock);
2576 btrfs_debug(fs_info, 2579 btrfs_debug(fs_info,
2577 "run_delayed_extent_op returned %d", 2580 "run_delayed_extent_op returned %d",
2578 ret); 2581 ret);
@@ -7384,7 +7387,8 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
7384 7387
7385 spin_unlock(&cluster->refill_lock); 7388 spin_unlock(&cluster->refill_lock);
7386 7389
7387 down_read(&used_bg->data_rwsem);	 7390 /* We should only ever nest one level deep. */
7391 down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
7388 7392
7389 spin_lock(&cluster->refill_lock); 7393 spin_lock(&cluster->refill_lock);
7390 if (used_bg == cluster->block_group) 7394 if (used_bg == cluster->block_group)
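
[Editor's note] down_read_nested() changes lockdep bookkeeping, not locking semantics: it tells lockdep that taking a second rwsem of the same lock class while one is already held is deliberate and bounded to one level, which suppresses the false self-deadlock report. Sketch of the shape, assuming two block groups of the same class (names illustrative):

    static void inspect_two_block_groups(struct btrfs_block_group_cache *bg_a,
                                         struct btrfs_block_group_cache *bg_b)
    {
            down_read(&bg_a->data_rwsem);
            /* second rwsem of the same class: annotate the intent */
            down_read_nested(&bg_b->data_rwsem, SINGLE_DEPTH_NESTING);

            /* ... examine both block groups ... */

            up_read(&bg_b->data_rwsem);
            up_read(&bg_a->data_rwsem);
    }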
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f2b281ad7af6..1e861a063721 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3835,10 +3835,7 @@ cache_acl:
3835 break; 3835 break;
3836 case S_IFDIR: 3836 case S_IFDIR:
3837 inode->i_fop = &btrfs_dir_file_operations; 3837 inode->i_fop = &btrfs_dir_file_operations;
3838 if (root == fs_info->tree_root) 3838 inode->i_op = &btrfs_dir_inode_operations;
3839 inode->i_op = &btrfs_dir_ro_inode_operations;
3840 else
3841 inode->i_op = &btrfs_dir_inode_operations;
3842 break; 3839 break;
3843 case S_IFLNK: 3840 case S_IFLNK:
3844 inode->i_op = &btrfs_symlink_inode_operations; 3841 inode->i_op = &btrfs_symlink_inode_operations;
@@ -4505,8 +4502,19 @@ search_again:
4505 if (found_type > min_type) { 4502 if (found_type > min_type) {
4506 del_item = 1; 4503 del_item = 1;
4507 } else { 4504 } else {
4508 if (item_end < new_size) 4505 if (item_end < new_size) {
4506 /*
	 4507 * In NO_HOLES mode, given the following mapping
	 4508 *
	 4509 * [0-4k][hole][8k-12k]
	 4510 *
	 4511 * truncating isize down to 6k wrongly leaves the
	 4512 * on-disk isize at 8k.
4513 */
4514 if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
4515 last_size = new_size;
4509 break; 4516 break;
4517 }
4510 if (found_key.offset >= new_size) 4518 if (found_key.offset >= new_size)
4511 del_item = 1; 4519 del_item = 1;
4512 else 4520 else
@@ -5710,6 +5718,7 @@ static struct inode *new_simple_dir(struct super_block *s,
5710 5718
5711 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 5719 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5712 inode->i_op = &btrfs_dir_ro_inode_operations; 5720 inode->i_op = &btrfs_dir_ro_inode_operations;
5721 inode->i_opflags &= ~IOP_XATTR;
5713 inode->i_fop = &simple_dir_operations; 5722 inode->i_fop = &simple_dir_operations;
5714 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; 5723 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5715 inode->i_mtime = current_time(inode); 5724 inode->i_mtime = current_time(inode);
@@ -7059,7 +7068,7 @@ insert:
7059 write_unlock(&em_tree->lock); 7068 write_unlock(&em_tree->lock);
7060out: 7069out:
7061 7070
7062 trace_btrfs_get_extent(root, em); 7071 trace_btrfs_get_extent(root, inode, em);
7063 7072
7064 btrfs_free_path(path); 7073 btrfs_free_path(path);
7065 if (trans) { 7074 if (trans) {
@@ -7215,7 +7224,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
7215 struct extent_map *em = NULL; 7224 struct extent_map *em = NULL;
7216 int ret; 7225 int ret;
7217 7226
7218 down_read(&BTRFS_I(inode)->dio_sem);
7219 if (type != BTRFS_ORDERED_NOCOW) { 7227 if (type != BTRFS_ORDERED_NOCOW) {
7220 em = create_pinned_em(inode, start, len, orig_start, 7228 em = create_pinned_em(inode, start, len, orig_start,
7221 block_start, block_len, orig_block_len, 7229 block_start, block_len, orig_block_len,
@@ -7234,7 +7242,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
7234 em = ERR_PTR(ret); 7242 em = ERR_PTR(ret);
7235 } 7243 }
7236 out: 7244 out:
7237 up_read(&BTRFS_I(inode)->dio_sem);
7238 7245
7239 return em; 7246 return em;
7240} 7247}
@@ -7623,11 +7630,18 @@ static void adjust_dio_outstanding_extents(struct inode *inode,
7623 * within our reservation, otherwise we need to adjust our inode 7630 * within our reservation, otherwise we need to adjust our inode
7624 * counter appropriately. 7631 * counter appropriately.
7625 */ 7632 */
7626 if (dio_data->outstanding_extents) { 7633 if (dio_data->outstanding_extents >= num_extents) {
7627 dio_data->outstanding_extents -= num_extents; 7634 dio_data->outstanding_extents -= num_extents;
7628 } else { 7635 } else {
7636 /*
	 7637 * If the dio write was split because no large enough contiguous
	 7638 * space was available, we need to compensate the inode's
	 7639 * outstanding extents counter accordingly.
7640 */
7641 u64 num_needed = num_extents - dio_data->outstanding_extents;
7642
7629 spin_lock(&BTRFS_I(inode)->lock); 7643 spin_lock(&BTRFS_I(inode)->lock);
7630 BTRFS_I(inode)->outstanding_extents += num_extents; 7644 BTRFS_I(inode)->outstanding_extents += num_needed;
7631 spin_unlock(&BTRFS_I(inode)->lock); 7645 spin_unlock(&BTRFS_I(inode)->lock);
7632 } 7646 }
7633} 7647}
@@ -8685,6 +8699,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
8685 dio_data.unsubmitted_oe_range_start = (u64)offset; 8699 dio_data.unsubmitted_oe_range_start = (u64)offset;
8686 dio_data.unsubmitted_oe_range_end = (u64)offset; 8700 dio_data.unsubmitted_oe_range_end = (u64)offset;
8687 current->journal_info = &dio_data; 8701 current->journal_info = &dio_data;
8702 down_read(&BTRFS_I(inode)->dio_sem);
8688 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, 8703 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8689 &BTRFS_I(inode)->runtime_flags)) { 8704 &BTRFS_I(inode)->runtime_flags)) {
8690 inode_dio_end(inode); 8705 inode_dio_end(inode);
@@ -8697,6 +8712,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
8697 iter, btrfs_get_blocks_direct, NULL, 8712 iter, btrfs_get_blocks_direct, NULL,
8698 btrfs_submit_direct, flags); 8713 btrfs_submit_direct, flags);
8699 if (iov_iter_rw(iter) == WRITE) { 8714 if (iov_iter_rw(iter) == WRITE) {
8715 up_read(&BTRFS_I(inode)->dio_sem);
8700 current->journal_info = NULL; 8716 current->journal_info = NULL;
8701 if (ret < 0 && ret != -EIOCBQUEUED) { 8717 if (ret < 0 && ret != -EIOCBQUEUED) {
8702 if (dio_data.reserve) 8718 if (dio_data.reserve)
@@ -9205,6 +9221,7 @@ static int btrfs_truncate(struct inode *inode)
9205 break; 9221 break;
9206 } 9222 }
9207 9223
9224 btrfs_block_rsv_release(fs_info, rsv, -1);
9208 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, 9225 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
9209 rsv, min_size, 0); 9226 rsv, min_size, 0);
9210 BUG_ON(ret); /* shouldn't happen */ 9227 BUG_ON(ret); /* shouldn't happen */
@@ -10572,8 +10589,6 @@ static const struct inode_operations btrfs_dir_inode_operations = {
10572static const struct inode_operations btrfs_dir_ro_inode_operations = { 10589static const struct inode_operations btrfs_dir_ro_inode_operations = {
10573 .lookup = btrfs_lookup, 10590 .lookup = btrfs_lookup,
10574 .permission = btrfs_permission, 10591 .permission = btrfs_permission,
10575 .get_acl = btrfs_get_acl,
10576 .set_acl = btrfs_set_acl,
10577 .update_time = btrfs_update_time, 10592 .update_time = btrfs_update_time,
10578}; 10593};
10579 10594
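
[Editor's note] The compensation arithmetic in adjust_dio_outstanding_extents(), with illustrative numbers: suppose the reservation assumed the write was one extent (outstanding_extents == 1) but the allocator had to split it into four (num_extents == 4). The old code only subtracted when the counter was nonzero; the fixed version credits the shortfall of three back to the inode, mirroring the hunk above:

    if (dio_data->outstanding_extents >= num_extents) {
            /* common case: consume part of the reservation */
            dio_data->outstanding_extents -= num_extents;
    } else {
            /* split write: 4 needed, 1 reserved -> credit 3 back */
            u64 num_needed = num_extents - dio_data->outstanding_extents;

            spin_lock(&BTRFS_I(inode)->lock);
            BTRFS_I(inode)->outstanding_extents += num_needed;
            spin_unlock(&BTRFS_I(inode)->lock);
    }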
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 33f967d30b2a..21e51b0ba188 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -5653,6 +5653,10 @@ long btrfs_ioctl(struct file *file, unsigned int
5653#ifdef CONFIG_COMPAT 5653#ifdef CONFIG_COMPAT
5654long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 5654long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5655{ 5655{
5656 /*
5657 * These all access 32-bit values anyway so no further
5658 * handling is necessary.
5659 */
5656 switch (cmd) { 5660 switch (cmd) {
5657 case FS_IOC32_GETFLAGS: 5661 case FS_IOC32_GETFLAGS:
5658 cmd = FS_IOC_GETFLAGS; 5662 cmd = FS_IOC_GETFLAGS;
@@ -5663,8 +5667,6 @@ long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5663 case FS_IOC32_GETVERSION: 5667 case FS_IOC32_GETVERSION:
5664 cmd = FS_IOC_GETVERSION; 5668 cmd = FS_IOC_GETVERSION;
5665 break; 5669 break;
5666 default:
5667 return -ENOIOCTLCMD;
5668 } 5670 }
5669 5671
5670 return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); 5672 return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f10bf5213ed8..eeffff84f280 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -37,6 +37,7 @@
37 */ 37 */
38#define LOG_INODE_ALL 0 38#define LOG_INODE_ALL 0
39#define LOG_INODE_EXISTS 1 39#define LOG_INODE_EXISTS 1
40#define LOG_OTHER_INODE 2
40 41
41/* 42/*
42 * directory trouble cases 43 * directory trouble cases
@@ -4641,7 +4642,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4641 if (S_ISDIR(inode->i_mode) || 4642 if (S_ISDIR(inode->i_mode) ||
4642 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 4643 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4643 &BTRFS_I(inode)->runtime_flags) && 4644 &BTRFS_I(inode)->runtime_flags) &&
4644 inode_only == LOG_INODE_EXISTS)) 4645 inode_only >= LOG_INODE_EXISTS))
4645 max_key.type = BTRFS_XATTR_ITEM_KEY; 4646 max_key.type = BTRFS_XATTR_ITEM_KEY;
4646 else 4647 else
4647 max_key.type = (u8)-1; 4648 max_key.type = (u8)-1;
@@ -4665,7 +4666,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4665 return ret; 4666 return ret;
4666 } 4667 }
4667 4668
4668 mutex_lock(&BTRFS_I(inode)->log_mutex); 4669 if (inode_only == LOG_OTHER_INODE) {
4670 inode_only = LOG_INODE_EXISTS;
4671 mutex_lock_nested(&BTRFS_I(inode)->log_mutex,
4672 SINGLE_DEPTH_NESTING);
4673 } else {
4674 mutex_lock(&BTRFS_I(inode)->log_mutex);
4675 }
4669 4676
4670 /* 4677 /*
4671 * a brute force approach to making sure we get the most uptodate 4678 * a brute force approach to making sure we get the most uptodate
@@ -4817,7 +4824,7 @@ again:
4817 * unpin it. 4824 * unpin it.
4818 */ 4825 */
4819 err = btrfs_log_inode(trans, root, other_inode, 4826 err = btrfs_log_inode(trans, root, other_inode,
4820 LOG_INODE_EXISTS, 4827 LOG_OTHER_INODE,
4821 0, LLONG_MAX, ctx); 4828 0, LLONG_MAX, ctx);
4822 iput(other_inode); 4829 iput(other_inode);
4823 if (err) 4830 if (err)
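
[Editor's note] LOG_OTHER_INODE exists only to carry the fact "the caller already holds another inode's log_mutex" into btrfs_log_inode(), where it is immediately demoted back to LOG_INODE_EXISTS and the mutex is taken with mutex_lock_nested(). As with the rwsem case earlier, this is purely a lockdep annotation for two mutexes of the same lock class; a condensed sketch (helper name is mine):

    static void lock_two_inode_logs(struct inode *a, struct inode *b)
    {
            mutex_lock(&BTRFS_I(a)->log_mutex);
            /* second mutex of the same class: tell lockdep the one-level
             * nesting is intentional, not a self-deadlock */
            mutex_lock_nested(&BTRFS_I(b)->log_mutex, SINGLE_DEPTH_NESTING);
    }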
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 161342b73ce5..726f928238d0 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -352,7 +352,5 @@ skip:
352 352
353out: 353out:
354 btrfs_free_path(path); 354 btrfs_free_path(path);
355 if (ret) 355 return ret;
356 btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
357 return 0;
358} 356}
diff --git a/fs/buffer.c b/fs/buffer.c
index d21771fcf7d3..0e87401cf335 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1660,7 +1660,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1660 head = page_buffers(page); 1660 head = page_buffers(page);
1661 bh = head; 1661 bh = head;
1662 do { 1662 do {
1663 if (!buffer_mapped(bh)) 1663 if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1664 goto next; 1664 goto next;
1665 if (bh->b_blocknr >= block + len) 1665 if (bh->b_blocknr >= block + len)
1666 break; 1666 break;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 9cd0c0ea7cdb..e4b066cd912a 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -502,9 +502,9 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
502 dout(" head snapc %p has %d dirty pages\n", 502 dout(" head snapc %p has %d dirty pages\n",
503 snapc, ci->i_wrbuffer_ref_head); 503 snapc, ci->i_wrbuffer_ref_head);
504 if (truncate_size) 504 if (truncate_size)
505 *truncate_size = capsnap->truncate_size; 505 *truncate_size = ci->i_truncate_size;
506 if (truncate_seq) 506 if (truncate_seq)
507 *truncate_seq = capsnap->truncate_seq; 507 *truncate_seq = ci->i_truncate_seq;
508 } 508 }
509 spin_unlock(&ci->i_ceph_lock); 509 spin_unlock(&ci->i_ceph_lock);
510 return snapc; 510 return snapc;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index baea866a6751..94fd76d04683 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2591,8 +2591,13 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
2591 add_wait_queue(&ci->i_cap_wq, &wait); 2591 add_wait_queue(&ci->i_cap_wq, &wait);
2592 2592
2593 while (!try_get_cap_refs(ci, need, want, endoff, 2593 while (!try_get_cap_refs(ci, need, want, endoff,
2594 true, &_got, &err)) 2594 true, &_got, &err)) {
2595 if (signal_pending(current)) {
2596 ret = -ERESTARTSYS;
2597 break;
2598 }
2595 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 2599 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2600 }
2596 2601
2597 remove_wait_queue(&ci->i_cap_wq, &wait); 2602 remove_wait_queue(&ci->i_cap_wq, &wait);
2598 2603
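
[Editor's note] wait_woken() with TASK_INTERRUPTIBLE is woken by a signal but does not report it, so a loop that never tests signal_pending() can leave the task retrying forever; the fix bails out with -ERESTARTSYS. The general loop shape, as a sketch (the wrapper name and condition callback are illustrative, not kernel API):

    static long wait_for_cond(wait_queue_head_t *wq,
                              bool (*cond)(void *), void *arg)
    {
            DEFINE_WAIT_FUNC(wait, woken_wake_function);
            long ret = 0;

            add_wait_queue(wq, &wait);
            while (!cond(arg)) {
                    if (signal_pending(current)) {
                            ret = -ERESTARTSYS;   /* let the syscall restart */
                            break;
                    }
                    wait_woken(&wait, TASK_INTERRUPTIBLE,
                               MAX_SCHEDULE_TIMEOUT);
            }
            remove_wait_queue(wq, &wait);
            return ret;
    }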
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index d7a93696663b..8ab1fdf0bd49 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1230,7 +1230,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
1230 struct ceph_mds_client *mdsc = 1230 struct ceph_mds_client *mdsc =
1231 ceph_sb_to_client(dir->i_sb)->mdsc; 1231 ceph_sb_to_client(dir->i_sb)->mdsc;
1232 struct ceph_mds_request *req; 1232 struct ceph_mds_request *req;
1233 int op, mask, err; 1233 int op, err;
1234 u32 mask;
1234 1235
1235 if (flags & LOOKUP_RCU) 1236 if (flags & LOOKUP_RCU)
1236 return -ECHILD; 1237 return -ECHILD;
@@ -1245,7 +1246,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
1245 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; 1246 mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
1246 if (ceph_security_xattr_wanted(dir)) 1247 if (ceph_security_xattr_wanted(dir))
1247 mask |= CEPH_CAP_XATTR_SHARED; 1248 mask |= CEPH_CAP_XATTR_SHARED;
1248 req->r_args.getattr.mask = mask; 1249 req->r_args.getattr.mask = cpu_to_le32(mask);
1249 1250
1250 err = ceph_mdsc_do_request(mdsc, NULL, req); 1251 err = ceph_mdsc_do_request(mdsc, NULL, req);
1251 switch (err) { 1252 switch (err) {
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 398e5328b309..5e659d054b40 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -305,7 +305,8 @@ static int frag_tree_split_cmp(const void *l, const void *r)
305{ 305{
306 struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l; 306 struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
307 struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r; 307 struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
308 return ceph_frag_compare(ls->frag, rs->frag); 308 return ceph_frag_compare(le32_to_cpu(ls->frag),
309 le32_to_cpu(rs->frag));
309} 310}
310 311
311static bool is_frag_child(u32 f, struct ceph_inode_frag *frag) 312static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 4f49253387a0..c9d2e553a6c4 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -288,12 +288,13 @@ static int parse_reply_info_extra(void **p, void *end,
288 struct ceph_mds_reply_info_parsed *info, 288 struct ceph_mds_reply_info_parsed *info,
289 u64 features) 289 u64 features)
290{ 290{
291 if (info->head->op == CEPH_MDS_OP_GETFILELOCK) 291 u32 op = le32_to_cpu(info->head->op);
292
293 if (op == CEPH_MDS_OP_GETFILELOCK)
292 return parse_reply_info_filelock(p, end, info, features); 294 return parse_reply_info_filelock(p, end, info, features);
293 else if (info->head->op == CEPH_MDS_OP_READDIR || 295 else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
294 info->head->op == CEPH_MDS_OP_LSSNAP)
295 return parse_reply_info_dir(p, end, info, features); 296 return parse_reply_info_dir(p, end, info, features);
296 else if (info->head->op == CEPH_MDS_OP_CREATE) 297 else if (op == CEPH_MDS_OP_CREATE)
297 return parse_reply_info_create(p, end, info, features); 298 return parse_reply_info_create(p, end, info, features);
298 else 299 else
299 return -EIO; 300 return -EIO;
@@ -2106,6 +2107,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
2106 dout("do_request mdsmap err %d\n", err); 2107 dout("do_request mdsmap err %d\n", err);
2107 goto finish; 2108 goto finish;
2108 } 2109 }
2110 if (mdsc->mdsmap->m_epoch == 0) {
2111 dout("do_request no mdsmap, waiting for map\n");
2112 list_add(&req->r_wait, &mdsc->waiting_for_map);
2113 goto finish;
2114 }
2109 if (!(mdsc->fsc->mount_options->flags & 2115 if (!(mdsc->fsc->mount_options->flags &
2110 CEPH_MOUNT_OPT_MOUNTWAIT) && 2116 CEPH_MOUNT_OPT_MOUNTWAIT) &&
2111 !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) { 2117 !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
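
[Editor's note] The first two hunks in this file are the same class of bug: fields that arrive on the wire as __le32 were used raw, which happens to work on little-endian hosts and silently misbehaves on big-endian ones. Converting once with le32_to_cpu() before comparing or dispatching fixes the ordering. The comparator after the fix, essentially as in the hunk:

    static int frag_tree_split_cmp(const void *l, const void *r)
    {
            const struct ceph_frag_tree_split *ls = l, *rs = r;

            /* on-wire __le32: convert before comparing, or big-endian
             * hosts sort the splits in the wrong order */
            return ceph_frag_compare(le32_to_cpu(ls->frag),
                                     le32_to_cpu(rs->frag));
    }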
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 8f6a2a5863b9..a27fc8791551 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
285 rc = -ENOMEM; 285 rc = -ENOMEM;
286 goto error_exit; 286 goto error_exit;
287 } 287 }
288 spin_lock_init(&cifsFile->file_info_lock);
288 file->private_data = cifsFile; 289 file->private_data = cifsFile;
289 cifsFile->tlink = cifs_get_tlink(tlink); 290 cifsFile->tlink = cifs_get_tlink(tlink);
290 tcon = tlink_tcon(tlink); 291 tcon = tlink_tcon(tlink);
diff --git a/fs/coredump.c b/fs/coredump.c
index e525b6017cdf..ae6b05629ca1 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -833,3 +833,21 @@ int dump_align(struct coredump_params *cprm, int align)
833 return mod ? dump_skip(cprm, align - mod) : 1; 833 return mod ? dump_skip(cprm, align - mod) : 1;
834} 834}
835EXPORT_SYMBOL(dump_align); 835EXPORT_SYMBOL(dump_align);
836
837/*
838 * Ensures that file size is big enough to contain the current file
 839 * position. This prevents gdb from complaining about a truncated file
840 * if the last "write" to the file was dump_skip.
841 */
842void dump_truncate(struct coredump_params *cprm)
843{
844 struct file *file = cprm->file;
845 loff_t offset;
846
847 if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
848 offset = file->f_op->llseek(file, 0, SEEK_CUR);
849 if (i_size_read(file->f_mapping->host) < offset)
850 do_truncate(file->f_path.dentry, offset, 0, file);
851 }
852}
853EXPORT_SYMBOL(dump_truncate);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 6eeea1dcba41..95cd4c3b06c3 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -248,7 +248,8 @@ retry:
248 goto out; 248 goto out;
249 249
250 if (fscrypt_dummy_context_enabled(inode)) { 250 if (fscrypt_dummy_context_enabled(inode)) {
251 memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE); 251 memset(raw_key, 0x42, keysize/2);
252 memset(raw_key+keysize/2, 0x24, keysize - (keysize/2));
252 goto got_key; 253 goto got_key;
253 } 254 }
254 255
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 6ed7c2eebeec..d6cd7ea4851d 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -179,6 +179,11 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
179 BUG_ON(1); 179 BUG_ON(1);
180 } 180 }
181 181
182 /* No restrictions on file types which are never encrypted */
183 if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
184 !S_ISLNK(child->i_mode))
185 return 1;
186
182 /* no restrictions if the parent directory is not encrypted */ 187 /* no restrictions if the parent directory is not encrypted */
183 if (!parent->i_sb->s_cop->is_encrypted(parent)) 188 if (!parent->i_sb->s_cop->is_encrypted(parent))
184 return 1; 189 return 1;
diff --git a/fs/dax.c b/fs/dax.c
index a8732fbed381..c45598b912e1 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -451,16 +451,37 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
451 __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key); 451 __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
452} 452}
453 453
454static int __dax_invalidate_mapping_entry(struct address_space *mapping,
455 pgoff_t index, bool trunc)
456{
457 int ret = 0;
458 void *entry;
459 struct radix_tree_root *page_tree = &mapping->page_tree;
460
461 spin_lock_irq(&mapping->tree_lock);
462 entry = get_unlocked_mapping_entry(mapping, index, NULL);
463 if (!entry || !radix_tree_exceptional_entry(entry))
464 goto out;
465 if (!trunc &&
466 (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
467 radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
468 goto out;
469 radix_tree_delete(page_tree, index);
470 mapping->nrexceptional--;
471 ret = 1;
472out:
473 put_unlocked_mapping_entry(mapping, index, entry);
474 spin_unlock_irq(&mapping->tree_lock);
475 return ret;
476}
454/* 477/*
455 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree 478 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
456 * entry to get unlocked before deleting it. 479 * entry to get unlocked before deleting it.
457 */ 480 */
458int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) 481int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
459{ 482{
460 void *entry; 483 int ret = __dax_invalidate_mapping_entry(mapping, index, true);
461 484
462 spin_lock_irq(&mapping->tree_lock);
463 entry = get_unlocked_mapping_entry(mapping, index, NULL);
464 /* 485 /*
465 * This gets called from truncate / punch_hole path. As such, the caller 486 * This gets called from truncate / punch_hole path. As such, the caller
466 * must hold locks protecting against concurrent modifications of the 487 * must hold locks protecting against concurrent modifications of the
@@ -468,16 +489,46 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
468 * caller has seen exceptional entry for this index, we better find it 489 * caller has seen exceptional entry for this index, we better find it
469 * at that index as well... 490 * at that index as well...
470 */ 491 */
471 if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) { 492 WARN_ON_ONCE(!ret);
472 spin_unlock_irq(&mapping->tree_lock); 493 return ret;
473 return 0; 494}
474 } 495
475 radix_tree_delete(&mapping->page_tree, index); 496/*
497 * Invalidate exceptional DAX entry if easily possible. This handles DAX
498 * entries for invalidate_inode_pages() so we evict the entry only if we can
499 * do so without blocking.
500 */
501int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
502{
503 int ret = 0;
504 void *entry, **slot;
505 struct radix_tree_root *page_tree = &mapping->page_tree;
506
507 spin_lock_irq(&mapping->tree_lock);
508 entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
509 if (!entry || !radix_tree_exceptional_entry(entry) ||
510 slot_locked(mapping, slot))
511 goto out;
512 if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
513 radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
514 goto out;
515 radix_tree_delete(page_tree, index);
476 mapping->nrexceptional--; 516 mapping->nrexceptional--;
517 ret = 1;
518out:
477 spin_unlock_irq(&mapping->tree_lock); 519 spin_unlock_irq(&mapping->tree_lock);
478 dax_wake_mapping_entry_waiter(mapping, index, entry, true); 520 if (ret)
521 dax_wake_mapping_entry_waiter(mapping, index, entry, true);
522 return ret;
523}
479 524
480 return 1; 525/*
526 * Invalidate exceptional DAX entry if it is clean.
527 */
528int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
529 pgoff_t index)
530{
531 return __dax_invalidate_mapping_entry(mapping, index, false);
481} 532}
482 533
483/* 534/*
@@ -488,15 +539,16 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
488 * otherwise it will simply fall out of the page cache under memory 539 * otherwise it will simply fall out of the page cache under memory
489 * pressure without ever having been dirtied. 540 * pressure without ever having been dirtied.
490 */ 541 */
491static int dax_load_hole(struct address_space *mapping, void *entry, 542static int dax_load_hole(struct address_space *mapping, void **entry,
492 struct vm_fault *vmf) 543 struct vm_fault *vmf)
493{ 544{
494 struct page *page; 545 struct page *page;
546 int ret;
495 547
496 /* Hole page already exists? Return it... */ 548 /* Hole page already exists? Return it... */
497 if (!radix_tree_exceptional_entry(entry)) { 549 if (!radix_tree_exceptional_entry(*entry)) {
498 vmf->page = entry; 550 page = *entry;
499 return VM_FAULT_LOCKED; 551 goto out;
500 } 552 }
501 553
502 /* This will replace locked radix tree entry with a hole page */ 554 /* This will replace locked radix tree entry with a hole page */
@@ -504,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
504 vmf->gfp_mask | __GFP_ZERO); 556 vmf->gfp_mask | __GFP_ZERO);
505 if (!page) 557 if (!page)
506 return VM_FAULT_OOM; 558 return VM_FAULT_OOM;
559 out:
507 vmf->page = page; 560 vmf->page = page;
508 return VM_FAULT_LOCKED; 561 ret = finish_fault(vmf);
562 vmf->page = NULL;
563 *entry = page;
564 if (!ret) {
565 /* Grab reference for PTE that is now referencing the page */
566 get_page(page);
567 return VM_FAULT_NOPAGE;
568 }
569 return ret;
509} 570}
510 571
511static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size, 572static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
@@ -630,8 +691,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
630 pgoff_t index, unsigned long pfn) 691 pgoff_t index, unsigned long pfn)
631{ 692{
632 struct vm_area_struct *vma; 693 struct vm_area_struct *vma;
633 pte_t *ptep; 694 pte_t pte, *ptep = NULL;
634 pte_t pte; 695 pmd_t *pmdp = NULL;
635 spinlock_t *ptl; 696 spinlock_t *ptl;
636 bool changed; 697 bool changed;
637 698
@@ -646,21 +707,42 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
646 707
647 address = pgoff_address(index, vma); 708 address = pgoff_address(index, vma);
648 changed = false; 709 changed = false;
649 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) 710 if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
650 continue; 711 continue;
651 if (pfn != pte_pfn(*ptep))
652 goto unlock;
653 if (!pte_dirty(*ptep) && !pte_write(*ptep))
654 goto unlock;
655 712
656 flush_cache_page(vma, address, pfn); 713 if (pmdp) {
657 pte = ptep_clear_flush(vma, address, ptep); 714#ifdef CONFIG_FS_DAX_PMD
658 pte = pte_wrprotect(pte); 715 pmd_t pmd;
659 pte = pte_mkclean(pte); 716
660 set_pte_at(vma->vm_mm, address, ptep, pte); 717 if (pfn != pmd_pfn(*pmdp))
661 changed = true; 718 goto unlock_pmd;
662unlock: 719 if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
663 pte_unmap_unlock(ptep, ptl); 720 goto unlock_pmd;
721
722 flush_cache_page(vma, address, pfn);
723 pmd = pmdp_huge_clear_flush(vma, address, pmdp);
724 pmd = pmd_wrprotect(pmd);
725 pmd = pmd_mkclean(pmd);
726 set_pmd_at(vma->vm_mm, address, pmdp, pmd);
727 changed = true;
728unlock_pmd:
729 spin_unlock(ptl);
730#endif
731 } else {
732 if (pfn != pte_pfn(*ptep))
733 goto unlock_pte;
734 if (!pte_dirty(*ptep) && !pte_write(*ptep))
735 goto unlock_pte;
736
737 flush_cache_page(vma, address, pfn);
738 pte = ptep_clear_flush(vma, address, ptep);
739 pte = pte_wrprotect(pte);
740 pte = pte_mkclean(pte);
741 set_pte_at(vma->vm_mm, address, ptep, pte);
742 changed = true;
743unlock_pte:
744 pte_unmap_unlock(ptep, ptl);
745 }
664 746
665 if (changed) 747 if (changed)
666 mmu_notifier_invalidate_page(vma->vm_mm, address); 748 mmu_notifier_invalidate_page(vma->vm_mm, address);
@@ -908,7 +990,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
908} 990}
909EXPORT_SYMBOL_GPL(__dax_zero_page_range); 991EXPORT_SYMBOL_GPL(__dax_zero_page_range);
910 992
911#ifdef CONFIG_FS_IOMAP
912static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos) 993static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
913{ 994{
914 return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9); 995 return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
@@ -934,11 +1015,27 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
934 if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED)) 1015 if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
935 return -EIO; 1016 return -EIO;
936 1017
1018 /*
 1019 * Write can allocate a block for an area which has a hole page mapped
1020 * into page tables. We have to tear down these mappings so that data
1021 * written by write(2) is visible in mmap.
1022 */
1023 if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
1024 invalidate_inode_pages2_range(inode->i_mapping,
1025 pos >> PAGE_SHIFT,
1026 (end - 1) >> PAGE_SHIFT);
1027 }
1028
937 while (pos < end) { 1029 while (pos < end) {
938 unsigned offset = pos & (PAGE_SIZE - 1); 1030 unsigned offset = pos & (PAGE_SIZE - 1);
939 struct blk_dax_ctl dax = { 0 }; 1031 struct blk_dax_ctl dax = { 0 };
940 ssize_t map_len; 1032 ssize_t map_len;
941 1033
1034 if (fatal_signal_pending(current)) {
1035 ret = -EINTR;
1036 break;
1037 }
1038
942 dax.sector = dax_iomap_sector(iomap, pos); 1039 dax.sector = dax_iomap_sector(iomap, pos);
943 dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK; 1040 dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
944 map_len = dax_map_atomic(iomap->bdev, &dax); 1041 map_len = dax_map_atomic(iomap->bdev, &dax);
@@ -992,23 +1089,6 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
992 if (iov_iter_rw(iter) == WRITE) 1089 if (iov_iter_rw(iter) == WRITE)
993 flags |= IOMAP_WRITE; 1090 flags |= IOMAP_WRITE;
994 1091
995 /*
996 * Yes, even DAX files can have page cache attached to them: A zeroed
997 * page is inserted into the pagecache when we have to serve a write
998 * fault on a hole. It should never be dirtied and can simply be
999 * dropped from the pagecache once we get real data for the page.
1000 *
1001 * XXX: This is racy against mmap, and there's nothing we can do about
1002 * it. We'll eventually need to shift this down even further so that
1003 * we can check if we allocated blocks over a hole first.
1004 */
1005 if (mapping->nrpages) {
1006 ret = invalidate_inode_pages2_range(mapping,
1007 pos >> PAGE_SHIFT,
1008 (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
1009 WARN_ON_ONCE(ret);
1010 }
1011
1012 while (iov_iter_count(iter)) { 1092 while (iov_iter_count(iter)) {
1013 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, 1093 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1014 iter, dax_iomap_actor); 1094 iter, dax_iomap_actor);
@@ -1023,6 +1103,15 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1023} 1103}
1024EXPORT_SYMBOL_GPL(dax_iomap_rw); 1104EXPORT_SYMBOL_GPL(dax_iomap_rw);
1025 1105
1106static int dax_fault_return(int error)
1107{
1108 if (error == 0)
1109 return VM_FAULT_NOPAGE;
1110 if (error == -ENOMEM)
1111 return VM_FAULT_OOM;
1112 return VM_FAULT_SIGBUS;
1113}
1114
1026/** 1115/**
1027 * dax_iomap_fault - handle a page fault on a DAX file 1116 * dax_iomap_fault - handle a page fault on a DAX file
1028 * @vma: The virtual memory area where the fault occurred 1117 * @vma: The virtual memory area where the fault occurred
@@ -1055,12 +1144,6 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
1055 if (pos >= i_size_read(inode)) 1144 if (pos >= i_size_read(inode))
1056 return VM_FAULT_SIGBUS; 1145 return VM_FAULT_SIGBUS;
1057 1146
1058 entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1059 if (IS_ERR(entry)) {
1060 error = PTR_ERR(entry);
1061 goto out;
1062 }
1063
1064 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) 1147 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1065 flags |= IOMAP_WRITE; 1148 flags |= IOMAP_WRITE;
1066 1149
@@ -1071,9 +1154,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
1071 */ 1154 */
1072 error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap); 1155 error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1073 if (error) 1156 if (error)
1074 goto unlock_entry; 1157 return dax_fault_return(error);
1075 if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) { 1158 if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1076 error = -EIO; /* fs corruption? */ 1159 vmf_ret = dax_fault_return(-EIO); /* fs corruption? */
1160 goto finish_iomap;
1161 }
1162
1163 entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1164 if (IS_ERR(entry)) {
1165 vmf_ret = dax_fault_return(PTR_ERR(entry));
1077 goto finish_iomap; 1166 goto finish_iomap;
1078 } 1167 }
1079 1168
@@ -1096,13 +1185,13 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
1096 } 1185 }
1097 1186
1098 if (error) 1187 if (error)
1099 goto finish_iomap; 1188 goto error_unlock_entry;
1100 1189
1101 __SetPageUptodate(vmf->cow_page); 1190 __SetPageUptodate(vmf->cow_page);
1102 vmf_ret = finish_fault(vmf); 1191 vmf_ret = finish_fault(vmf);
1103 if (!vmf_ret) 1192 if (!vmf_ret)
1104 vmf_ret = VM_FAULT_DONE_COW; 1193 vmf_ret = VM_FAULT_DONE_COW;
1105 goto finish_iomap; 1194 goto unlock_entry;
1106 } 1195 }
1107 1196
1108 switch (iomap.type) { 1197 switch (iomap.type) {
@@ -1114,12 +1203,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
1114 } 1203 }
1115 error = dax_insert_mapping(mapping, iomap.bdev, sector, 1204 error = dax_insert_mapping(mapping, iomap.bdev, sector,
1116 PAGE_SIZE, &entry, vma, vmf); 1205 PAGE_SIZE, &entry, vma, vmf);
1206 /* -EBUSY is fine, somebody else faulted on the same PTE */
1207 if (error == -EBUSY)
1208 error = 0;
1117 break; 1209 break;
1118 case IOMAP_UNWRITTEN: 1210 case IOMAP_UNWRITTEN:
1119 case IOMAP_HOLE: 1211 case IOMAP_HOLE:
1120 if (!(vmf->flags & FAULT_FLAG_WRITE)) { 1212 if (!(vmf->flags & FAULT_FLAG_WRITE)) {
1121 vmf_ret = dax_load_hole(mapping, entry, vmf); 1213 vmf_ret = dax_load_hole(mapping, &entry, vmf);
1122 break; 1214 goto unlock_entry;
1123 } 1215 }
1124 /*FALLTHRU*/ 1216 /*FALLTHRU*/
1125 default: 1217 default:
@@ -1128,31 +1220,25 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
1128 break; 1220 break;
1129 } 1221 }
1130 1222
1223 error_unlock_entry:
1224 vmf_ret = dax_fault_return(error) | major;
1225 unlock_entry:
1226 put_locked_mapping_entry(mapping, vmf->pgoff, entry);
1131 finish_iomap: 1227 finish_iomap:
1132 if (ops->iomap_end) { 1228 if (ops->iomap_end) {
1133 if (error || (vmf_ret & VM_FAULT_ERROR)) { 1229 int copied = PAGE_SIZE;
1134 /* keep previous error */ 1230
1135 ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags, 1231 if (vmf_ret & VM_FAULT_ERROR)
1136 &iomap); 1232 copied = 0;
1137 } else { 1233 /*
1138 error = ops->iomap_end(inode, pos, PAGE_SIZE, 1234 * The fault is done by now and there's no way back (other
1139 PAGE_SIZE, flags, &iomap); 1235 * thread may be already happily using PTE we have installed).
1140 } 1236 * Just ignore error from ->iomap_end since we cannot do much
1141 } 1237 * with it.
1142 unlock_entry: 1238 */
1143 if (vmf_ret != VM_FAULT_LOCKED || error) 1239 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1144 put_locked_mapping_entry(mapping, vmf->pgoff, entry);
1145 out:
1146 if (error == -ENOMEM)
1147 return VM_FAULT_OOM | major;
1148 /* -EBUSY is fine, somebody else faulted on the same PTE */
1149 if (error < 0 && error != -EBUSY)
1150 return VM_FAULT_SIGBUS | major;
1151 if (vmf_ret) {
1152 WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
1153 return vmf_ret;
1154 } 1240 }
1155 return VM_FAULT_NOPAGE | major; 1241 return vmf_ret;
1156} 1242}
1157EXPORT_SYMBOL_GPL(dax_iomap_fault); 1243EXPORT_SYMBOL_GPL(dax_iomap_fault);
1158 1244
@@ -1277,16 +1363,6 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
1277 goto fallback; 1363 goto fallback;
1278 1364
1279 /* 1365 /*
1280 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
1281 * PMD or a HZP entry. If it can't (because a 4k page is already in
1282 * the tree, for instance), it will return -EEXIST and we just fall
1283 * back to 4k entries.
1284 */
1285 entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1286 if (IS_ERR(entry))
1287 goto fallback;
1288
1289 /*
1290 * Note that we don't use iomap_apply here. We aren't doing I/O, only 1366 * Note that we don't use iomap_apply here. We aren't doing I/O, only
1291 * setting up a mapping, so really we're using iomap_begin() as a way 1367 * setting up a mapping, so really we're using iomap_begin() as a way
1292 * to look up our filesystem block. 1368 * to look up our filesystem block.
@@ -1294,10 +1370,21 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
1294 pos = (loff_t)pgoff << PAGE_SHIFT; 1370 pos = (loff_t)pgoff << PAGE_SHIFT;
1295 error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap); 1371 error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1296 if (error) 1372 if (error)
1297 goto unlock_entry; 1373 goto fallback;
1374
1298 if (iomap.offset + iomap.length < pos + PMD_SIZE) 1375 if (iomap.offset + iomap.length < pos + PMD_SIZE)
1299 goto finish_iomap; 1376 goto finish_iomap;
1300 1377
1378 /*
1379 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
1380 * PMD or a HZP entry. If it can't (because a 4k page is already in
1381 * the tree, for instance), it will return -EEXIST and we just fall
1382 * back to 4k entries.
1383 */
1384 entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1385 if (IS_ERR(entry))
1386 goto finish_iomap;
1387
1301 vmf.pgoff = pgoff; 1388 vmf.pgoff = pgoff;
1302 vmf.flags = flags; 1389 vmf.flags = flags;
1303 vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO; 1390 vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;
@@ -1310,7 +1397,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
1310 case IOMAP_UNWRITTEN: 1397 case IOMAP_UNWRITTEN:
1311 case IOMAP_HOLE: 1398 case IOMAP_HOLE:
1312 if (WARN_ON_ONCE(write)) 1399 if (WARN_ON_ONCE(write))
1313 goto finish_iomap; 1400 goto unlock_entry;
1314 result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap, 1401 result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
1315 &entry); 1402 &entry);
1316 break; 1403 break;
@@ -1319,20 +1406,23 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
1319 break; 1406 break;
1320 } 1407 }
1321 1408
1409 unlock_entry:
1410 put_locked_mapping_entry(mapping, pgoff, entry);
1322 finish_iomap: 1411 finish_iomap:
1323 if (ops->iomap_end) { 1412 if (ops->iomap_end) {
1324 if (result == VM_FAULT_FALLBACK) { 1413 int copied = PMD_SIZE;
1325 ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags, 1414
1326 &iomap); 1415 if (result == VM_FAULT_FALLBACK)
1327 } else { 1416 copied = 0;
1328 error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE, 1417 /*
 1329 iomap_flags, &iomap);	 1418 * The fault is done by now and there's no way back (another
 1330 if (error)	 1419 * thread may already be happily using the PMD we have installed).
1331 result = VM_FAULT_FALLBACK; 1420 * Just ignore error from ->iomap_end since we cannot do much
1332 } 1421 * with it.
1422 */
1423 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1424 &iomap);
1333 } 1425 }
1334 unlock_entry:
1335 put_locked_mapping_entry(mapping, pgoff, entry);
1336 fallback: 1426 fallback:
1337 if (result == VM_FAULT_FALLBACK) { 1427 if (result == VM_FAULT_FALLBACK) {
1338 split_huge_pmd(vma, pmd, address); 1428 split_huge_pmd(vma, pmd, address);
@@ -1342,4 +1432,3 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
1342} 1432}
1343EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault); 1433EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
1344#endif /* CONFIG_FS_DAX_PMD */ 1434#endif /* CONFIG_FS_DAX_PMD */
1345#endif /* CONFIG_FS_IOMAP */
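
[Editor's note] Two patterns recur through this large dax.c change. The new __dax_invalidate_mapping_entry() helper enforces the eviction rule: an entry may only be dropped opportunistically when it is exceptional, unlocked, and carries neither the DIRTY nor the TOWRITE radix-tree tag; truncate (trunc == true) is the only caller allowed to drop dirty entries. The other is dax_fault_return(), which funnels every errno into a VM_FAULT code so the fault paths no longer juggle both. Its mapping and use, condensed from the hunks above:

    static int dax_fault_return(int error)
    {
            if (error == 0)
                    return VM_FAULT_NOPAGE;
            if (error == -ENOMEM)
                    return VM_FAULT_OOM;
            return VM_FAULT_SIGBUS;
    }

    /* usage in the fault handler: every early exit goes through
     * one translator instead of ad-hoc error-to-VM_FAULT logic */
    error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
    if (error)
            return dax_fault_return(error);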
diff --git a/fs/dcache.c b/fs/dcache.c
index 769903dbc19d..95d71eda8142 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1336,8 +1336,11 @@ int d_set_mounted(struct dentry *dentry)
1336 } 1336 }
1337 spin_lock(&dentry->d_lock); 1337 spin_lock(&dentry->d_lock);
1338 if (!d_unlinked(dentry)) { 1338 if (!d_unlinked(dentry)) {
1339 dentry->d_flags |= DCACHE_MOUNTED; 1339 ret = -EBUSY;
1340 ret = 0; 1340 if (!d_mountpoint(dentry)) {
1341 dentry->d_flags |= DCACHE_MOUNTED;
1342 ret = 0;
1343 }
1341 } 1344 }
1342 spin_unlock(&dentry->d_lock); 1345 spin_unlock(&dentry->d_lock);
1343out: 1346out:
diff --git a/fs/direct-io.c b/fs/direct-io.c
index aeae8c063451..c87bae4376b8 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -906,6 +906,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
906 struct buffer_head *map_bh) 906 struct buffer_head *map_bh)
907{ 907{
908 const unsigned blkbits = sdio->blkbits; 908 const unsigned blkbits = sdio->blkbits;
909 const unsigned i_blkbits = blkbits + sdio->blkfactor;
909 int ret = 0; 910 int ret = 0;
910 911
911 while (sdio->block_in_file < sdio->final_block_in_request) { 912 while (sdio->block_in_file < sdio->final_block_in_request) {
@@ -949,7 +950,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
949 clean_bdev_aliases( 950 clean_bdev_aliases(
950 map_bh->b_bdev, 951 map_bh->b_bdev,
951 map_bh->b_blocknr, 952 map_bh->b_blocknr,
952 map_bh->b_size >> blkbits); 953 map_bh->b_size >> i_blkbits);
953 } 954 }
954 955
955 if (!sdio->blkfactor) 956 if (!sdio->blkfactor)
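
[Editor's note] The unit bug here: map_bh->b_blocknr counts filesystem blocks, but b_size >> blkbits yields device-sized blocks, over-counting whenever the filesystem block is larger than the device block (blkfactor > 0). Worked numbers, as a standalone C illustration (values are assumptions, not from the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned blkbits = 9;                 /* 512-byte device blocks */
            unsigned blkfactor = 3;               /* 4096-byte fs blocks    */
            unsigned i_blkbits = blkbits + blkfactor;
            unsigned long b_size = 65536;         /* one 64 KiB mapping     */

            printf("device blocks: %lu\n", b_size >> blkbits);   /* 128 */
            printf("fs blocks:     %lu\n", b_size >> i_blkbits); /*  16 */
            return 0;
    }

Passing 128 instead of 16 to clean_bdev_aliases() made it invalidate buffers far beyond the mapped range.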
diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig
index 36bea5adcaba..c634874e12d9 100644
--- a/fs/ext2/Kconfig
+++ b/fs/ext2/Kconfig
@@ -1,6 +1,5 @@
1config EXT2_FS 1config EXT2_FS
2 tristate "Second extended fs support" 2 tristate "Second extended fs support"
3 select FS_IOMAP if FS_DAX
4 help 3 help
5 Ext2 is a standard Linux file system for hard disks. 4 Ext2 is a standard Linux file system for hard disks.
6 5
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 0093ea2512a8..f073bfca694b 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -751,9 +751,8 @@ static int ext2_get_blocks(struct inode *inode,
751 mutex_unlock(&ei->truncate_mutex); 751 mutex_unlock(&ei->truncate_mutex);
752 goto cleanup; 752 goto cleanup;
753 } 753 }
754 } else {
755 *new = true;
756 } 754 }
755 *new = true;
757 756
758 ext2_splice_branch(inode, iblock, partial, indirect_blks, count); 757 ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
759 mutex_unlock(&ei->truncate_mutex); 758 mutex_unlock(&ei->truncate_mutex);
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 7b90691e98c4..e38039fd96ff 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -37,7 +37,6 @@ config EXT4_FS
37 select CRC16 37 select CRC16
38 select CRYPTO 38 select CRYPTO
39 select CRYPTO_CRC32C 39 select CRYPTO_CRC32C
40 select FS_IOMAP if FS_DAX
41 help 40 help
42 This is the next generation of the ext3 filesystem. 41 This is the next generation of the ext3 filesystem.
43 42
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index b5f184493c57..d663d3d7c81c 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -258,7 +258,6 @@ out:
258static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 258static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
259{ 259{
260 int result; 260 int result;
261 handle_t *handle = NULL;
262 struct inode *inode = file_inode(vma->vm_file); 261 struct inode *inode = file_inode(vma->vm_file);
263 struct super_block *sb = inode->i_sb; 262 struct super_block *sb = inode->i_sb;
264 bool write = vmf->flags & FAULT_FLAG_WRITE; 263 bool write = vmf->flags & FAULT_FLAG_WRITE;
@@ -266,24 +265,12 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
266 if (write) { 265 if (write) {
267 sb_start_pagefault(sb); 266 sb_start_pagefault(sb);
268 file_update_time(vma->vm_file); 267 file_update_time(vma->vm_file);
269 down_read(&EXT4_I(inode)->i_mmap_sem); 268 }
270 handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE, 269 down_read(&EXT4_I(inode)->i_mmap_sem);
271 EXT4_DATA_TRANS_BLOCKS(sb)); 270 result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
272 } else 271 up_read(&EXT4_I(inode)->i_mmap_sem);
273 down_read(&EXT4_I(inode)->i_mmap_sem); 272 if (write)
274
275 if (IS_ERR(handle))
276 result = VM_FAULT_SIGBUS;
277 else
278 result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
279
280 if (write) {
281 if (!IS_ERR(handle))
282 ext4_journal_stop(handle);
283 up_read(&EXT4_I(inode)->i_mmap_sem);
284 sb_end_pagefault(sb); 273 sb_end_pagefault(sb);
285 } else
286 up_read(&EXT4_I(inode)->i_mmap_sem);
287 274
288 return result; 275 return result;
289} 276}
@@ -292,7 +279,6 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
292 pmd_t *pmd, unsigned int flags) 279 pmd_t *pmd, unsigned int flags)
293{ 280{
294 int result; 281 int result;
295 handle_t *handle = NULL;
296 struct inode *inode = file_inode(vma->vm_file); 282 struct inode *inode = file_inode(vma->vm_file);
297 struct super_block *sb = inode->i_sb; 283 struct super_block *sb = inode->i_sb;
298 bool write = flags & FAULT_FLAG_WRITE; 284 bool write = flags & FAULT_FLAG_WRITE;
@@ -300,27 +286,13 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
300 if (write) { 286 if (write) {
301 sb_start_pagefault(sb); 287 sb_start_pagefault(sb);
302 file_update_time(vma->vm_file); 288 file_update_time(vma->vm_file);
303 down_read(&EXT4_I(inode)->i_mmap_sem);
304 handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
305 ext4_chunk_trans_blocks(inode,
306 PMD_SIZE / PAGE_SIZE));
307 } else
308 down_read(&EXT4_I(inode)->i_mmap_sem);
309
310 if (IS_ERR(handle))
311 result = VM_FAULT_SIGBUS;
312 else {
313 result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
314 &ext4_iomap_ops);
315 } 289 }
316 290 down_read(&EXT4_I(inode)->i_mmap_sem);
317 if (write) { 291 result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
318 if (!IS_ERR(handle)) 292 &ext4_iomap_ops);
319 ext4_journal_stop(handle); 293 up_read(&EXT4_I(inode)->i_mmap_sem);
320 up_read(&EXT4_I(inode)->i_mmap_sem); 294 if (write)
321 sb_end_pagefault(sb); 295 sb_end_pagefault(sb);
322 } else
323 up_read(&EXT4_I(inode)->i_mmap_sem);
324 296
325 return result; 297 return result;
326} 298}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 0738f48293cc..0d8802453758 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -713,8 +713,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
713 } 713 }
714 sector = SECTOR_FROM_BLOCK(blkstart); 714 sector = SECTOR_FROM_BLOCK(blkstart);
715 715
716 if (sector & (bdev_zone_size(bdev) - 1) || 716 if (sector & (bdev_zone_sectors(bdev) - 1) ||
717 nr_sects != bdev_zone_size(bdev)) { 717 nr_sects != bdev_zone_sectors(bdev)) {
718 f2fs_msg(sbi->sb, KERN_INFO, 718 f2fs_msg(sbi->sb, KERN_INFO,
719 "(%d) %s: Unaligned discard attempted (block %x + %x)", 719 "(%d) %s: Unaligned discard attempted (block %x + %x)",
720 devi, sbi->s_ndevs ? FDEV(devi).path: "", 720 devi, sbi->s_ndevs ? FDEV(devi).path: "",
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 702638e21c76..46fd30d8af77 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1553,16 +1553,16 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
1553 return 0; 1553 return 0;
1554 1554
1555 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz != 1555 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
1556 SECTOR_TO_BLOCK(bdev_zone_size(bdev))) 1556 SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
1557 return -EINVAL; 1557 return -EINVAL;
1558 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev)); 1558 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
1559 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz != 1559 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
1560 __ilog2_u32(sbi->blocks_per_blkz)) 1560 __ilog2_u32(sbi->blocks_per_blkz))
1561 return -EINVAL; 1561 return -EINVAL;
1562 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz); 1562 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
1563 FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >> 1563 FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
1564 sbi->log_blocks_per_blkz; 1564 sbi->log_blocks_per_blkz;
1565 if (nr_sectors & (bdev_zone_size(bdev) - 1)) 1565 if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
1566 FDEV(devi).nr_blkz++; 1566 FDEV(devi).nr_blkz++;
1567 1567
1568 FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL); 1568 FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
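
[Editor's note] bdev_zone_size() was renamed to bdev_zone_sectors() tree-wide because the value is a sector count, not a byte size; the f2fs callers' arithmetic was already sector-based, so only the name changes. The alignment tests above rely on the zone length being a power of two; a tiny sketch of why the mask trick is valid (helper name is mine):

    static bool zone_misaligned(sector_t sector, sector_t zone_sectors)
    {
            /* valid only because zone_sectors is a power of two:
             * x & (n - 1) == x % n for power-of-two n */
            return sector & (zone_sectors - 1);
    }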
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 4304072161aa..40d61077bead 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
 		hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
 			if (invalidate)
 				set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+			clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
 			fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
 		}
 	} else {
@@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
 		wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
 				 TASK_UNINTERRUPTIBLE);
 
+		/* Make sure any pending writes are cancelled. */
+		if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
+			fscache_invalidate_writes(cookie);
+
 		/* Reset the cookie state if it wasn't relinquished */
 		if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
 			atomic_inc(&cookie->n_active);
diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c
index 9b28649df3a1..a8aa00be4444 100644
--- a/fs/fscache/netfs.c
+++ b/fs/fscache/netfs.c
@@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
 	cookie->flags		= 1 << FSCACHE_COOKIE_ENABLED;
 
 	spin_lock_init(&cookie->lock);
+	spin_lock_init(&cookie->stores_lock);
 	INIT_HLIST_HEAD(&cookie->backing_objects);
 
 	/* check the netfs type is not already present */
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 9e792e30f4db..7a182c87f378 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
 static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
 static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
 static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
 
 #define __STATE_NAME(n) fscache_osm_##n
 #define STATE(n) (&__STATE_NAME(n))
@@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
 static WORK_STATE(KILL_OBJECT,		"KILL", fscache_kill_object);
 static WORK_STATE(KILL_DEPENDENTS,	"KDEP", fscache_kill_dependents);
 static WORK_STATE(DROP_OBJECT,		"DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD,		"DEAD", (void*)2UL);
+static WORK_STATE(OBJECT_DEAD,		"DEAD", fscache_object_dead);
 
 static WAIT_STATE(WAIT_FOR_INIT,	"?INI",
 		  TRANSIT_TO(INIT_OBJECT,	1 << FSCACHE_OBJECT_EV_NEW_CHILD));
@@ -229,6 +230,10 @@ execute_work_state:
 	event = -1;
 	if (new_state == NO_TRANSIT) {
 		_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
+		if (unlikely(state == STATE(OBJECT_DEAD))) {
+			_leave(" [dead]");
+			return;
+		}
 		fscache_enqueue_object(object);
 		event_mask = object->oob_event_mask;
 		goto unmask_events;
@@ -239,7 +244,7 @@ execute_work_state:
 	object->state = state = new_state;
 
 	if (state->work) {
-		if (unlikely(state->work == ((void *)2UL))) {
+		if (unlikely(state == STATE(OBJECT_DEAD))) {
 			_leave(" [dead]");
 			return;
 		}
@@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
 	fscache_mark_object_dead(object);
 	object->oob_event_mask = 0;
 
+	if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
+		/* Reject any new read/write ops and abort any that are pending. */
+		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+		fscache_cancel_all_ops(object);
+	}
+
 	if (list_empty(&object->dependents) &&
 	    object->n_ops == 0 &&
 	    object->n_children == 0)
@@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object,
 	}
 }
 EXPORT_SYMBOL(fscache_object_mark_killed);
+
+/*
+ * The object is dead.  We can get here if an object gets queued by an event
+ * that would lead to its death (such as EV_KILL) when the dispatcher is
+ * already running (and so can be requeued) but hasn't yet cleared the event
+ * mask.
+ */
+static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
+						       int event)
+{
+	if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
+			      &object->flags))
+		return NO_TRANSIT;
+
+	WARN(true, "FS-Cache object redispatched after death");
+	return NO_TRANSIT;
+}
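
The fscache/object.c change above gives OBJECT_DEAD a real handler instead of the (void *)2UL sentinel: the first dispatch that races with object death is absorbed, and only a repeat triggers a warning, via an atomic test-and-set. A userspace sketch of that guard with C11 atomics (an analogue of test_and_set_bit(), not the kernel code):

    #include <stdatomic.h>
    #include <stdio.h>

    /* First post-death run is tolerated silently; later ones are reported. */
    static atomic_flag run_after_dead = ATOMIC_FLAG_INIT;

    static void dispatch_dead_object(void)
    {
        if (!atomic_flag_test_and_set(&run_after_dead))
            return;                 /* first post-death run: ignore */
        fprintf(stderr, "object redispatched after death\n");
    }

    int main(void)
    {
        dispatch_dead_object();     /* silent */
        dispatch_dead_object();     /* warns */
        return 0;
    }
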
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 70ea57c7b6bb..f11792672977 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -399,6 +399,10 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
 	spin_lock(&fiq->waitq.lock);
+	if (test_bit(FR_FINISHED, &req->flags)) {
+		spin_unlock(&fiq->waitq.lock);
+		return;
+	}
 	if (list_empty(&req->intr_entry)) {
 		list_add_tail(&req->intr_entry, &fiq->interrupts);
 		wake_up_locked(&fiq->waitq);
@@ -1372,6 +1376,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
 		 * code can Oops if the buffer persists after module unload.
 		 */
 		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
+		bufs[page_nr].flags = 0;
 		ret = add_to_pipe(pipe, &bufs[page_nr++]);
 		if (unlikely(ret < 0))
 			break;
@@ -2025,7 +2030,6 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
 		struct fuse_req *req;
 		req = list_entry(head->next, struct fuse_req, list);
 		req->out.h.error = -ECONNABORTED;
-		clear_bit(FR_PENDING, &req->flags);
 		clear_bit(FR_SENT, &req->flags);
 		list_del_init(&req->list);
 		request_end(fc, req);
@@ -2103,6 +2107,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
 		spin_lock(&fiq->waitq.lock);
 		fiq->connected = 0;
 		list_splice_init(&fiq->pending, &to_end2);
+		list_for_each_entry(req, &to_end2, list)
+			clear_bit(FR_PENDING, &req->flags);
 		while (forget_pending(fiq))
 			kfree(dequeue_forget(fiq, 1, NULL));
 		wake_up_all_locked(&fiq->waitq);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 1f7c732f32b0..811fd8929a18 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -68,7 +68,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
 	if (sec || nsec) {
 		struct timespec64 ts = {
 			sec,
-			max_t(u32, nsec, NSEC_PER_SEC - 1)
+			min_t(u32, nsec, NSEC_PER_SEC - 1)
 		};
 
 		return get_jiffies_64() + timespec64_to_jiffies(&ts);
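
The one-character fuse/dir.c fix above is easy to misread: clamping nsec with max() forces every value up to NSEC_PER_SEC - 1, inflating each attribute timeout by nearly a full second, while min() leaves valid values alone and only caps out-of-range ones. A standalone sketch of the two behaviours:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000L

    /* max() pins every input to NSEC_PER_SEC - 1 ... */
    static uint32_t clamp_nsec_wrong(uint32_t nsec)
    {
        return nsec > NSEC_PER_SEC - 1 ? nsec : NSEC_PER_SEC - 1; /* max() */
    }

    /* ... min() only caps values that are actually too large. */
    static uint32_t clamp_nsec_right(uint32_t nsec)
    {
        return nsec < NSEC_PER_SEC - 1 ? nsec : NSEC_PER_SEC - 1; /* min() */
    }

    int main(void)
    {
        printf("wrong: %u\n", clamp_nsec_wrong(100));   /* 999999999 */
        printf("right: %u\n", clamp_nsec_right(100));   /* 100 */
        return 0;
    }
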
diff --git a/fs/iomap.c b/fs/iomap.c
index 354a123f170e..a51cb4c07d4d 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -114,6 +114,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 
 	BUG_ON(pos + len > iomap->offset + iomap->length);
 
+	if (fatal_signal_pending(current))
+		return -EINTR;
+
 	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
 	if (!page)
 		return -ENOMEM;
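
The iomap change above lets a buffered-write loop die promptly on a fatal signal instead of grinding through every remaining page; iomap_write_begin() runs once per page, so the check executes at each iteration. A userspace analogue (sketch, using a SIGINT flag where the kernel tests fatal_signal_pending(current)):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>

    static volatile sig_atomic_t interrupted;

    static void on_sigint(int sig) { (void)sig; interrupted = 1; }

    int main(void)
    {
        static char dst[1 << 16];
        signal(SIGINT, on_sigint);

        /* Re-check for cancellation before each chunk of work. */
        for (size_t off = 0; off < sizeof(dst); off += 4096) {
            if (interrupted) {
                fprintf(stderr, "aborted at offset %zu\n", off);
                return 1;               /* the kernel returns -EINTR here */
            }
            memset(dst + off, 0, 4096); /* stand-in for one page of work */
        }
        puts("done");
        return 0;
    }
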
diff --git a/fs/libfs.c b/fs/libfs.c
index e973cd51f126..28d6f35feed6 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -245,7 +245,8 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
 	struct inode *root;
 	struct qstr d_name = QSTR_INIT(name, strlen(name));
 
-	s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL);
+	s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER,
+			&init_user_ns, NULL);
 	if (IS_ERR(s))
 		return ERR_CAST(s);
 
diff --git a/fs/namespace.c b/fs/namespace.c
index b5b1259e064f..487ba30bb5c6 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -742,26 +742,50 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
 	return NULL;
 }
 
-static struct mountpoint *new_mountpoint(struct dentry *dentry)
+static struct mountpoint *get_mountpoint(struct dentry *dentry)
 {
-	struct hlist_head *chain = mp_hash(dentry);
-	struct mountpoint *mp;
+	struct mountpoint *mp, *new = NULL;
 	int ret;
 
-	mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
-	if (!mp)
+	if (d_mountpoint(dentry)) {
+mountpoint:
+		read_seqlock_excl(&mount_lock);
+		mp = lookup_mountpoint(dentry);
+		read_sequnlock_excl(&mount_lock);
+		if (mp)
+			goto done;
+	}
+
+	if (!new)
+		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
+	if (!new)
 		return ERR_PTR(-ENOMEM);
 
+
+	/* Exactly one process may set d_mounted */
 	ret = d_set_mounted(dentry);
-	if (ret) {
-		kfree(mp);
-		return ERR_PTR(ret);
-	}
 
-	mp->m_dentry = dentry;
-	mp->m_count = 1;
-	hlist_add_head(&mp->m_hash, chain);
-	INIT_HLIST_HEAD(&mp->m_list);
+	/* Someone else set d_mounted? */
+	if (ret == -EBUSY)
+		goto mountpoint;
+
+	/* The dentry is not available as a mountpoint? */
+	mp = ERR_PTR(ret);
+	if (ret)
+		goto done;
+
+	/* Add the new mountpoint to the hash table */
+	read_seqlock_excl(&mount_lock);
+	new->m_dentry = dentry;
+	new->m_count = 1;
+	hlist_add_head(&new->m_hash, mp_hash(dentry));
+	INIT_HLIST_HEAD(&new->m_list);
+	read_sequnlock_excl(&mount_lock);
+
+	mp = new;
+	new = NULL;
+done:
+	kfree(new);
 	return mp;
 }
 
@@ -1595,11 +1619,11 @@ void __detach_mounts(struct dentry *dentry)
 	struct mount *mnt;
 
 	namespace_lock();
+	lock_mount_hash();
 	mp = lookup_mountpoint(dentry);
 	if (IS_ERR_OR_NULL(mp))
 		goto out_unlock;
 
-	lock_mount_hash();
 	event++;
 	while (!hlist_empty(&mp->m_list)) {
 		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
@@ -1609,9 +1633,9 @@ void __detach_mounts(struct dentry *dentry)
 		}
 		else umount_tree(mnt, UMOUNT_CONNECTED);
 	}
-	unlock_mount_hash();
 	put_mountpoint(mp);
 out_unlock:
+	unlock_mount_hash();
 	namespace_unlock();
 }
 
@@ -2038,9 +2062,7 @@ retry:
 	namespace_lock();
 	mnt = lookup_mnt(path);
 	if (likely(!mnt)) {
-		struct mountpoint *mp = lookup_mountpoint(dentry);
-		if (!mp)
-			mp = new_mountpoint(dentry);
+		struct mountpoint *mp = get_mountpoint(dentry);
 		if (IS_ERR(mp)) {
 			namespace_unlock();
 			inode_unlock(dentry->d_inode);
@@ -2059,7 +2081,11 @@ retry:
 static void unlock_mount(struct mountpoint *where)
 {
 	struct dentry *dentry = where->m_dentry;
+
+	read_seqlock_excl(&mount_lock);
 	put_mountpoint(where);
+	read_sequnlock_excl(&mount_lock);
+
 	namespace_unlock();
 	inode_unlock(dentry->d_inode);
 }
@@ -3135,9 +3161,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
 	touch_mnt_namespace(current->nsproxy->mnt_ns);
 	/* A moved mount should not expire automatically */
 	list_del_init(&new_mnt->mnt_expire);
+	put_mountpoint(root_mp);
 	unlock_mount_hash();
 	chroot_fs_refs(&root, &new);
-	put_mountpoint(root_mp);
 	error = 0;
 out4:
 	unlock_mount(old_mp);
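
The get_mountpoint() rewrite above follows a common pattern for racy insertions: look up under the lock, allocate outside it, retry the lookup when d_set_mounted() reports -EBUSY because another task won, and free the preallocated node if it went unused. A minimal sketch of that shape with a plain mutex and a single-bucket list (not the kernel's hash table or seqlock):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { int key; struct node *next; };

    static struct node *head;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static struct node *lookup_locked(int key)
    {
        for (struct node *n = head; n; n = n->next)
            if (n->key == key)
                return n;
        return NULL;
    }

    static struct node *get_or_insert(int key)
    {
        struct node *n, *new = malloc(sizeof(*new));

        if (!new)
            return NULL;
        pthread_mutex_lock(&lock);
        n = lookup_locked(key);
        if (!n) {                   /* we won the race: publish ours */
            new->key = key;
            new->next = head;
            head = new;
            n = new;
            new = NULL;
        }
        pthread_mutex_unlock(&lock);
        free(new);                  /* no-op if we inserted it */
        return n;
    }

    int main(void) { return get_or_insert(42) ? 0 : 1; }
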
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6dcbc5defb7a..0a0eaecf9676 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -38,7 +38,6 @@
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
-#include <linux/file.h>
 #include <linux/string.h>
 #include <linux/ratelimit.h>
 #include <linux/printk.h>
@@ -1083,7 +1082,8 @@ int nfs4_call_sync(struct rpc_clnt *clnt,
 	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
 }
 
-static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
+static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
+		unsigned long timestamp)
 {
 	struct nfs_inode *nfsi = NFS_I(dir);
 
@@ -1099,6 +1099,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
 			NFS_INO_INVALID_ACL;
 	}
 	dir->i_version = cinfo->after;
+	nfsi->read_cache_jiffies = timestamp;
 	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
 	nfs_fscache_invalidate(dir);
 	spin_unlock(&dir->i_lock);
@@ -2391,11 +2392,13 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
 	nfs_fattr_map_and_free_names(server, &data->f_attr);
 
 	if (o_arg->open_flags & O_CREAT) {
-		update_changeattr(dir, &o_res->cinfo);
 		if (o_arg->open_flags & O_EXCL)
 			data->file_created = 1;
 		else if (o_res->cinfo.before != o_res->cinfo.after)
 			data->file_created = 1;
+		if (data->file_created || dir->i_version != o_res->cinfo.after)
+			update_changeattr(dir, &o_res->cinfo,
+					o_res->f_attr->time_start);
 	}
 	if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
 		server->caps &= ~NFS_CAP_POSIX_LOCK;
@@ -2697,7 +2700,8 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
 		sattr->ia_valid |= ATTR_MTIME;
 
 	/* Except MODE, it seems harmless of setting twice. */
-	if ((attrset[1] & FATTR4_WORD1_MODE))
+	if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE &&
+		attrset[1] & FATTR4_WORD1_MODE)
 		sattr->ia_valid &= ~ATTR_MODE;
 
 	if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
@@ -4073,11 +4077,12 @@ static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name)
 		.rpc_argp = &args,
 		.rpc_resp = &res,
 	};
+	unsigned long timestamp = jiffies;
 	int status;
 
 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
 	if (status == 0)
-		update_changeattr(dir, &res.cinfo);
+		update_changeattr(dir, &res.cinfo, timestamp);
 	return status;
 }
 
@@ -4125,7 +4130,8 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
 	if (nfs4_async_handle_error(task, res->server, NULL,
 				    &data->timeout) == -EAGAIN)
 		return 0;
-	update_changeattr(dir, &res->cinfo);
+	if (task->tk_status == 0)
+		update_changeattr(dir, &res->cinfo, res->dir_attr->time_start);
 	return 1;
 }
 
@@ -4159,8 +4165,11 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
 	if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
 		return 0;
 
-	update_changeattr(old_dir, &res->old_cinfo);
-	update_changeattr(new_dir, &res->new_cinfo);
+	if (task->tk_status == 0) {
+		update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start);
+		if (new_dir != old_dir)
+			update_changeattr(new_dir, &res->new_cinfo, res->new_fattr->time_start);
+	}
 	return 1;
 }
 
@@ -4197,7 +4206,7 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct
 
 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
 	if (!status) {
-		update_changeattr(dir, &res.cinfo);
+		update_changeattr(dir, &res.cinfo, res.fattr->time_start);
 		status = nfs_post_op_update_inode(inode, res.fattr);
 		if (!status)
 			nfs_setsecurity(inode, res.fattr, res.label);
@@ -4272,7 +4281,8 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
 	int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
 				    &data->arg.seq_args, &data->res.seq_res, 1);
 	if (status == 0) {
-		update_changeattr(dir, &data->res.dir_cinfo);
+		update_changeattr(dir, &data->res.dir_cinfo,
+				data->res.fattr->time_start);
 		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
 	}
 	return status;
@@ -6127,7 +6137,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
 	p->server = server;
 	atomic_inc(&lsp->ls_count);
 	p->ctx = get_nfs_open_context(ctx);
-	get_file(fl->fl_file);
 	memcpy(&p->fl, fl, sizeof(p->fl));
 	return p;
 out_free_seqid:
@@ -6240,7 +6249,6 @@ static void nfs4_lock_release(void *calldata)
 	nfs_free_seqid(data->arg.lock_seqid);
 	nfs4_put_lock_state(data->lsp);
 	put_nfs_open_context(data->ctx);
-	fput(data->fl.fl_file);
 	kfree(data);
 	dprintk("%s: done!\n", __func__);
 }
@@ -8483,6 +8491,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
 		goto out;
 	}
 
+	nfs4_sequence_free_slot(&lgp->res.seq_res);
 	err = nfs4_handle_exception(server, nfs4err, exception);
 	if (!status) {
 		if (exception->retry)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 1d152f4470cd..daeb94e3acd4 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1091,6 +1091,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
 	case -NFS4ERR_BADXDR:
 	case -NFS4ERR_RESOURCE:
 	case -NFS4ERR_NOFILEHANDLE:
+	case -NFS4ERR_MOVED:
 		/* Non-seqid mutating errors */
 		return;
 	};
@@ -1729,7 +1730,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
 		break;
 	case -NFS4ERR_STALE_CLIENTID:
 		set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
-		nfs4_state_clear_reclaim_reboot(clp);
 		nfs4_state_start_reclaim_reboot(clp);
 		break;
 	case -NFS4ERR_EXPIRED:
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 59554f3adf29..dd042498ce7c 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1200,10 +1200,10 @@ _pnfs_return_layout(struct inode *ino)
 
 	send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
 	spin_unlock(&ino->i_lock);
-	pnfs_free_lseg_list(&tmp_list);
 	if (send)
 		status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
 out_put_layout_hdr:
+	pnfs_free_lseg_list(&tmp_list);
 	pnfs_put_layout_hdr(lo);
 out:
 	dprintk("<-- %s status: %d\n", __func__, status);
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 596205d939a1..1fc07a9c70e9 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
 	struct nfs4_layout_stateid *ls;
 	struct nfs4_stid *stp;
 
-	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
+	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
+					nfsd4_free_layout_stateid);
 	if (!stp)
 		return NULL;
-	stp->sc_free = nfsd4_free_layout_stateid;
+
 	get_nfs4_file(fp);
 	stp->sc_file = fp;
 
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 4b4beaaa4eaa..a0dee8ae9f97 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -633,8 +633,8 @@ out:
 	return co;
 }
 
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-					 struct kmem_cache *slab)
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+				  void (*sc_free)(struct nfs4_stid *))
 {
 	struct nfs4_stid *stid;
 	int new_id;
@@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
 	idr_preload_end();
 	if (new_id < 0)
 		goto out_free;
+
+	stid->sc_free = sc_free;
 	stid->sc_client = cl;
 	stid->sc_stateid.si_opaque.so_id = new_id;
 	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
@@ -675,15 +677,12 @@ out_free:
 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
 {
 	struct nfs4_stid *stid;
-	struct nfs4_ol_stateid *stp;
 
-	stid = nfs4_alloc_stid(clp, stateid_slab);
+	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
 	if (!stid)
 		return NULL;
 
-	stp = openlockstateid(stid);
-	stp->st_stid.sc_free = nfs4_free_ol_stateid;
-	return stp;
+	return openlockstateid(stid);
 }
 
 static void nfs4_free_deleg(struct nfs4_stid *stid)
@@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
 		goto out_dec;
 	if (delegation_blocked(&current_fh->fh_handle))
 		goto out_dec;
-	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
+	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
 	if (dp == NULL)
 		goto out_dec;
 
-	dp->dl_stid.sc_free = nfs4_free_deleg;
 	/*
 	 * delegation seqid's are never incremented.  The 4.1 special
 	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
@@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
 	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
 	get_nfs4_file(fp);
 	stp->st_stid.sc_file = fp;
-	stp->st_stid.sc_free = nfs4_free_lock_stateid;
 	stp->st_access_bmap = 0;
 	stp->st_deny_bmap = open_stp->st_deny_bmap;
 	stp->st_openstp = open_stp;
@@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
 	lst = find_lock_stateid(lo, fi);
 	if (lst == NULL) {
 		spin_unlock(&clp->cl_lock);
-		ns = nfs4_alloc_stid(clp, stateid_slab);
+		ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
 		if (ns == NULL)
 			return NULL;
 
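
The nfsd changes above move the sc_free destructor into nfs4_alloc_stid() itself, so a stateid can never become visible without a valid free callback; previously each caller assigned it after allocation, leaving a window in which a release could run against an unset pointer. A sketch of the shape (hypothetical types, not nfsd's):

    #include <stdlib.h>

    struct stid {
        void (*sc_free)(struct stid *);
        /* ... payload ... */
    };

    /* Destructor is an argument, set before the object can escape. */
    static struct stid *stid_alloc(void (*sc_free)(struct stid *))
    {
        struct stid *s = calloc(1, sizeof(*s));

        if (s)
            s->sc_free = sc_free;
        return s;
    }

    static void stid_put(struct stid *s)
    {
        if (s)
            s->sc_free(s);          /* always safe to call */
    }

    static void stid_free(struct stid *s) { free(s); }

    int main(void)
    {
        stid_put(stid_alloc(stid_free));
        return 0;
    }
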
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 7ecf16be4a44..8fae53ce21d1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2440,7 +2440,9 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
 	p++;                /* to be backfilled later */
 
 	if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
-		u32 *supp = nfsd_suppattrs[minorversion];
+		u32 supp[3];
+
+		memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
 
 		if (!IS_POSIXACL(dentry->d_inode))
 			supp[0] &= ~FATTR4_WORD0_ACL;
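
The nfs4xdr.c fix above is a copy-before-modify: supp previously aliased the global nfsd_suppattrs[] table, so clearing FATTR4_WORD0_ACL for one non-ACL export corrupted the advertised attribute mask for every later reply. In miniature (hypothetical bit values):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static const uint32_t suppattrs[3] = { 0xffffffff, 0xffffffff, 0x7 };
    #define WORD0_ACL (1u << 6)     /* stand-in for FATTR4_WORD0_ACL */

    static void encode_supported_attrs(int acl_supported)
    {
        uint32_t supp[3];

        memcpy(supp, suppattrs, sizeof(supp));  /* per-call copy */
        if (!acl_supported)
            supp[0] &= ~WORD0_ACL;              /* never touches the global */
        printf("%08x %08x %08x\n", supp[0], supp[1], supp[2]);
    }

    int main(void)
    {
        encode_supported_attrs(0);
        encode_supported_attrs(1);  /* unaffected by the previous call */
        return 0;
    }
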
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index c9399366f9df..4516e8b7d776 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
 __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
 		     stateid_t *stateid, unsigned char typemask,
 		     struct nfs4_stid **s, struct nfsd_net *nn);
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-		struct kmem_cache *slab);
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+		void (*sc_free)(struct nfs4_stid *));
 void nfs4_unhash_stid(struct nfs4_stid *s);
 void nfs4_put_stid(struct nfs4_stid *s);
 void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index d3fea0bd89e2..6043306e8e21 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -510,18 +510,6 @@ void fsnotify_detach_group_marks(struct fsnotify_group *group)
 	}
 }
 
-void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
-{
-	assert_spin_locked(&old->lock);
-	new->inode = old->inode;
-	new->mnt = old->mnt;
-	if (old->group)
-		fsnotify_get_group(old->group);
-	new->group = old->group;
-	new->mask = old->mask;
-	new->free_mark = old->free_mark;
-}
-
 /*
  * Nothing fancy, just initialize lists and locks and counters.
  */
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 83d576f6a287..77d1632e905d 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3303,6 +3303,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
 	mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
 	     lockres->l_level, new_level);
 
+	/*
+	 * On DLM_LKF_VALBLK, fsdlm behaves differently with o2cb. It always
+	 * expects DLM_LKF_VALBLK being set if the LKB has LVB, so that
+	 * we can recover correctly from node failure. Otherwise, we may get
+	 * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
+	 */
+	if (!ocfs2_is_o2cb_active() &&
+	    lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+		lvb = 1;
+
 	if (lvb)
 		dlm_flags |= DLM_LKF_VALBLK;
 
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 52c07346bea3..820359096c7a 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
  */
 static struct ocfs2_stack_plugin *active_stack;
 
+inline int ocfs2_is_o2cb_active(void)
+{
+	return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
+}
+EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
+
 static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
 {
 	struct ocfs2_stack_plugin *p;
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index f2dce10fae54..e3036e1790e8 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -298,6 +298,9 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
 int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
 void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
 
+/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
+int ocfs2_is_o2cb_active(void);
+
 extern struct kset *ocfs2_kset;
 
 #endif  /* STACKGLUE_H */
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 9ad48d9202a9..023bb0b03352 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -154,29 +154,38 @@ out_err:
 static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
 			    struct dentry **ret)
 {
-	const char *s = d->name.name;
+	/* Counting down from the end, since the prefix can change */
+	size_t rem = d->name.len - 1;
 	struct dentry *dentry = NULL;
 	int err;
 
-	if (*s != '/')
+	if (d->name.name[0] != '/')
 		return ovl_lookup_single(base, d, d->name.name, d->name.len,
 					 0, "", ret);
 
-	while (*s++ == '/' && !IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
+	while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
+		const char *s = d->name.name + d->name.len - rem;
 		const char *next = strchrnul(s, '/');
-		size_t slen = strlen(s);
+		size_t thislen = next - s;
+		bool end = !next[0];
 
-		if (WARN_ON(slen > d->name.len) ||
-		    WARN_ON(strcmp(d->name.name + d->name.len - slen, s)))
+		/* Verify we did not go off the rails */
+		if (WARN_ON(s[-1] != '/'))
 			return -EIO;
 
-		err = ovl_lookup_single(base, d, s, next - s,
-					d->name.len - slen, next, &base);
+		err = ovl_lookup_single(base, d, s, thislen,
+					d->name.len - rem, next, &base);
 		dput(dentry);
 		if (err)
 			return err;
 		dentry = base;
-		s = next;
+		if (end)
+			break;
+
+		rem -= thislen + 1;
+
+		if (WARN_ON(rem >= d->name.len))
+			return -EIO;
 	}
 	*ret = dentry;
 	return 0;
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 595522022aca..c9d48dc78495 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -922,11 +922,10 @@ int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 	int error;
 
 	if (type == ACL_TYPE_ACCESS) {
-		error = posix_acl_equiv_mode(acl, &inode->i_mode);
-		if (error < 0)
-			return 0;
-		if (error == 0)
-			acl = NULL;
+		error = posix_acl_update_mode(inode,
+				&inode->i_mode, &acl);
+		if (error)
+			return error;
 	}
 
 	inode->i_ctime = current_time(inode);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8e7e61b28f31..87c9a9aacda3 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3179,6 +3179,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
 	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
 		char name[PROC_NUMBUF];
 		int len;
+
+		cond_resched();
 		if (!has_pid_permissions(ns, iter.task, 2))
 			continue;
 
diff --git a/fs/proc/page.c b/fs/proc/page.c
index a2066e6dee90..2726536489b1 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -173,7 +173,8 @@ u64 stable_page_flags(struct page *page)
 	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
 	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);
 
-	u |= kpf_copy_bit(k, KPF_SWAPCACHE,	PG_swapcache);
+	if (PageSwapCache(page))
+		u |= 1 << KPF_SWAPCACHE;
 	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);
 
 	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 55313d994895..d4e37acd4821 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -709,7 +709,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
 	ctl_dir = container_of(head, struct ctl_dir, header);
 
 	if (!dir_emit_dots(file, ctx))
-		return 0;
+		goto out;
 
 	pos = 2;
 
@@ -719,6 +719,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
 			break;
 		}
 	}
+out:
 	sysctl_head_finish(head);
 	return 0;
 }
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 27c059e1760a..1d887efaaf71 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -280,7 +280,7 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
 					   1, id, type, PSTORE_TYPE_PMSG, 0);
 
 	/* ftrace is last since it may want to dynamically allocate memory. */
-	if (!prz_ok(prz)) {
+	if (!prz_ok(prz) && cxt->fprzs) {
 		if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)) {
 			prz = ramoops_get_next_prz(cxt->fprzs,
 					&cxt->ftrace_read_cnt, 1, id, type,
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index d0f8a38dfafa..0186fe6d39f3 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -74,6 +74,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
+#include <linux/major.h>
 #include "internal.h"
 
 static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode)
 static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *sb = dentry->d_sb;
-	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+	u64 id = 0;
+
+	/* When calling huge_encode_dev(),
+	 * use sb->s_bdev->bd_dev when,
+	 *   - CONFIG_ROMFS_ON_BLOCK defined
+	 * use sb->s_dev when,
+	 *   - CONFIG_ROMFS_ON_BLOCK undefined and
+	 *   - CONFIG_ROMFS_ON_MTD defined
+	 * leave id as 0 when,
+	 *   - CONFIG_ROMFS_ON_BLOCK undefined and
+	 *   - CONFIG_ROMFS_ON_MTD undefined
+	 */
+	if (sb->s_bdev)
+		id = huge_encode_dev(sb->s_bdev->bd_dev);
+	else if (sb->s_dev)
+		id = huge_encode_dev(sb->s_dev);
 
 	buf->f_type = ROMFS_MAGIC;
 	buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_flags |= MS_RDONLY | MS_NOATIME;
 	sb->s_op = &romfs_super_ops;
 
+#ifdef CONFIG_ROMFS_ON_MTD
+	/* Use same dev ID from the underlying mtdblock device */
+	if (sb->s_mtd)
+		sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
 	/* read the image superblock and check it */
 	rsb = kmalloc(512, GFP_KERNEL);
 	if (!rsb)
diff --git a/fs/splice.c b/fs/splice.c
index 873d83104e79..4ef78aa8ef61 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -204,6 +204,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
 			buf->len = spd->partial[page_nr].len;
 			buf->private = spd->partial[page_nr].private;
 			buf->ops = spd->ops;
+			buf->flags = 0;
 
 			pipe->nrbufs++;
 			page_nr++;
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index 0a908ae7af13..b0d0623c83ed 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -53,7 +53,7 @@ config UBIFS_ATIME_SUPPORT
 
 config UBIFS_FS_ENCRYPTION
 	bool "UBIFS Encryption"
-	depends on UBIFS_FS
+	depends on UBIFS_FS && BLOCK
 	select FS_ENCRYPTION
 	default n
 	help
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 1c5331ac9614..528369f3e472 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -390,16 +390,6 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
 	dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
 		dentry, mode, dir->i_ino);
 
-	if (ubifs_crypt_is_encrypted(dir)) {
-		err = fscrypt_get_encryption_info(dir);
-		if (err)
-			return err;
-
-		if (!fscrypt_has_encryption_key(dir)) {
-			return -EPERM;
-		}
-	}
-
 	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
 	if (err)
 		return err;
@@ -741,17 +731,9 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
 	ubifs_assert(inode_is_locked(dir));
 	ubifs_assert(inode_is_locked(inode));
 
-	if (ubifs_crypt_is_encrypted(dir)) {
-		if (!fscrypt_has_permitted_context(dir, inode))
-			return -EPERM;
-
-		err = fscrypt_get_encryption_info(inode);
-		if (err)
-			return err;
-
-		if (!fscrypt_has_encryption_key(inode))
-			return -EPERM;
-	}
+	if (ubifs_crypt_is_encrypted(dir) &&
+	    !fscrypt_has_permitted_context(dir, inode))
+		return -EPERM;
 
 	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
 	if (err)
@@ -1000,17 +982,6 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 	if (err)
 		return err;
 
-	if (ubifs_crypt_is_encrypted(dir)) {
-		err = fscrypt_get_encryption_info(dir);
-		if (err)
-			goto out_budg;
-
-		if (!fscrypt_has_encryption_key(dir)) {
-			err = -EPERM;
-			goto out_budg;
-		}
-	}
-
 	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
 	if (err)
 		goto out_budg;
@@ -1096,17 +1067,6 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
 		return err;
 	}
 
-	if (ubifs_crypt_is_encrypted(dir)) {
-		err = fscrypt_get_encryption_info(dir);
-		if (err)
-			goto out_budg;
-
-		if (!fscrypt_has_encryption_key(dir)) {
-			err = -EPERM;
-			goto out_budg;
-		}
-	}
-
 	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
 	if (err)
 		goto out_budg;
@@ -1231,18 +1191,6 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
 		goto out_inode;
 	}
 
-	err = fscrypt_get_encryption_info(inode);
-	if (err) {
-		kfree(sd);
-		goto out_inode;
-	}
-
-	if (!fscrypt_has_encryption_key(inode)) {
-		kfree(sd);
-		err = -EPERM;
-		goto out_inode;
-	}
-
 	ostr.name = sd->encrypted_path;
 	ostr.len = disk_link.len;
 
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 78d713644df3..da519ba205f6 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -217,6 +217,9 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case FS_IOC32_SETFLAGS:
 		cmd = FS_IOC_SETFLAGS;
 		break;
+	case FS_IOC_SET_ENCRYPTION_POLICY:
+	case FS_IOC_GET_ENCRYPTION_POLICY:
+		break;
 	default:
 		return -ENOIOCTLCMD;
 	}
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index a459211a1c21..294519b98874 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -744,6 +744,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
 
 	} else {
 		data->compr_size = 0;
+		out_len = compr_len;
 	}
 
 	dlen = UBIFS_DATA_NODE_SZ + out_len;
@@ -1319,6 +1320,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
 	dn->compr_type = cpu_to_le16(compr_type);
 	dn->size = cpu_to_le32(*new_len);
 	*new_len = UBIFS_DATA_NODE_SZ + out_len;
+	err = 0;
 out:
 	kfree(buf);
 	return err;
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 74ae2de949df..709aa098dd46 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -34,6 +34,11 @@
 #include <linux/slab.h>
 #include "ubifs.h"
 
+static int try_read_node(const struct ubifs_info *c, void *buf, int type,
+			 int len, int lnum, int offs);
+static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
+			      struct ubifs_zbranch *zbr, void *node);
+
 /*
  * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
  * @NAME_LESS: name corresponding to the first argument is less than second
@@ -402,7 +407,19 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 		return 0;
 	}
 
-	err = ubifs_tnc_read_node(c, zbr, node);
+	if (c->replaying) {
+		err = fallible_read_node(c, &zbr->key, zbr, node);
+		/*
+		 * When the node was not found, return -ENOENT, 0 otherwise.
+		 * Negative return codes stay as-is.
+		 */
+		if (err == 0)
+			err = -ENOENT;
+		else if (err == 1)
+			err = 0;
+	} else {
+		err = ubifs_tnc_read_node(c, zbr, node);
+	}
 	if (err)
 		return err;
 
@@ -2857,7 +2874,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
 	if (fname_len(nm) > 0) {
 		if (err) {
 			/* Handle collisions */
-			err = resolve_collision(c, key, &znode, &n, nm);
+			if (c->replaying)
+				err = fallible_resolve_collision(c, key, &znode, &n,
+								 nm, 0);
+			else
+				err = resolve_collision(c, key, &znode, &n, nm);
 			dbg_tnc("rc returned %d, znode %p, n %d",
 				err, znode, n);
 			if (unlikely(err < 0))
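
Both ubifs/tnc.c hunks above switch to the "fallible" readers while c->replaying is set, since nodes referenced by the journal may already have been garbage-collected. The fallible reader reports found/missing as 1/0, so the caller remaps that onto the usual 0/-ENOENT convention. The remapping in isolation (stubbed reader, sketch only):

    #include <errno.h>
    #include <stdio.h>

    static int fallible_read(int present)
    {
        return present;     /* 1 = found, 0 = clean miss, <0 = I/O error */
    }

    static int read_node(int present)
    {
        int err = fallible_read(present);

        if (err == 0)
            err = -ENOENT;  /* clean miss becomes a hard "not found" */
        else if (err == 1)
            err = 0;        /* found becomes success */
        return err;         /* real errors pass through unchanged */
    }

    int main(void)
    {
        printf("%d %d\n", read_node(1), read_node(0)); /* 0 -ENOENT */
        return 0;
    }
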
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index d96e2f30084b..43953e03c356 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue {
 	struct uffd_msg msg;
 	wait_queue_t wq;
 	struct userfaultfd_ctx *ctx;
+	bool waken;
 };
 
 struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
 	if (len && (start > uwq->msg.arg.pagefault.address ||
 		    start + len <= uwq->msg.arg.pagefault.address))
 		goto out;
+	WRITE_ONCE(uwq->waken, true);
+	/*
+	 * The implicit smp_mb__before_spinlock in try_to_wake_up()
+	 * renders uwq->waken visible to other CPUs before the task is
+	 * waken.
+	 */
 	ret = wake_up_state(wq->private, mode);
 	if (ret)
 		/*
@@ -264,6 +271,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	struct userfaultfd_wait_queue uwq;
 	int ret;
 	bool must_wait, return_to_userland;
+	long blocking_state;
 
 	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
@@ -334,10 +342,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	uwq.wq.private = current;
 	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
 	uwq.ctx = ctx;
+	uwq.waken = false;
 
 	return_to_userland =
 		(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
 		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+	blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+			 TASK_KILLABLE;
 
 	spin_lock(&ctx->fault_pending_wqh.lock);
 	/*
@@ -350,8 +361,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	 * following the spin_unlock to happen before the list_add in
 	 * __add_wait_queue.
 	 */
-	set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
-			  TASK_KILLABLE);
+	set_current_state(blocking_state);
 	spin_unlock(&ctx->fault_pending_wqh.lock);
 
 	must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
@@ -364,6 +374,29 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 		wake_up_poll(&ctx->fd_wqh, POLLIN);
 		schedule();
 		ret |= VM_FAULT_MAJOR;
+
+		/*
+		 * False wakeups can originate even from rwsem before
+		 * up_read() however userfaults will wait either for a
+		 * targeted wakeup on the specific uwq waitqueue from
+		 * wake_userfault() or for signals or for uffd
+		 * release.
+		 */
+		while (!READ_ONCE(uwq.waken)) {
+			/*
+			 * This needs the full smp_store_mb()
+			 * guarantee as the state write must be
+			 * visible to other CPUs before reading
+			 * uwq.waken from other CPUs.
+			 */
+			set_current_state(blocking_state);
+			if (READ_ONCE(uwq.waken) ||
+			    READ_ONCE(ctx->released) ||
+			    (return_to_userland ? signal_pending(current) :
+			     fatal_signal_pending(current)))
+				break;
+			schedule();
+		}
 	}
 
 	__set_current_state(TASK_RUNNING);
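
The userfaultfd change above is the classic sleep discipline: after schedule() returns, re-test the predicate (uwq.waken) in a loop, because a wakeup may be spurious, here leaking out of the mmap_sem rwsem, rather than the targeted one. The same discipline in condition-variable form (userspace sketch; the kernel version uses set_current_state()/schedule() with READ_ONCE):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool waken;

    static void *waiter(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!waken)                  /* tolerate spurious wakeups */
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        puts("target wakeup received");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        pthread_mutex_lock(&lock);
        waken = true;                   /* the targeted wakeup */
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
    }
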
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index e5ebc3770460..33db69be4832 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -39,6 +39,7 @@
39#include "xfs_rmap_btree.h" 39#include "xfs_rmap_btree.h"
40#include "xfs_btree.h" 40#include "xfs_btree.h"
41#include "xfs_refcount_btree.h" 41#include "xfs_refcount_btree.h"
42#include "xfs_ialloc_btree.h"
42 43
43/* 44/*
44 * Per-AG Block Reservations 45 * Per-AG Block Reservations
@@ -200,22 +201,30 @@ __xfs_ag_resv_init(
 	struct xfs_mount		*mp = pag->pag_mount;
 	struct xfs_ag_resv		*resv;
 	int				error;
+	xfs_extlen_t			reserved;
 
-	resv = xfs_perag_resv(pag, type);
 	if (used > ask)
 		ask = used;
-	resv->ar_asked = ask;
-	resv->ar_reserved = resv->ar_orig_reserved = ask - used;
-	mp->m_ag_max_usable -= ask;
+	reserved = ask - used;
 
-	trace_xfs_ag_resv_init(pag, type, ask);
-
-	error = xfs_mod_fdblocks(mp, -(int64_t)resv->ar_reserved, true);
-	if (error)
+	error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+	if (error) {
 		trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
 				error, _RET_IP_);
+		xfs_warn(mp,
+"Per-AG reservation for AG %u failed.  Filesystem may run out of space.",
+				pag->pag_agno);
+		return error;
+	}
 
-	return error;
+	mp->m_ag_max_usable -= ask;
+
+	resv = xfs_perag_resv(pag, type);
+	resv->ar_asked = ask;
+	resv->ar_reserved = resv->ar_orig_reserved = reserved;
+
+	trace_xfs_ag_resv_init(pag, type, ask);
+	return 0;
 }
 
 /* Create a per-AG block reservation. */
@@ -223,6 +232,8 @@ int
 xfs_ag_resv_init(
 	struct xfs_perag		*pag)
 {
+	struct xfs_mount		*mp = pag->pag_mount;
+	xfs_agnumber_t			agno = pag->pag_agno;
 	xfs_extlen_t			ask;
 	xfs_extlen_t			used;
 	int				error = 0;
@@ -231,23 +242,45 @@ xfs_ag_resv_init(
 	if (pag->pag_meta_resv.ar_asked == 0) {
 		ask = used = 0;
 
-		error = xfs_refcountbt_calc_reserves(pag->pag_mount,
-				pag->pag_agno, &ask, &used);
+		error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
 		if (error)
 			goto out;
 
-		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
-				ask, used);
+		error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
 		if (error)
 			goto out;
+
+		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+				ask, used);
+		if (error) {
+			/*
+			 * Because we didn't have per-AG reservations when the
+			 * finobt feature was added we might not be able to
+			 * reserve all needed blocks.  Warn and fall back to the
+			 * old and potentially buggy code in that case, but
+			 * ensure we do have the reservation for the refcountbt.
+			 */
+			ask = used = 0;
+
+			mp->m_inotbt_nores = true;
+
+			error = xfs_refcountbt_calc_reserves(mp, agno, &ask,
+					&used);
+			if (error)
+				goto out;
+
+			error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+					ask, used);
+			if (error)
+				goto out;
+		}
 	}
 
 	/* Create the AGFL metadata reservation */
 	if (pag->pag_agfl_resv.ar_asked == 0) {
 		ask = used = 0;
 
-		error = xfs_rmapbt_calc_reserves(pag->pag_mount, pag->pag_agno,
-				&ask, &used);
+		error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used);
 		if (error)
 			goto out;
 
@@ -256,6 +289,16 @@ xfs_ag_resv_init(
256 goto out; 289 goto out;
257 } 290 }
258 291
292#ifdef DEBUG
293 /* need to read in the AGF for the ASSERT below to work */
294 error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0);
295 if (error)
296 return error;
297
298 ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
299 xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <=
300 pag->pagf_freeblks + pag->pagf_flcount);
301#endif
259out: 302out:
260 return error; 303 return error;
261} 304}
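
A standalone sketch of the fallback sequence introduced above (all names and numbers hypothetical): try the combined refcountbt + finobt ask first; if the free-space pool cannot cover it, flag the mount as having no finobt reservation and retry with the refcountbt ask alone.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* models xfs_mod_fdblocks(): take (ask - used) blocks or fail */
static bool try_reserve(uint64_t *fdblocks, uint64_t ask, uint64_t used)
{
	uint64_t need = ask > used ? ask - used : 0;

	if (need > *fdblocks)
		return false;
	*fdblocks -= need;
	return true;
}

int main(void)
{
	uint64_t fdblocks = 100;		/* free blocks left */
	uint64_t refc_ask = 60, fino_ask = 80;	/* hypothetical asks */
	bool inotbt_nores = false;

	if (!try_reserve(&fdblocks, refc_ask + fino_ask, 0)) {
		/* degraded fs: remember the finobt has no reservation */
		inotbt_nores = true;
		if (!try_reserve(&fdblocks, refc_ask, 0))
			return 1;	/* not even the refcountbt fits */
	}
	printf("nores=%d free=%llu\n", inotbt_nores,
	       (unsigned long long)fdblocks);
	return 0;
}
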
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 5050056a0b06..9f06a211e157 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -95,10 +95,7 @@ unsigned int
95xfs_alloc_set_aside( 95xfs_alloc_set_aside(
96 struct xfs_mount *mp) 96 struct xfs_mount *mp)
97{ 97{
98 unsigned int blocks; 98 return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
99
100 blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE);
101 return blocks;
102} 99}
103 100
104/* 101/*
@@ -365,36 +362,12 @@ xfs_alloc_fix_len(
365 return; 362 return;
366 ASSERT(rlen >= args->minlen && rlen <= args->maxlen); 363 ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
367 ASSERT(rlen % args->prod == args->mod); 364 ASSERT(rlen % args->prod == args->mod);
365 ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
366 rlen + args->minleft);
368 args->len = rlen; 367 args->len = rlen;
369} 368}
370 369
371/* 370/*
372 * Fix up length if there is too little space left in the a.g.
373 * Return 1 if ok, 0 if too little, should give up.
374 */
375STATIC int
376xfs_alloc_fix_minleft(
377 xfs_alloc_arg_t *args) /* allocation argument structure */
378{
379 xfs_agf_t *agf; /* a.g. freelist header */
380 int diff; /* free space difference */
381
382 if (args->minleft == 0)
383 return 1;
384 agf = XFS_BUF_TO_AGF(args->agbp);
385 diff = be32_to_cpu(agf->agf_freeblks)
386 - args->len - args->minleft;
387 if (diff >= 0)
388 return 1;
389 args->len += diff; /* shrink the allocated space */
390 /* casts to (int) catch length underflows */
391 if ((int)args->len >= (int)args->minlen)
392 return 1;
393 args->agbno = NULLAGBLOCK;
394 return 0;
395}
396
397/*
398 * Update the two btrees, logically removing from freespace the extent 371 * Update the two btrees, logically removing from freespace the extent
399 * starting at rbno, rlen blocks. The extent is contained within the 372 * starting at rbno, rlen blocks. The extent is contained within the
400 * actual (current) free extent fbno for flen blocks. 373 * actual (current) free extent fbno for flen blocks.
@@ -689,8 +662,6 @@ xfs_alloc_ag_vextent(
689 xfs_alloc_arg_t *args) /* argument structure for allocation */ 662 xfs_alloc_arg_t *args) /* argument structure for allocation */
690{ 663{
691 int error=0; 664 int error=0;
692 xfs_extlen_t reservation;
693 xfs_extlen_t oldmax;
694 665
695 ASSERT(args->minlen > 0); 666 ASSERT(args->minlen > 0);
696 ASSERT(args->maxlen > 0); 667 ASSERT(args->maxlen > 0);
@@ -699,20 +670,6 @@ xfs_alloc_ag_vextent(
699 ASSERT(args->alignment > 0); 670 ASSERT(args->alignment > 0);
700 671
701 /* 672 /*
702 * Clamp maxlen to the amount of free space minus any reservations
703 * that have been made.
704 */
705 oldmax = args->maxlen;
706 reservation = xfs_ag_resv_needed(args->pag, args->resv);
707 if (args->maxlen > args->pag->pagf_freeblks - reservation)
708 args->maxlen = args->pag->pagf_freeblks - reservation;
709 if (args->maxlen == 0) {
710 args->agbno = NULLAGBLOCK;
711 args->maxlen = oldmax;
712 return 0;
713 }
714
715 /*
716 * Branch to correct routine based on the type. 673 * Branch to correct routine based on the type.
717 */ 674 */
718 args->wasfromfl = 0; 675 args->wasfromfl = 0;
@@ -731,8 +688,6 @@ xfs_alloc_ag_vextent(
731 /* NOTREACHED */ 688 /* NOTREACHED */
732 } 689 }
733 690
734 args->maxlen = oldmax;
735
736 if (error || args->agbno == NULLAGBLOCK) 691 if (error || args->agbno == NULLAGBLOCK)
737 return error; 692 return error;
738 693
@@ -841,9 +796,6 @@ xfs_alloc_ag_vextent_exact(
841 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen) 796 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
842 - args->agbno; 797 - args->agbno;
843 xfs_alloc_fix_len(args); 798 xfs_alloc_fix_len(args);
844 if (!xfs_alloc_fix_minleft(args))
845 goto not_found;
846
847 ASSERT(args->agbno + args->len <= tend); 799 ASSERT(args->agbno + args->len <= tend);
848 800
849 /* 801 /*
@@ -1149,12 +1101,7 @@ restart:
1149 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0); 1101 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1150 ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); 1102 ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1151 args->len = blen; 1103 args->len = blen;
1152 if (!xfs_alloc_fix_minleft(args)) { 1104
1153 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1154 trace_xfs_alloc_near_nominleft(args);
1155 return 0;
1156 }
1157 blen = args->len;
1158 /* 1105 /*
1159 * We are allocating starting at bnew for blen blocks. 1106 * We are allocating starting at bnew for blen blocks.
1160 */ 1107 */
@@ -1346,12 +1293,6 @@ restart:
1346 */ 1293 */
1347 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); 1294 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1348 xfs_alloc_fix_len(args); 1295 xfs_alloc_fix_len(args);
1349 if (!xfs_alloc_fix_minleft(args)) {
1350 trace_xfs_alloc_near_nominleft(args);
1351 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1352 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1353 return 0;
1354 }
1355 rlen = args->len; 1296 rlen = args->len;
1356 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment, 1297 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
1357 args->datatype, ltbnoa, ltlena, &ltnew); 1298 args->datatype, ltbnoa, ltlena, &ltnew);
@@ -1553,8 +1494,6 @@ restart:
1553 } 1494 }
1554 xfs_alloc_fix_len(args); 1495 xfs_alloc_fix_len(args);
1555 1496
1556 if (!xfs_alloc_fix_minleft(args))
1557 goto out_nominleft;
1558 rlen = args->len; 1497 rlen = args->len;
1559 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0); 1498 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
1560 /* 1499 /*
@@ -2056,7 +1995,7 @@ xfs_alloc_space_available(
2056 int flags) 1995 int flags)
2057{ 1996{
2058 struct xfs_perag *pag = args->pag; 1997 struct xfs_perag *pag = args->pag;
2059 xfs_extlen_t longest; 1998 xfs_extlen_t alloc_len, longest;
2060 xfs_extlen_t reservation; /* blocks that are still reserved */ 1999 xfs_extlen_t reservation; /* blocks that are still reserved */
2061 int available; 2000 int available;
2062 2001
@@ -2066,17 +2005,28 @@ xfs_alloc_space_available(
2066 reservation = xfs_ag_resv_needed(pag, args->resv); 2005 reservation = xfs_ag_resv_needed(pag, args->resv);
2067 2006
2068 /* do we have enough contiguous free space for the allocation? */ 2007 /* do we have enough contiguous free space for the allocation? */
2008 alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2069 longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free, 2009 longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
2070 reservation); 2010 reservation);
2071 if ((args->minlen + args->alignment + args->minalignslop - 1) > longest) 2011 if (longest < alloc_len)
2072 return false; 2012 return false;
2073 2013
2074 /* do we have enough free space remaining for the allocation? */ 2014 /* do we have enough free space remaining for the allocation? */
2075 available = (int)(pag->pagf_freeblks + pag->pagf_flcount - 2015 available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
2076 reservation - min_free - args->total); 2016 reservation - min_free - args->minleft);
2077 if (available < (int)args->minleft || available <= 0) 2017 if (available < (int)max(args->total, alloc_len))
2078 return false; 2018 return false;
2079 2019
2020 /*
2021 * Clamp maxlen to the amount of free space available for the actual
2022 * extent allocation.
2023 */
2024 if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2025 args->maxlen = available;
2026 ASSERT(args->maxlen > 0);
2027 ASSERT(args->maxlen >= args->minlen);
2028 }
2029
2080 return true; 2030 return true;
2081} 2031}
2082 2032
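
The reworked check above, reduced to a compilable userspace sketch (the check_only flag mirrors XFS_ALLOC_FLAG_CHECK; every name and number is illustrative): first a contiguity test against the worst-case aligned length, then an overall-space test that honours reservations and minleft, and finally the new maxlen clamp.

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t extlen_t;

struct ag { extlen_t freeblks, flcount, longest; };

static bool space_available(const struct ag *pag, extlen_t reservation,
			    extlen_t min_free, extlen_t minlen,
			    extlen_t alignment, extlen_t minalignslop,
			    extlen_t minleft, extlen_t total,
			    extlen_t *maxlen, bool check_only)
{
	/* worst case: fitting an aligned minlen extent needs this much */
	extlen_t alloc_len = minlen + (alignment - 1) + minalignslop;
	int32_t available;

	if (pag->longest < alloc_len)		/* no contiguous run */
		return false;

	available = (int32_t)(pag->freeblks + pag->flcount -
			      reservation - min_free - minleft);
	if (available < (int32_t)(total > alloc_len ? total : alloc_len))
		return false;

	/* clamp maxlen to what can really be allocated */
	if (available < (int32_t)*maxlen && !check_only)
		*maxlen = (extlen_t)available;
	return true;
}
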
@@ -2122,7 +2072,8 @@ xfs_alloc_fix_freelist(
2122 } 2072 }
2123 2073
2124 need = xfs_alloc_min_freelist(mp, pag); 2074 need = xfs_alloc_min_freelist(mp, pag);
2125 if (!xfs_alloc_space_available(args, need, flags)) 2075 if (!xfs_alloc_space_available(args, need, flags |
2076 XFS_ALLOC_FLAG_CHECK))
2126 goto out_agbp_relse; 2077 goto out_agbp_relse;
2127 2078
2128 /* 2079 /*
@@ -2638,12 +2589,10 @@ xfs_alloc_vextent(
2638 xfs_agblock_t agsize; /* allocation group size */ 2589 xfs_agblock_t agsize; /* allocation group size */
2639 int error; 2590 int error;
2640 int flags; /* XFS_ALLOC_FLAG_... locking flags */ 2591 int flags; /* XFS_ALLOC_FLAG_... locking flags */
2641 xfs_extlen_t minleft;/* minimum left value, temp copy */
2642 xfs_mount_t *mp; /* mount structure pointer */ 2592 xfs_mount_t *mp; /* mount structure pointer */
2643 xfs_agnumber_t sagno; /* starting allocation group number */ 2593 xfs_agnumber_t sagno; /* starting allocation group number */
2644 xfs_alloctype_t type; /* input allocation type */ 2594 xfs_alloctype_t type; /* input allocation type */
2645 int bump_rotor = 0; 2595 int bump_rotor = 0;
2646 int no_min = 0;
2647 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */ 2596 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2648 2597
2649 mp = args->mp; 2598 mp = args->mp;
@@ -2672,7 +2621,6 @@ xfs_alloc_vextent(
2672 trace_xfs_alloc_vextent_badargs(args); 2621 trace_xfs_alloc_vextent_badargs(args);
2673 return 0; 2622 return 0;
2674 } 2623 }
2675 minleft = args->minleft;
2676 2624
2677 switch (type) { 2625 switch (type) {
2678 case XFS_ALLOCTYPE_THIS_AG: 2626 case XFS_ALLOCTYPE_THIS_AG:
@@ -2683,9 +2631,7 @@ xfs_alloc_vextent(
2683 */ 2631 */
2684 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno); 2632 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2685 args->pag = xfs_perag_get(mp, args->agno); 2633 args->pag = xfs_perag_get(mp, args->agno);
2686 args->minleft = 0;
2687 error = xfs_alloc_fix_freelist(args, 0); 2634 error = xfs_alloc_fix_freelist(args, 0);
2688 args->minleft = minleft;
2689 if (error) { 2635 if (error) {
2690 trace_xfs_alloc_vextent_nofix(args); 2636 trace_xfs_alloc_vextent_nofix(args);
2691 goto error0; 2637 goto error0;
@@ -2750,9 +2696,7 @@ xfs_alloc_vextent(
2750 */ 2696 */
2751 for (;;) { 2697 for (;;) {
2752 args->pag = xfs_perag_get(mp, args->agno); 2698 args->pag = xfs_perag_get(mp, args->agno);
2753 if (no_min) args->minleft = 0;
2754 error = xfs_alloc_fix_freelist(args, flags); 2699 error = xfs_alloc_fix_freelist(args, flags);
2755 args->minleft = minleft;
2756 if (error) { 2700 if (error) {
2757 trace_xfs_alloc_vextent_nofix(args); 2701 trace_xfs_alloc_vextent_nofix(args);
2758 goto error0; 2702 goto error0;
@@ -2792,20 +2736,17 @@ xfs_alloc_vextent(
2792 * or switch to non-trylock mode. 2736 * or switch to non-trylock mode.
2793 */ 2737 */
2794 if (args->agno == sagno) { 2738 if (args->agno == sagno) {
2795 if (no_min == 1) { 2739 if (flags == 0) {
2796 args->agbno = NULLAGBLOCK; 2740 args->agbno = NULLAGBLOCK;
2797 trace_xfs_alloc_vextent_allfailed(args); 2741 trace_xfs_alloc_vextent_allfailed(args);
2798 break; 2742 break;
2799 } 2743 }
2800 if (flags == 0) { 2744
2801 no_min = 1; 2745 flags = 0;
2802 } else { 2746 if (type == XFS_ALLOCTYPE_START_BNO) {
2803 flags = 0; 2747 args->agbno = XFS_FSB_TO_AGBNO(mp,
2804 if (type == XFS_ALLOCTYPE_START_BNO) { 2748 args->fsbno);
2805 args->agbno = XFS_FSB_TO_AGBNO(mp, 2749 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2806 args->fsbno);
2807 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2808 }
2809 } 2750 }
2810 } 2751 }
2811 xfs_perag_put(args->pag); 2752 xfs_perag_put(args->pag);
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 7c404a6b0ae3..1d0f48a501a3 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -56,7 +56,7 @@ typedef unsigned int xfs_alloctype_t;
56#define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/ 56#define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/
57#define XFS_ALLOC_FLAG_NORMAP 0x00000004 /* don't modify the rmapbt */ 57#define XFS_ALLOC_FLAG_NORMAP 0x00000004 /* don't modify the rmapbt */
58#define XFS_ALLOC_FLAG_NOSHRINK 0x00000008 /* don't shrink the freelist */ 58#define XFS_ALLOC_FLAG_NOSHRINK 0x00000008 /* don't shrink the freelist */
59 59#define XFS_ALLOC_FLAG_CHECK 0x00000010 /* test only, don't modify args */
60 60
61/* 61/*
62 * Argument structure for xfs_alloc routines. 62 * Argument structure for xfs_alloc routines.
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index af1ecb19121e..6622d46ddec3 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -131,9 +131,6 @@ xfs_attr_get(
131 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 131 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
132 return -EIO; 132 return -EIO;
133 133
134 if (!xfs_inode_hasattr(ip))
135 return -ENOATTR;
136
137 error = xfs_attr_args_init(&args, ip, name, flags); 134 error = xfs_attr_args_init(&args, ip, name, flags);
138 if (error) 135 if (error)
139 return error; 136 return error;
@@ -392,9 +389,6 @@ xfs_attr_remove(
392 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 389 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
393 return -EIO; 390 return -EIO;
394 391
395 if (!xfs_inode_hasattr(dp))
396 return -ENOATTR;
397
398 error = xfs_attr_args_init(&args, dp, name, flags); 392 error = xfs_attr_args_init(&args, dp, name, flags);
399 if (error) 393 if (error)
400 return error; 394 return error;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 2760bc3b2536..bfc00de5c6f1 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3629,7 +3629,7 @@ xfs_bmap_btalloc(
3629 align = xfs_get_cowextsz_hint(ap->ip); 3629 align = xfs_get_cowextsz_hint(ap->ip);
3630 else if (xfs_alloc_is_userdata(ap->datatype)) 3630 else if (xfs_alloc_is_userdata(ap->datatype))
3631 align = xfs_get_extsz_hint(ap->ip); 3631 align = xfs_get_extsz_hint(ap->ip);
3632 if (unlikely(align)) { 3632 if (align) {
3633 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, 3633 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3634 align, 0, ap->eof, 0, ap->conv, 3634 align, 0, ap->eof, 0, ap->conv,
3635 &ap->offset, &ap->length); 3635 &ap->offset, &ap->length);
@@ -3701,7 +3701,7 @@ xfs_bmap_btalloc(
3701 args.minlen = ap->minlen; 3701 args.minlen = ap->minlen;
3702 } 3702 }
3703 /* apply extent size hints if obtained earlier */ 3703 /* apply extent size hints if obtained earlier */
3704 if (unlikely(align)) { 3704 if (align) {
3705 args.prod = align; 3705 args.prod = align;
3706 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) 3706 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3707 args.mod = (xfs_extlen_t)(args.prod - args.mod); 3707 args.mod = (xfs_extlen_t)(args.prod - args.mod);
@@ -3812,7 +3812,6 @@ xfs_bmap_btalloc(
3812 args.fsbno = 0; 3812 args.fsbno = 0;
3813 args.type = XFS_ALLOCTYPE_FIRST_AG; 3813 args.type = XFS_ALLOCTYPE_FIRST_AG;
3814 args.total = ap->minlen; 3814 args.total = ap->minlen;
3815 args.minleft = 0;
3816 if ((error = xfs_alloc_vextent(&args))) 3815 if ((error = xfs_alloc_vextent(&args)))
3817 return error; 3816 return error;
3818 ap->dfops->dop_low = true; 3817 ap->dfops->dop_low = true;
@@ -4344,8 +4343,6 @@ xfs_bmapi_allocate(
4344 if (error) 4343 if (error)
4345 return error; 4344 return error;
4346 4345
4347 if (bma->dfops->dop_low)
4348 bma->minleft = 0;
4349 if (bma->cur) 4346 if (bma->cur)
4350 bma->cur->bc_private.b.firstblock = *bma->firstblock; 4347 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4351 if (bma->blkno == NULLFSBLOCK) 4348 if (bma->blkno == NULLFSBLOCK)
@@ -4517,8 +4514,6 @@ xfs_bmapi_write(
4517 int n; /* current extent index */ 4514 int n; /* current extent index */
4518 xfs_fileoff_t obno; /* old block number (offset) */ 4515 xfs_fileoff_t obno; /* old block number (offset) */
4519 int whichfork; /* data or attr fork */ 4516 int whichfork; /* data or attr fork */
4520 char inhole; /* current location is hole in file */
4521 char wasdelay; /* old extent was delayed */
4522 4517
4523#ifdef DEBUG 4518#ifdef DEBUG
4524 xfs_fileoff_t orig_bno; /* original block number value */ 4519 xfs_fileoff_t orig_bno; /* original block number value */
@@ -4606,22 +4601,44 @@ xfs_bmapi_write(
4606 bma.firstblock = firstblock; 4601 bma.firstblock = firstblock;
4607 4602
4608 while (bno < end && n < *nmap) { 4603 while (bno < end && n < *nmap) {
4609 inhole = eof || bma.got.br_startoff > bno; 4604 bool need_alloc = false, wasdelay = false;
4610 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
4611 4605
 4612 /* 4606 /* in hole or beyond EOF? */
4613 * Make sure we only reflink into a hole. 4607 if (eof || bma.got.br_startoff > bno) {
4614 */ 4608 if (flags & XFS_BMAPI_DELALLOC) {
4615 if (flags & XFS_BMAPI_REMAP) 4609 /*
4616 ASSERT(inhole); 4610 * For the COW fork we can reasonably get a
4617 if (flags & XFS_BMAPI_COWFORK) 4611 * request for converting an extent that races
4618 ASSERT(!inhole); 4612 * with other threads already having converted
 4613 * part of it, since converting COW to
4614 * regular blocks is not protected using the
4615 * IOLOCK.
4616 */
4617 ASSERT(flags & XFS_BMAPI_COWFORK);
4618 if (!(flags & XFS_BMAPI_COWFORK)) {
4619 error = -EIO;
4620 goto error0;
4621 }
4622
4623 if (eof || bno >= end)
4624 break;
4625 } else {
4626 need_alloc = true;
4627 }
4628 } else {
4629 /*
4630 * Make sure we only reflink into a hole.
4631 */
4632 ASSERT(!(flags & XFS_BMAPI_REMAP));
4633 if (isnullstartblock(bma.got.br_startblock))
4634 wasdelay = true;
4635 }
4619 4636
4620 /* 4637 /*
4621 * First, deal with the hole before the allocated space 4638 * First, deal with the hole before the allocated space
4622 * that we found, if any. 4639 * that we found, if any.
4623 */ 4640 */
4624 if (inhole || wasdelay) { 4641 if (need_alloc || wasdelay) {
4625 bma.eof = eof; 4642 bma.eof = eof;
4626 bma.conv = !!(flags & XFS_BMAPI_CONVERT); 4643 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4627 bma.wasdel = wasdelay; 4644 bma.wasdel = wasdelay;
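
The hole/delalloc classification above as a standalone decision function (the flag value is copied from the xfs_bmap.h hunk below; everything else is a stand-in): a DELALLOC request that lands in a real hole bails out instead of allocating, a plain hole allocates, and an existing delayed extent is converted.

#include <stdbool.h>

#define BMAPI_DELALLOC	0x400	/* matches XFS_BMAPI_DELALLOC below */

struct decision {
	bool need_alloc;	/* allocate fresh blocks */
	bool wasdelay;		/* convert existing delalloc */
	bool bail;		/* DELALLOC found a real hole: stop */
};

static struct decision classify(bool in_hole, bool delay_extent, int flags)
{
	struct decision d = { false, false, false };

	if (in_hole) {
		if (flags & BMAPI_DELALLOC)
			d.bail = true;	/* only convert, never allocate */
		else
			d.need_alloc = true;
	} else if (delay_extent) {
		d.wasdelay = true;
	}
	return d;
}
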
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index cecd094404cc..cdef87db5262 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -110,6 +110,9 @@ struct xfs_extent_free_item
110/* Map something in the CoW fork. */ 110/* Map something in the CoW fork. */
111#define XFS_BMAPI_COWFORK 0x200 111#define XFS_BMAPI_COWFORK 0x200
112 112
113/* Only convert delalloc space, don't allocate entirely new extents */
114#define XFS_BMAPI_DELALLOC 0x400
115
113#define XFS_BMAPI_FLAGS \ 116#define XFS_BMAPI_FLAGS \
114 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ 117 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
115 { XFS_BMAPI_METADATA, "METADATA" }, \ 118 { XFS_BMAPI_METADATA, "METADATA" }, \
@@ -120,7 +123,8 @@ struct xfs_extent_free_item
120 { XFS_BMAPI_CONVERT, "CONVERT" }, \ 123 { XFS_BMAPI_CONVERT, "CONVERT" }, \
121 { XFS_BMAPI_ZERO, "ZERO" }, \ 124 { XFS_BMAPI_ZERO, "ZERO" }, \
122 { XFS_BMAPI_REMAP, "REMAP" }, \ 125 { XFS_BMAPI_REMAP, "REMAP" }, \
123 { XFS_BMAPI_COWFORK, "COWFORK" } 126 { XFS_BMAPI_COWFORK, "COWFORK" }, \
127 { XFS_BMAPI_DELALLOC, "DELALLOC" }
124 128
125 129
126static inline int xfs_bmapi_aflag(int w) 130static inline int xfs_bmapi_aflag(int w)
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index d6330c297ca0..d9be241fc86f 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -502,12 +502,11 @@ try_another_ag:
502 if (args.fsbno == NULLFSBLOCK && args.minleft) { 502 if (args.fsbno == NULLFSBLOCK && args.minleft) {
503 /* 503 /*
504 * Could not find an AG with enough free space to satisfy 504 * Could not find an AG with enough free space to satisfy
505 * a full btree split. Try again without minleft and if 505 * a full btree split. Try again and if
506 * successful activate the lowspace algorithm. 506 * successful activate the lowspace algorithm.
507 */ 507 */
508 args.fsbno = 0; 508 args.fsbno = 0;
509 args.type = XFS_ALLOCTYPE_FIRST_AG; 509 args.type = XFS_ALLOCTYPE_FIRST_AG;
510 args.minleft = 0;
511 error = xfs_alloc_vextent(&args); 510 error = xfs_alloc_vextent(&args);
512 if (error) 511 if (error)
513 goto error0; 512 goto error0;
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index c58d72c220f5..2f389d366e93 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -36,21 +36,29 @@
36struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR }; 36struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
37 37
38/* 38/*
39 * @mode, if set, indicates that the type field needs to be set up. 39 * Convert inode mode to directory entry filetype
40 * This uses the transformation from file mode to DT_* as defined in linux/fs.h
41 * for file type specification. This will be propagated into the directory
42 * structure if appropriate for the given operation and filesystem config.
43 */ 40 */
44const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = { 41unsigned char xfs_mode_to_ftype(int mode)
45 [0] = XFS_DIR3_FT_UNKNOWN, 42{
46 [S_IFREG >> S_SHIFT] = XFS_DIR3_FT_REG_FILE, 43 switch (mode & S_IFMT) {
47 [S_IFDIR >> S_SHIFT] = XFS_DIR3_FT_DIR, 44 case S_IFREG:
48 [S_IFCHR >> S_SHIFT] = XFS_DIR3_FT_CHRDEV, 45 return XFS_DIR3_FT_REG_FILE;
49 [S_IFBLK >> S_SHIFT] = XFS_DIR3_FT_BLKDEV, 46 case S_IFDIR:
50 [S_IFIFO >> S_SHIFT] = XFS_DIR3_FT_FIFO, 47 return XFS_DIR3_FT_DIR;
51 [S_IFSOCK >> S_SHIFT] = XFS_DIR3_FT_SOCK, 48 case S_IFCHR:
52 [S_IFLNK >> S_SHIFT] = XFS_DIR3_FT_SYMLINK, 49 return XFS_DIR3_FT_CHRDEV;
53}; 50 case S_IFBLK:
51 return XFS_DIR3_FT_BLKDEV;
52 case S_IFIFO:
53 return XFS_DIR3_FT_FIFO;
54 case S_IFSOCK:
55 return XFS_DIR3_FT_SOCK;
56 case S_IFLNK:
57 return XFS_DIR3_FT_SYMLINK;
58 default:
59 return XFS_DIR3_FT_UNKNOWN;
60 }
61}
54 62
55/* 63/*
56 * ASCII case-insensitive (ie. A-Z) support for directories that was 64 * ASCII case-insensitive (ie. A-Z) support for directories that was
@@ -631,7 +639,8 @@ xfs_dir2_isblock(
631 if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK))) 639 if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
632 return rval; 640 return rval;
633 rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize; 641 rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
634 ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize); 642 if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
643 return -EFSCORRUPTED;
635 *vp = rval; 644 *vp = rval;
636 return 0; 645 return 0;
637} 646}
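
A compilable sketch of the new switch-based conversion and the validation idiom that the xfs_dinode_verify() hunk later builds on (local FT_* stand-ins for the XFS_DIR3_FT_* values): any mode whose type bits match none of the known file types maps to unknown, which callers treat as corruption.

#include <sys/stat.h>
#include <stdio.h>

enum { FT_UNKNOWN, FT_REG_FILE, FT_DIR, FT_SYMLINK };

static unsigned char mode_to_ftype(int mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:	return FT_REG_FILE;
	case S_IFDIR:	return FT_DIR;
	case S_IFLNK:	return FT_SYMLINK;
	default:	return FT_UNKNOWN;	/* bogus type bits */
	}
}

int main(void)
{
	int mode = S_IFDIR | 0755;

	/* nonzero mode with no recognizable type => corrupt inode */
	if (mode && mode_to_ftype(mode) == FT_UNKNOWN)
		fprintf(stderr, "corrupt mode 0%o\n", mode);
	return 0;
}
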
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 0197590fa7d7..d6e6d9d16f6c 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -18,6 +18,9 @@
18#ifndef __XFS_DIR2_H__ 18#ifndef __XFS_DIR2_H__
19#define __XFS_DIR2_H__ 19#define __XFS_DIR2_H__
20 20
21#include "xfs_da_format.h"
22#include "xfs_da_btree.h"
23
21struct xfs_defer_ops; 24struct xfs_defer_ops;
22struct xfs_da_args; 25struct xfs_da_args;
23struct xfs_inode; 26struct xfs_inode;
@@ -32,10 +35,9 @@ struct xfs_dir2_data_unused;
32extern struct xfs_name xfs_name_dotdot; 35extern struct xfs_name xfs_name_dotdot;
33 36
34/* 37/*
35 * directory filetype conversion tables. 38 * Convert inode mode to directory entry filetype
36 */ 39 */
37#define S_SHIFT 12 40extern unsigned char xfs_mode_to_ftype(int mode);
38extern const unsigned char xfs_mode_to_ftype[];
39 41
40/* 42/*
41 * directory operations vector for encode/decode routines 43 * directory operations vector for encode/decode routines
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 0fd086d03d41..7c471881c9a6 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -82,11 +82,12 @@ xfs_finobt_set_root(
82} 82}
83 83
84STATIC int 84STATIC int
85xfs_inobt_alloc_block( 85__xfs_inobt_alloc_block(
86 struct xfs_btree_cur *cur, 86 struct xfs_btree_cur *cur,
87 union xfs_btree_ptr *start, 87 union xfs_btree_ptr *start,
88 union xfs_btree_ptr *new, 88 union xfs_btree_ptr *new,
89 int *stat) 89 int *stat,
90 enum xfs_ag_resv_type resv)
90{ 91{
91 xfs_alloc_arg_t args; /* block allocation args */ 92 xfs_alloc_arg_t args; /* block allocation args */
92 int error; /* error return value */ 93 int error; /* error return value */
@@ -103,6 +104,7 @@ xfs_inobt_alloc_block(
103 args.maxlen = 1; 104 args.maxlen = 1;
104 args.prod = 1; 105 args.prod = 1;
105 args.type = XFS_ALLOCTYPE_NEAR_BNO; 106 args.type = XFS_ALLOCTYPE_NEAR_BNO;
107 args.resv = resv;
106 108
107 error = xfs_alloc_vextent(&args); 109 error = xfs_alloc_vextent(&args);
108 if (error) { 110 if (error) {
@@ -123,6 +125,27 @@ xfs_inobt_alloc_block(
123} 125}
124 126
125STATIC int 127STATIC int
128xfs_inobt_alloc_block(
129 struct xfs_btree_cur *cur,
130 union xfs_btree_ptr *start,
131 union xfs_btree_ptr *new,
132 int *stat)
133{
134 return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
135}
136
137STATIC int
138xfs_finobt_alloc_block(
139 struct xfs_btree_cur *cur,
140 union xfs_btree_ptr *start,
141 union xfs_btree_ptr *new,
142 int *stat)
143{
144 return __xfs_inobt_alloc_block(cur, start, new, stat,
145 XFS_AG_RESV_METADATA);
146}
147
148STATIC int
126xfs_inobt_free_block( 149xfs_inobt_free_block(
127 struct xfs_btree_cur *cur, 150 struct xfs_btree_cur *cur,
128 struct xfs_buf *bp) 151 struct xfs_buf *bp)
@@ -328,7 +351,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
328 351
329 .dup_cursor = xfs_inobt_dup_cursor, 352 .dup_cursor = xfs_inobt_dup_cursor,
330 .set_root = xfs_finobt_set_root, 353 .set_root = xfs_finobt_set_root,
331 .alloc_block = xfs_inobt_alloc_block, 354 .alloc_block = xfs_finobt_alloc_block,
332 .free_block = xfs_inobt_free_block, 355 .free_block = xfs_inobt_free_block,
333 .get_minrecs = xfs_inobt_get_minrecs, 356 .get_minrecs = xfs_inobt_get_minrecs,
334 .get_maxrecs = xfs_inobt_get_maxrecs, 357 .get_maxrecs = xfs_inobt_get_maxrecs,
@@ -480,3 +503,64 @@ xfs_inobt_rec_check_count(
480 return 0; 503 return 0;
481} 504}
482#endif /* DEBUG */ 505#endif /* DEBUG */
506
507static xfs_extlen_t
508xfs_inobt_max_size(
509 struct xfs_mount *mp)
510{
511 /* Bail out if we're uninitialized, which can happen in mkfs. */
512 if (mp->m_inobt_mxr[0] == 0)
513 return 0;
514
515 return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
516 (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
517 XFS_INODES_PER_CHUNK);
518}
519
520static int
521xfs_inobt_count_blocks(
522 struct xfs_mount *mp,
523 xfs_agnumber_t agno,
524 xfs_btnum_t btnum,
525 xfs_extlen_t *tree_blocks)
526{
527 struct xfs_buf *agbp;
528 struct xfs_btree_cur *cur;
529 int error;
530
531 error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
532 if (error)
533 return error;
534
535 cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
536 error = xfs_btree_count_blocks(cur, tree_blocks);
537 xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
538 xfs_buf_relse(agbp);
539
540 return error;
541}
542
543/*
544 * Figure out how many blocks to reserve and how many are used by this btree.
545 */
546int
547xfs_finobt_calc_reserves(
548 struct xfs_mount *mp,
549 xfs_agnumber_t agno,
550 xfs_extlen_t *ask,
551 xfs_extlen_t *used)
552{
553 xfs_extlen_t tree_len = 0;
554 int error;
555
556 if (!xfs_sb_version_hasfinobt(&mp->m_sb))
557 return 0;
558
559 error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
560 if (error)
561 return error;
562
563 *ask += xfs_inobt_max_size(mp);
564 *used += tree_len;
565 return 0;
566}
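
The shape of the new finobt reservation accounting, as a sketch (calc_size() is a made-up stand-in for xfs_btree_calc_size()): ask grows by the worst-case tree size for the AG's possible inodes, used grows by the blocks the tree occupies today.

#include <stdint.h>

struct resv { uint64_t ask; uint64_t used; };

/* hypothetical: a btree over n records needs roughly n/250 + 1 blocks */
static uint64_t calc_size(uint64_t records)
{
	return records / 250 + 1;
}

static void finobt_calc_reserves(uint64_t agblocks, uint64_t inopblock,
				 uint64_t inodes_per_chunk,
				 uint64_t tree_blocks, struct resv *r)
{
	uint64_t max_recs = agblocks * inopblock / inodes_per_chunk;

	r->ask += calc_size(max_recs);	/* worst-case future size */
	r->used += tree_blocks;		/* blocks in use right now */
}
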
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h
index bd88453217ce..aa81e2e63f3f 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.h
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.h
@@ -72,4 +72,7 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
72#define xfs_inobt_rec_check_count(mp, rec) 0 72#define xfs_inobt_rec_check_count(mp, rec) 0
73#endif /* DEBUG */ 73#endif /* DEBUG */
74 74
75int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno,
76 xfs_extlen_t *ask, xfs_extlen_t *used);
77
75#endif /* __XFS_IALLOC_BTREE_H__ */ 78#endif /* __XFS_IALLOC_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index dd483e2767f7..d93f9d918cfc 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -29,6 +29,7 @@
29#include "xfs_icache.h" 29#include "xfs_icache.h"
30#include "xfs_trans.h" 30#include "xfs_trans.h"
31#include "xfs_ialloc.h" 31#include "xfs_ialloc.h"
32#include "xfs_dir2.h"
32 33
33/* 34/*
34 * Check that none of the inode's in the buffer have a next 35 * Check that none of the inode's in the buffer have a next
@@ -386,6 +387,7 @@ xfs_dinode_verify(
386 xfs_ino_t ino, 387 xfs_ino_t ino,
387 struct xfs_dinode *dip) 388 struct xfs_dinode *dip)
388{ 389{
390 uint16_t mode;
389 uint16_t flags; 391 uint16_t flags;
390 uint64_t flags2; 392 uint64_t flags2;
391 393
@@ -396,8 +398,12 @@ xfs_dinode_verify(
396 if (be64_to_cpu(dip->di_size) & (1ULL << 63)) 398 if (be64_to_cpu(dip->di_size) & (1ULL << 63))
397 return false; 399 return false;
398 400
399 /* No zero-length symlinks. */ 401 mode = be16_to_cpu(dip->di_mode);
400 if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0) 402 if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
403 return false;
404
405 /* No zero-length symlinks/dirs. */
406 if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
401 return false; 407 return false;
402 408
403 /* only version 3 or greater inodes are extensively verified here */ 409 /* only version 3 or greater inodes are extensively verified here */
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index 6fb2215f8ff7..50add5272807 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -409,13 +409,14 @@ xfs_refcountbt_calc_size(
409 */ 409 */
410xfs_extlen_t 410xfs_extlen_t
411xfs_refcountbt_max_size( 411xfs_refcountbt_max_size(
412 struct xfs_mount *mp) 412 struct xfs_mount *mp,
413 xfs_agblock_t agblocks)
413{ 414{
414 /* Bail out if we're uninitialized, which can happen in mkfs. */ 415 /* Bail out if we're uninitialized, which can happen in mkfs. */
415 if (mp->m_refc_mxr[0] == 0) 416 if (mp->m_refc_mxr[0] == 0)
416 return 0; 417 return 0;
417 418
418 return xfs_refcountbt_calc_size(mp, mp->m_sb.sb_agblocks); 419 return xfs_refcountbt_calc_size(mp, agblocks);
419} 420}
420 421
421/* 422/*
@@ -430,22 +431,24 @@ xfs_refcountbt_calc_reserves(
430{ 431{
431 struct xfs_buf *agbp; 432 struct xfs_buf *agbp;
432 struct xfs_agf *agf; 433 struct xfs_agf *agf;
434 xfs_agblock_t agblocks;
433 xfs_extlen_t tree_len; 435 xfs_extlen_t tree_len;
434 int error; 436 int error;
435 437
436 if (!xfs_sb_version_hasreflink(&mp->m_sb)) 438 if (!xfs_sb_version_hasreflink(&mp->m_sb))
437 return 0; 439 return 0;
438 440
439 *ask += xfs_refcountbt_max_size(mp);
440 441
441 error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); 442 error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
442 if (error) 443 if (error)
443 return error; 444 return error;
444 445
445 agf = XFS_BUF_TO_AGF(agbp); 446 agf = XFS_BUF_TO_AGF(agbp);
447 agblocks = be32_to_cpu(agf->agf_length);
446 tree_len = be32_to_cpu(agf->agf_refcount_blocks); 448 tree_len = be32_to_cpu(agf->agf_refcount_blocks);
447 xfs_buf_relse(agbp); 449 xfs_buf_relse(agbp);
448 450
451 *ask += xfs_refcountbt_max_size(mp, agblocks);
449 *used += tree_len; 452 *used += tree_len;
450 453
451 return error; 454 return error;
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.h b/fs/xfs/libxfs/xfs_refcount_btree.h
index 3be7768bd51a..9db008b955b7 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.h
+++ b/fs/xfs/libxfs/xfs_refcount_btree.h
@@ -66,7 +66,8 @@ extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
66 66
67extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp, 67extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp,
68 unsigned long long len); 68 unsigned long long len);
69extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp); 69extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp,
70 xfs_agblock_t agblocks);
70 71
71extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp, 72extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp,
72 xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used); 73 xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index de25771764ba..74e5a54bc428 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -550,13 +550,14 @@ xfs_rmapbt_calc_size(
550 */ 550 */
551xfs_extlen_t 551xfs_extlen_t
552xfs_rmapbt_max_size( 552xfs_rmapbt_max_size(
553 struct xfs_mount *mp) 553 struct xfs_mount *mp,
554 xfs_agblock_t agblocks)
554{ 555{
555 /* Bail out if we're uninitialized, which can happen in mkfs. */ 556 /* Bail out if we're uninitialized, which can happen in mkfs. */
556 if (mp->m_rmap_mxr[0] == 0) 557 if (mp->m_rmap_mxr[0] == 0)
557 return 0; 558 return 0;
558 559
559 return xfs_rmapbt_calc_size(mp, mp->m_sb.sb_agblocks); 560 return xfs_rmapbt_calc_size(mp, agblocks);
560} 561}
561 562
562/* 563/*
@@ -571,25 +572,24 @@ xfs_rmapbt_calc_reserves(
571{ 572{
572 struct xfs_buf *agbp; 573 struct xfs_buf *agbp;
573 struct xfs_agf *agf; 574 struct xfs_agf *agf;
574 xfs_extlen_t pool_len; 575 xfs_agblock_t agblocks;
575 xfs_extlen_t tree_len; 576 xfs_extlen_t tree_len;
576 int error; 577 int error;
577 578
578 if (!xfs_sb_version_hasrmapbt(&mp->m_sb)) 579 if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
579 return 0; 580 return 0;
580 581
581 /* Reserve 1% of the AG or enough for 1 block per record. */
582 pool_len = max(mp->m_sb.sb_agblocks / 100, xfs_rmapbt_max_size(mp));
583 *ask += pool_len;
584
585 error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); 582 error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
586 if (error) 583 if (error)
587 return error; 584 return error;
588 585
589 agf = XFS_BUF_TO_AGF(agbp); 586 agf = XFS_BUF_TO_AGF(agbp);
587 agblocks = be32_to_cpu(agf->agf_length);
590 tree_len = be32_to_cpu(agf->agf_rmap_blocks); 588 tree_len = be32_to_cpu(agf->agf_rmap_blocks);
591 xfs_buf_relse(agbp); 589 xfs_buf_relse(agbp);
592 590
591 /* Reserve 1% of the AG or enough for 1 block per record. */
592 *ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
593 *used += tree_len; 593 *used += tree_len;
594 594
595 return error; 595 return error;
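
The corrected pool sizing in miniature (the refcountbt hunk above follows the same pattern): both reservations are now derived from the AGF's actual agf_length rather than sb_agblocks, so a short final AG no longer over-reserves. A sketch with assumed names:

#include <stdint.h>

/* reserve 1% of the AG, or enough for one block per record if larger */
static uint64_t rmapbt_pool_len(uint64_t agf_length, uint64_t max_tree_size)
{
	uint64_t one_percent = agf_length / 100;

	return one_percent > max_tree_size ? one_percent : max_tree_size;
}
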
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h
index 2a9ac472fb15..19c08e933049 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.h
+++ b/fs/xfs/libxfs/xfs_rmap_btree.h
@@ -60,7 +60,8 @@ extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);
60 60
61extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp, 61extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp,
62 unsigned long long len); 62 unsigned long long len);
63extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp); 63extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp,
64 xfs_agblock_t agblocks);
64 65
65extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp, 66extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp,
66 xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used); 67 xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 2580262e4ea0..584ec896a533 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -242,7 +242,7 @@ xfs_mount_validate_sb(
242 sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG || 242 sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
243 sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || 243 sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
244 sbp->sb_blocksize != (1 << sbp->sb_blocklog) || 244 sbp->sb_blocksize != (1 << sbp->sb_blocklog) ||
245 sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG || 245 sbp->sb_dirblklog + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
246 sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || 246 sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
247 sbp->sb_inodesize > XFS_DINODE_MAX_SIZE || 247 sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
248 sbp->sb_inodelog < XFS_DINODE_MIN_LOG || 248 sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
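
Why the combined bound matters: the directory block size is 1 << (sb_blocklog + sb_dirblklog), so checking sb_dirblklog alone still admits a sum past XFS_MAX_BLOCKSIZE_LOG and an oversized directory block size. A sketch with the usual XFS limit assumed:

#include <stdbool.h>
#include <stdint.h>

#define MAX_BLOCKSIZE_LOG 16		/* 64k blocks, as in XFS */

static bool dir_geometry_valid(uint8_t blocklog, uint8_t dirblklog)
{
	/* old check: dirblklog <= MAX_BLOCKSIZE_LOG  (insufficient) */
	/* new check: the directory block size itself has to fit     */
	return blocklog + dirblklog <= MAX_BLOCKSIZE_LOG;
}
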
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 0f56fcd3a5d5..631e7c0e0a29 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1152,19 +1152,22 @@ xfs_vm_releasepage(
1152 * block_invalidatepage() can send pages that are still marked dirty 1152 * block_invalidatepage() can send pages that are still marked dirty
1153 * but otherwise have invalidated buffers. 1153 * but otherwise have invalidated buffers.
1154 * 1154 *
1155 * We've historically freed buffers on the latter. Instead, quietly 1155 * We want to release the latter to avoid unnecessary buildup of the
1156 * filter out all dirty pages to avoid spurious buffer state warnings. 1156 * LRU, skip the former and warn if we've left any lingering
1157 * This can likely be removed once shrink_active_list() is fixed. 1157 * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
1158 * or unwritten buffers and warn if the page is not dirty. Otherwise
1159 * try to release the buffers.
1158 */ 1160 */
1159 if (PageDirty(page))
1160 return 0;
1161
1162 xfs_count_page_state(page, &delalloc, &unwritten); 1161 xfs_count_page_state(page, &delalloc, &unwritten);
1163 1162
1164 if (WARN_ON_ONCE(delalloc)) 1163 if (delalloc) {
1164 WARN_ON_ONCE(!PageDirty(page));
1165 return 0; 1165 return 0;
1166 if (WARN_ON_ONCE(unwritten)) 1166 }
1167 if (unwritten) {
1168 WARN_ON_ONCE(!PageDirty(page));
1167 return 0; 1169 return 0;
1170 }
1168 1171
1169 return try_to_free_buffers(page); 1172 return try_to_free_buffers(page);
1170} 1173}
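
The new release policy as a standalone predicate (names assumed): delalloc or unwritten buffers are only legitimate on dirty pages, so they block the release and warn when found on a clean page, while plain buffered pages fall through to the normal free path.

#include <stdbool.h>
#include <stdio.h>

static bool may_release_buffers(bool page_dirty, int delalloc, int unwritten)
{
	if (delalloc || unwritten) {
		if (!page_dirty)	/* stale state on a clean page */
			fprintf(stderr, "warn: lingering buffers\n");
		return false;		/* never touch these buffers */
	}
	return true;			/* models try_to_free_buffers() */
}
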
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index b9abce524c33..c1417919ab0a 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -528,7 +528,6 @@ xfs_getbmap(
528 xfs_bmbt_irec_t *map; /* buffer for user's data */ 528 xfs_bmbt_irec_t *map; /* buffer for user's data */
529 xfs_mount_t *mp; /* file system mount point */ 529 xfs_mount_t *mp; /* file system mount point */
530 int nex; /* # of user extents can do */ 530 int nex; /* # of user extents can do */
531 int nexleft; /* # of user extents left */
532 int subnex; /* # of bmapi's can do */ 531 int subnex; /* # of bmapi's can do */
533 int nmap; /* number of map entries */ 532 int nmap; /* number of map entries */
534 struct getbmapx *out; /* output structure */ 533 struct getbmapx *out; /* output structure */
@@ -686,10 +685,8 @@ xfs_getbmap(
686 goto out_free_map; 685 goto out_free_map;
687 } 686 }
688 687
689 nexleft = nex;
690
691 do { 688 do {
 692 nmap = (nexleft > subnex) ? subnex : nexleft; 689 nmap = (nex > subnex) ? subnex : nex;
693 error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset), 690 error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
694 XFS_BB_TO_FSB(mp, bmv->bmv_length), 691 XFS_BB_TO_FSB(mp, bmv->bmv_length),
695 map, &nmap, bmapi_flags); 692 map, &nmap, bmapi_flags);
@@ -697,8 +694,8 @@ xfs_getbmap(
697 goto out_free_map; 694 goto out_free_map;
698 ASSERT(nmap <= subnex); 695 ASSERT(nmap <= subnex);
699 696
700 for (i = 0; i < nmap && nexleft && bmv->bmv_length && 697 for (i = 0; i < nmap && bmv->bmv_length &&
701 cur_ext < bmv->bmv_count; i++) { 698 cur_ext < bmv->bmv_count - 1; i++) {
702 out[cur_ext].bmv_oflags = 0; 699 out[cur_ext].bmv_oflags = 0;
703 if (map[i].br_state == XFS_EXT_UNWRITTEN) 700 if (map[i].br_state == XFS_EXT_UNWRITTEN)
704 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC; 701 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
@@ -760,16 +757,27 @@ xfs_getbmap(
760 continue; 757 continue;
761 } 758 }
762 759
760 /*
761 * In order to report shared extents accurately,
762 * we report each distinct shared/unshared part
763 * of a single bmbt record using multiple bmap
764 * extents. To make that happen, we iterate the
765 * same map array item multiple times, each
766 * time trimming out the subextent that we just
767 * reported.
768 *
769 * Because of this, we must check the out array
770 * index (cur_ext) directly against bmv_count-1
771 * to avoid overflows.
772 */
763 if (inject_map.br_startblock != NULLFSBLOCK) { 773 if (inject_map.br_startblock != NULLFSBLOCK) {
764 map[i] = inject_map; 774 map[i] = inject_map;
765 i--; 775 i--;
766 } else 776 }
767 nexleft--;
768 bmv->bmv_entries++; 777 bmv->bmv_entries++;
769 cur_ext++; 778 cur_ext++;
770 } 779 }
771 } while (nmap && nexleft && bmv->bmv_length && 780 } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
772 cur_ext < bmv->bmv_count);
773 781
774 out_free_map: 782 out_free_map:
775 kmem_free(map); 783 kmem_free(map);
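
The overflow scenario from the comment above, runnable in miniature (made-up numbers): each input record can expand into several output extents, so both loops bound on the output index against bmv_count - 1 rather than counting down input records.

#include <stdio.h>

int main(void)
{
	int bmv_count = 4;			/* output slots */
	int cur_ext = 0;
	int pieces[] = { 1, 3, 2 };		/* pieces per input record */

	for (int i = 0; i < 3 && cur_ext < bmv_count - 1; i++)
		for (int p = 0; p < pieces[i] && cur_ext < bmv_count - 1; p++)
			printf("out[%d] <- record %d piece %d\n",
			       cur_ext++, i, p);
	return 0;
}
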
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 7f0a01f7b592..ac3b4db519df 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -422,6 +422,7 @@ retry:
422out_free_pages: 422out_free_pages:
423 for (i = 0; i < bp->b_page_count; i++) 423 for (i = 0; i < bp->b_page_count; i++)
424 __free_page(bp->b_pages[i]); 424 __free_page(bp->b_pages[i]);
425 bp->b_flags &= ~_XBF_PAGES;
425 return error; 426 return error;
426} 427}
427 428
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 7a30b8f11db7..9d06cc30e875 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -710,6 +710,10 @@ xfs_dq_get_next_id(
710 /* Simple advance */ 710 /* Simple advance */
711 next_id = *id + 1; 711 next_id = *id + 1;
712 712
713 /* If we'd wrap past the max ID, stop */
714 if (next_id < *id)
715 return -ENOENT;
716
713 /* If new ID is within the current chunk, advancing it sufficed */ 717 /* If new ID is within the current chunk, advancing it sufficed */
714 if (next_id % mp->m_quotainfo->qi_dqperchunk) { 718 if (next_id % mp->m_quotainfo->qi_dqperchunk) {
715 *id = next_id; 719 *id = next_id;
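
The unsigned-wrap guard above, demonstrated: when *id is the all-ones quota ID, *id + 1 wraps to zero and the scan would restart forever; next < current detects exactly that case.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t id = UINT32_MAX;	/* the last possible quota ID */
	uint32_t next = id + 1;		/* wraps to 0 */

	if (next < id)
		puts("-ENOENT: walked past the maximum ID");
	return 0;
}
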
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 93d12fa2670d..242e8091296d 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -631,6 +631,20 @@ xfs_growfs_data_private(
631 xfs_set_low_space_thresholds(mp); 631 xfs_set_low_space_thresholds(mp);
632 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp); 632 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
633 633
634 /*
635 * If we expanded the last AG, free the per-AG reservation
636 * so we can reinitialize it with the new size.
637 */
638 if (new) {
639 struct xfs_perag *pag;
640
641 pag = xfs_perag_get(mp, agno);
642 error = xfs_ag_resv_free(pag);
643 xfs_perag_put(pag);
644 if (error)
645 goto out;
646 }
647
634 /* Reserve AG metadata blocks. */ 648 /* Reserve AG metadata blocks. */
635 error = xfs_fs_reserve_ag_blocks(mp); 649 error = xfs_fs_reserve_ag_blocks(mp);
636 if (error && error != -ENOSPC) 650 if (error && error != -ENOSPC)
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index ff4d6311c7f4..70ca4f608321 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1597,7 +1597,8 @@ xfs_inode_free_cowblocks(
1597 * If the mapping is dirty or under writeback we cannot touch the 1597 * If the mapping is dirty or under writeback we cannot touch the
1598 * CoW fork. Leave it alone if we're in the midst of a directio. 1598 * CoW fork. Leave it alone if we're in the midst of a directio.
1599 */ 1599 */
1600 if (mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) || 1600 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1601 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1601 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) || 1602 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1602 atomic_read(&VFS_I(ip)->i_dio_count)) 1603 atomic_read(&VFS_I(ip)->i_dio_count))
1603 return 0; 1604 return 0;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b9557795eb74..de32f0fe47c8 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1792,22 +1792,23 @@ xfs_inactive_ifree(
1792 int error; 1792 int error;
1793 1793
1794 /* 1794 /*
1795 * The ifree transaction might need to allocate blocks for record 1795 * We try to use a per-AG reservation for any block needed by the finobt
1796 * insertion to the finobt. We don't want to fail here at ENOSPC, so 1796 * tree, but as the finobt feature predates the per-AG reservation
1797 * allow ifree to dip into the reserved block pool if necessary. 1797 * support a degraded file system might not have enough space for the
1798 * 1798 * reservation at mount time. In that case try to dip into the reserved
1799 * Freeing large sets of inodes generally means freeing inode chunks, 1799 * pool and pray.
1800 * directory and file data blocks, so this should be relatively safe.
1801 * Only under severe circumstances should it be possible to free enough
1802 * inodes to exhaust the reserve block pool via finobt expansion while
1803 * at the same time not creating free space in the filesystem.
1804 * 1800 *
1805 * Send a warning if the reservation does happen to fail, as the inode 1801 * Send a warning if the reservation does happen to fail, as the inode
1806 * now remains allocated and sits on the unlinked list until the fs is 1802 * now remains allocated and sits on the unlinked list until the fs is
1807 * repaired. 1803 * repaired.
1808 */ 1804 */
1809 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 1805 if (unlikely(mp->m_inotbt_nores)) {
1810 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp); 1806 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1807 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1808 &tp);
1809 } else {
1810 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1811 }
1811 if (error) { 1812 if (error) {
1812 if (error == -ENOSPC) { 1813 if (error == -ENOSPC) {
1813 xfs_warn_ratelimited(mp, 1814 xfs_warn_ratelimited(mp,
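
The reservation choice above in miniature (all names illustrative): only a mount flagged m_inotbt_nores needs real block reservations plus permission to dip into the reserved pool, because on healthy mounts finobt growth is covered by the per-AG reservation.

#include <stdbool.h>
#include <stdint.h>

struct trans_res {
	uint32_t blocks;	/* blocks reserved up front */
	bool dip_into_pool;	/* XFS_TRANS_RESERVE analogue */
};

static struct trans_res ifree_trans_res(bool inotbt_nores,
					uint32_t ifree_space_res)
{
	struct trans_res r = { 0, false };

	if (inotbt_nores) {	/* degraded: finobt has no reservation */
		r.blocks = ifree_space_res;
		r.dip_into_pool = true;
	}
	return r;
}
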
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 0d147428971e..1aa3abd67b36 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -681,7 +681,7 @@ xfs_iomap_write_allocate(
681 xfs_trans_t *tp; 681 xfs_trans_t *tp;
682 int nimaps; 682 int nimaps;
683 int error = 0; 683 int error = 0;
684 int flags = 0; 684 int flags = XFS_BMAPI_DELALLOC;
685 int nres; 685 int nres;
686 686
687 if (whichfork == XFS_COW_FORK) 687 if (whichfork == XFS_COW_FORK)
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 308bebb6dfd2..22c16155f1b4 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -98,12 +98,27 @@ xfs_init_security(
98static void 98static void
99xfs_dentry_to_name( 99xfs_dentry_to_name(
100 struct xfs_name *namep, 100 struct xfs_name *namep,
101 struct dentry *dentry)
102{
103 namep->name = dentry->d_name.name;
104 namep->len = dentry->d_name.len;
105 namep->type = XFS_DIR3_FT_UNKNOWN;
106}
107
108static int
109xfs_dentry_mode_to_name(
110 struct xfs_name *namep,
101 struct dentry *dentry, 111 struct dentry *dentry,
102 int mode) 112 int mode)
103{ 113{
104 namep->name = dentry->d_name.name; 114 namep->name = dentry->d_name.name;
105 namep->len = dentry->d_name.len; 115 namep->len = dentry->d_name.len;
106 namep->type = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT]; 116 namep->type = xfs_mode_to_ftype(mode);
117
118 if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
119 return -EFSCORRUPTED;
120
121 return 0;
107} 122}
108 123
109STATIC void 124STATIC void
@@ -119,7 +134,7 @@ xfs_cleanup_inode(
119 * xfs_init_security we must back out. 134 * xfs_init_security we must back out.
120 * ENOSPC can hit here, among other things. 135 * ENOSPC can hit here, among other things.
121 */ 136 */
122 xfs_dentry_to_name(&teardown, dentry, 0); 137 xfs_dentry_to_name(&teardown, dentry);
123 138
124 xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); 139 xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
125} 140}
@@ -154,8 +169,12 @@ xfs_generic_create(
154 if (error) 169 if (error)
155 return error; 170 return error;
156 171
 172 /* Verify the mode is valid, also for the tmpfile case */
173 error = xfs_dentry_mode_to_name(&name, dentry, mode);
174 if (unlikely(error))
175 goto out_free_acl;
176
157 if (!tmpfile) { 177 if (!tmpfile) {
158 xfs_dentry_to_name(&name, dentry, mode);
159 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); 178 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
160 } else { 179 } else {
161 error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip); 180 error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
@@ -248,7 +267,7 @@ xfs_vn_lookup(
248 if (dentry->d_name.len >= MAXNAMELEN) 267 if (dentry->d_name.len >= MAXNAMELEN)
249 return ERR_PTR(-ENAMETOOLONG); 268 return ERR_PTR(-ENAMETOOLONG);
250 269
251 xfs_dentry_to_name(&name, dentry, 0); 270 xfs_dentry_to_name(&name, dentry);
252 error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); 271 error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
253 if (unlikely(error)) { 272 if (unlikely(error)) {
254 if (unlikely(error != -ENOENT)) 273 if (unlikely(error != -ENOENT))
@@ -275,7 +294,7 @@ xfs_vn_ci_lookup(
275 if (dentry->d_name.len >= MAXNAMELEN) 294 if (dentry->d_name.len >= MAXNAMELEN)
276 return ERR_PTR(-ENAMETOOLONG); 295 return ERR_PTR(-ENAMETOOLONG);
277 296
278 xfs_dentry_to_name(&xname, dentry, 0); 297 xfs_dentry_to_name(&xname, dentry);
279 error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); 298 error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
280 if (unlikely(error)) { 299 if (unlikely(error)) {
281 if (unlikely(error != -ENOENT)) 300 if (unlikely(error != -ENOENT))
@@ -310,7 +329,9 @@ xfs_vn_link(
310 struct xfs_name name; 329 struct xfs_name name;
311 int error; 330 int error;
312 331
313 xfs_dentry_to_name(&name, dentry, inode->i_mode); 332 error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
333 if (unlikely(error))
334 return error;
314 335
315 error = xfs_link(XFS_I(dir), XFS_I(inode), &name); 336 error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
316 if (unlikely(error)) 337 if (unlikely(error))
@@ -329,7 +350,7 @@ xfs_vn_unlink(
329 struct xfs_name name; 350 struct xfs_name name;
330 int error; 351 int error;
331 352
332 xfs_dentry_to_name(&name, dentry, 0); 353 xfs_dentry_to_name(&name, dentry);
333 354
334 error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry))); 355 error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
335 if (error) 356 if (error)
@@ -359,7 +380,9 @@ xfs_vn_symlink(
359 380
360 mode = S_IFLNK | 381 mode = S_IFLNK |
361 (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO); 382 (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
362 xfs_dentry_to_name(&name, dentry, mode); 383 error = xfs_dentry_mode_to_name(&name, dentry, mode);
384 if (unlikely(error))
385 goto out;
363 386
364 error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip); 387 error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
365 if (unlikely(error)) 388 if (unlikely(error))
@@ -395,6 +418,7 @@ xfs_vn_rename(
395{ 418{
396 struct inode *new_inode = d_inode(ndentry); 419 struct inode *new_inode = d_inode(ndentry);
397 int omode = 0; 420 int omode = 0;
421 int error;
398 struct xfs_name oname; 422 struct xfs_name oname;
399 struct xfs_name nname; 423 struct xfs_name nname;
400 424
@@ -405,8 +429,14 @@ xfs_vn_rename(
405 if (flags & RENAME_EXCHANGE) 429 if (flags & RENAME_EXCHANGE)
406 omode = d_inode(ndentry)->i_mode; 430 omode = d_inode(ndentry)->i_mode;
407 431
408 xfs_dentry_to_name(&oname, odentry, omode); 432 error = xfs_dentry_mode_to_name(&oname, odentry, omode);
409 xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode); 433 if (omode && unlikely(error))
434 return error;
435
436 error = xfs_dentry_mode_to_name(&nname, ndentry,
437 d_inode(odentry)->i_mode);
438 if (unlikely(error))
439 return error;
410 440
411 return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)), 441 return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
412 XFS_I(ndir), &nname, 442 XFS_I(ndir), &nname,
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index e467218c0098..7a989de224f4 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -331,11 +331,11 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
331} 331}
332 332
333#define ASSERT_ALWAYS(expr) \ 333#define ASSERT_ALWAYS(expr) \
334 (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) 334 (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
335 335
336#ifdef DEBUG 336#ifdef DEBUG
337#define ASSERT(expr) \ 337#define ASSERT(expr) \
338 (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) 338 (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
339 339
340#ifndef STATIC 340#ifndef STATIC
341# define STATIC noinline 341# define STATIC noinline
@@ -346,7 +346,7 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
346#ifdef XFS_WARN 346#ifdef XFS_WARN
347 347
348#define ASSERT(expr) \ 348#define ASSERT(expr) \
349 (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__)) 349 (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
350 350
351#ifndef STATIC 351#ifndef STATIC
352# define STATIC static noinline 352# define STATIC static noinline
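
The inverted branch hint above, compilable in isolation (assuming the GCC/Clang builtin): ASSERT only acts when the expression is false, so the true case is the hot path and wants likely(), not unlikely(). The helper name mirrors the macro's callee.

#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)

static void assfail(const char *expr, const char *file, int line)
{
	fprintf(stderr, "assert failed: %s at %s:%d\n", expr, file, line);
}

#define ASSERT(expr) \
	(likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))

int main(void)
{
	ASSERT(1 + 1 == 2);	/* expected case: branch predicted taken */
	return 0;
}
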
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index c39ac14ff540..b1469f0a91a6 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3317,12 +3317,8 @@ xfs_log_force(
3317 xfs_mount_t *mp, 3317 xfs_mount_t *mp,
3318 uint flags) 3318 uint flags)
3319{ 3319{
3320 int error;
3321
3322 trace_xfs_log_force(mp, 0, _RET_IP_); 3320 trace_xfs_log_force(mp, 0, _RET_IP_);
3323 error = _xfs_log_force(mp, flags, NULL); 3321 _xfs_log_force(mp, flags, NULL);
3324 if (error)
3325 xfs_warn(mp, "%s: error %d returned.", __func__, error);
3326} 3322}
3327 3323
3328/* 3324/*
@@ -3466,12 +3462,8 @@ xfs_log_force_lsn(
3466 xfs_lsn_t lsn, 3462 xfs_lsn_t lsn,
3467 uint flags) 3463 uint flags)
3468{ 3464{
3469 int error;
3470
3471 trace_xfs_log_force(mp, lsn, _RET_IP_); 3465 trace_xfs_log_force(mp, lsn, _RET_IP_);
3472 error = _xfs_log_force_lsn(mp, lsn, flags, NULL); 3466 _xfs_log_force_lsn(mp, lsn, flags, NULL);
3473 if (error)
3474 xfs_warn(mp, "%s: error %d returned.", __func__, error);
3475} 3467}
3476 3468
3477/* 3469/*
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 84f785218907..7f351f706b7a 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -140,6 +140,7 @@ typedef struct xfs_mount {
140 int m_fixedfsid[2]; /* unchanged for life of FS */ 140 int m_fixedfsid[2]; /* unchanged for life of FS */
141 uint m_dmevmask; /* DMI events for this FS */ 141 uint m_dmevmask; /* DMI events for this FS */
142 __uint64_t m_flags; /* global mount flags */ 142 __uint64_t m_flags; /* global mount flags */
143 bool m_inotbt_nores; /* no per-AG finobt resv. */
143 int m_ialloc_inos; /* inodes in inode allocation */ 144 int m_ialloc_inos; /* inodes in inode allocation */
144 int m_ialloc_blks; /* blocks in inode allocation */ 145 int m_ialloc_blks; /* blocks in inode allocation */
145 int m_ialloc_min_blks;/* min blocks in sparse inode 146 int m_ialloc_min_blks;/* min blocks in sparse inode
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 45e50ea90769..b669b123287b 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1177,7 +1177,8 @@ xfs_qm_dqusage_adjust(
1177 * the case in all other instances. It's OK that we do this because 1177 * the case in all other instances. It's OK that we do this because
1178 * quotacheck is done only at mount time. 1178 * quotacheck is done only at mount time.
1179 */ 1179 */
1180 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip); 1180 error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
1181 &ip);
1181 if (error) { 1182 if (error) {
1182 *res = BULKSTAT_RV_NOTHING; 1183 *res = BULKSTAT_RV_NOTHING;
1183 return error; 1184 return error;
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index fe86a668a57e..6e4c7446c3d4 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -526,13 +526,14 @@ xfs_cui_recover(
526 xfs_refcount_finish_one_cleanup(tp, rcur, error); 526 xfs_refcount_finish_one_cleanup(tp, rcur, error);
527 error = xfs_defer_finish(&tp, &dfops, NULL); 527 error = xfs_defer_finish(&tp, &dfops, NULL);
528 if (error) 528 if (error)
529 goto abort_error; 529 goto abort_defer;
530 set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags); 530 set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
531 error = xfs_trans_commit(tp); 531 error = xfs_trans_commit(tp);
532 return error; 532 return error;
533 533
534abort_error: 534abort_error:
535 xfs_refcount_finish_one_cleanup(tp, rcur, error); 535 xfs_refcount_finish_one_cleanup(tp, rcur, error);
536abort_defer:
536 xfs_defer_cancel(&dfops); 537 xfs_defer_cancel(&dfops);
537 xfs_trans_cancel(tp); 538 xfs_trans_cancel(tp);
538 return error; 539 return error;
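
The xfs_cui_recover() hunk splits the single error label in two: the cursor cleanup must only run on paths where the refcount cursor is still live, while a late xfs_defer_finish() failure jumps past it straight to cancelling the deferred ops. A generic sketch of this layered-goto unwind (every function name here is an illustrative stand-in, not the XFS API):

    /* Stubs standing in for the real setup/teardown steps. */
    static int  setup_cursor(void)    { return 0; }
    static int  use_cursor(void)      { return 0; }
    static void teardown_cursor(void) { }
    static int  finish_deferred(void) { return 0; }
    static void cancel_defer(void)    { }
    static int  commit(void)          { return 0; }

    /* Two labels: later failures must skip the cursor cleanup. */
    static int do_transaction_sketch(void)
    {
        int error;

        if ((error = setup_cursor()))
            return error;
        if ((error = use_cursor()))
            goto abort_error;       /* cursor still live here */
        teardown_cursor();
        if ((error = finish_deferred()))
            goto abort_defer;       /* cursor already torn down */
        return commit();

    abort_error:
        teardown_cursor();
    abort_defer:
        cancel_defer();
        return error;
    }
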
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index 276d3023d60f..de6195e38910 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -396,7 +396,7 @@ max_retries_show(
396 int retries; 396 int retries;
397 struct xfs_error_cfg *cfg = to_error_cfg(kobject); 397 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
398 398
399 if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER) 399 if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
400 retries = -1; 400 retries = -1;
401 else 401 else
402 retries = cfg->max_retries; 402 retries = cfg->max_retries;
@@ -422,7 +422,7 @@ max_retries_store(
422 return -EINVAL; 422 return -EINVAL;
423 423
424 if (val == -1) 424 if (val == -1)
425 cfg->retry_timeout = XFS_ERR_RETRY_FOREVER; 425 cfg->max_retries = XFS_ERR_RETRY_FOREVER;
426 else 426 else
427 cfg->max_retries = val; 427 cfg->max_retries = val;
428 return count; 428 return count;
diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h
index df13637e4017..939869c772b1 100644
--- a/include/asm-generic/asm-prototypes.h
+++ b/include/asm-generic/asm-prototypes.h
@@ -1,7 +1,13 @@
1#include <linux/bitops.h> 1#include <linux/bitops.h>
2#undef __memset
2extern void *__memset(void *, int, __kernel_size_t); 3extern void *__memset(void *, int, __kernel_size_t);
4#undef __memcpy
3extern void *__memcpy(void *, const void *, __kernel_size_t); 5extern void *__memcpy(void *, const void *, __kernel_size_t);
6#undef __memmove
4extern void *__memmove(void *, const void *, __kernel_size_t); 7extern void *__memmove(void *, const void *, __kernel_size_t);
8#undef memset
5extern void *memset(void *, int, __kernel_size_t); 9extern void *memset(void *, int, __kernel_size_t);
10#undef memcpy
6extern void *memcpy(void *, const void *, __kernel_size_t); 11extern void *memcpy(void *, const void *, __kernel_size_t);
12#undef memmove
7extern void *memmove(void *, const void *, __kernel_size_t); 13extern void *memmove(void *, const void *, __kernel_size_t);
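
The added #undef lines exist because some architectures define memset()/memcpy()/memmove() (and the double-underscore variants) as function-like macros; left in place, the macro would expand inside these prototypes and declare the wrong symbol, breaking CRC generation for the exported assembly implementations. A compact illustration of the failure mode:

    /* Suppose an arch header provides: */
    #define memcpy(d, s, n) __builtin_memcpy(d, s, n)

    /*
     * A bare prototype would then macro-expand into a declaration of
     * __builtin_memcpy instead of memcpy. Undefining first keeps the
     * identifier declarable:
     */
    #undef memcpy
    extern void *memcpy(void *dest, const void *src, unsigned long n);
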
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 63554e9f6e0c..719db1968d81 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -9,18 +9,15 @@
9#ifndef KSYM_ALIGN 9#ifndef KSYM_ALIGN
10#define KSYM_ALIGN 8 10#define KSYM_ALIGN 8
11#endif 11#endif
12#ifndef KCRC_ALIGN
13#define KCRC_ALIGN 8
14#endif
15#else 12#else
16#define __put .long 13#define __put .long
17#ifndef KSYM_ALIGN 14#ifndef KSYM_ALIGN
18#define KSYM_ALIGN 4 15#define KSYM_ALIGN 4
19#endif 16#endif
17#endif
20#ifndef KCRC_ALIGN 18#ifndef KCRC_ALIGN
21#define KCRC_ALIGN 4 19#define KCRC_ALIGN 4
22#endif 20#endif
23#endif
24 21
25#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX 22#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
26#define KSYM(name) _##name 23#define KSYM(name) _##name
@@ -52,7 +49,11 @@ KSYM(__kstrtab_\name):
52 .section ___kcrctab\sec+\name,"a" 49 .section ___kcrctab\sec+\name,"a"
53 .balign KCRC_ALIGN 50 .balign KCRC_ALIGN
54KSYM(__kcrctab_\name): 51KSYM(__kcrctab_\name):
55 __put KSYM(__crc_\name) 52#if defined(CONFIG_MODULE_REL_CRCS)
53 .long KSYM(__crc_\name) - .
54#else
55 .long KSYM(__crc_\name)
56#endif
56 .weak KSYM(__crc_\name) 57 .weak KSYM(__crc_\name)
57 .previous 58 .previous
58#endif 59#endif
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 192016e2b518..9c4ee144b5f6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -517,6 +517,7 @@ struct drm_device {
517 struct drm_minor *control; /**< Control node */ 517 struct drm_minor *control; /**< Control node */
518 struct drm_minor *primary; /**< Primary node */ 518 struct drm_minor *primary; /**< Primary node */
519 struct drm_minor *render; /**< Render node */ 519 struct drm_minor *render; /**< Render node */
520 bool registered;
520 521
521 /* currently active master for this device. Protected by master_mutex */ 522 /* currently active master for this device. Protected by master_mutex */
522 struct drm_master *master; 523 struct drm_master *master;
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index d6d241f63b9f..56814e8ae7ea 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -144,7 +144,7 @@ struct __drm_crtcs_state {
144 struct drm_crtc *ptr; 144 struct drm_crtc *ptr;
145 struct drm_crtc_state *state; 145 struct drm_crtc_state *state;
146 struct drm_crtc_commit *commit; 146 struct drm_crtc_commit *commit;
147 s64 __user *out_fence_ptr; 147 s32 __user *out_fence_ptr;
148}; 148};
149 149
150struct __drm_connnectors_state { 150struct __drm_connnectors_state {
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index a9b95246e26e..045a97cbeba2 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -381,6 +381,8 @@ struct drm_connector_funcs {
381 * core drm connector interfaces. Everything added from this callback 381 * core drm connector interfaces. Everything added from this callback
382 * should be unregistered in the early_unregister callback. 382 * should be unregistered in the early_unregister callback.
383 * 383 *
384 * This is called while holding drm_connector->mutex.
385 *
384 * Returns: 386 * Returns:
385 * 387 *
386 * 0 on success, or a negative error code on failure. 388 * 0 on success, or a negative error code on failure.
@@ -395,6 +397,8 @@ struct drm_connector_funcs {
395 * late_register(). It is called from drm_connector_unregister(), 397 * late_register(). It is called from drm_connector_unregister(),
396 * early in the driver unload sequence to disable userspace access 398 * early in the driver unload sequence to disable userspace access
397 * before data structures are torndown. 399 * before data structures are torndown.
400 *
401 * This is called while holding drm_connector->mutex.
398 */ 402 */
399 void (*early_unregister)(struct drm_connector *connector); 403 void (*early_unregister)(struct drm_connector *connector);
400 404
@@ -559,7 +563,6 @@ struct drm_cmdline_mode {
559 * @interlace_allowed: can this connector handle interlaced modes? 563 * @interlace_allowed: can this connector handle interlaced modes?
560 * @doublescan_allowed: can this connector handle doublescan? 564 * @doublescan_allowed: can this connector handle doublescan?
561 * @stereo_allowed: can this connector handle stereo modes? 565 * @stereo_allowed: can this connector handle stereo modes?
562 * @registered: is this connector exposed (registered) with userspace?
563 * @modes: modes available on this connector (from fill_modes() + user) 566 * @modes: modes available on this connector (from fill_modes() + user)
564 * @status: one of the drm_connector_status enums (connected, not, or unknown) 567 * @status: one of the drm_connector_status enums (connected, not, or unknown)
565 * @probed_modes: list of modes derived directly from the display 568 * @probed_modes: list of modes derived directly from the display
@@ -608,6 +611,13 @@ struct drm_connector {
608 char *name; 611 char *name;
609 612
610 /** 613 /**
614 * @mutex: Lock for general connector state, but currently only protects
615 * @registered. Most of the connector state is still protected by the
616 * mutex in &drm_mode_config.
617 */
618 struct mutex mutex;
619
620 /**
611 * @index: Compacted connector index, which matches the position inside 621 * @index: Compacted connector index, which matches the position inside
612 * the mode_config.list for drivers not supporting hot-add/removing. Can 622 * the mode_config.list for drivers not supporting hot-add/removing. Can
613 * be used as an array index. It is invariant over the lifetime of the 623 * be used as an array index. It is invariant over the lifetime of the
@@ -620,6 +630,10 @@ struct drm_connector {
620 bool interlace_allowed; 630 bool interlace_allowed;
621 bool doublescan_allowed; 631 bool doublescan_allowed;
622 bool stereo_allowed; 632 bool stereo_allowed;
633 /**
634 * @registered: Is this connector exposed (registered) with userspace?
635 * Protected by @mutex.
636 */
623 bool registered; 637 bool registered;
624 struct list_head modes; /* list of modes on this connector */ 638 struct list_head modes; /* list of modes on this connector */
625 639
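
With the new @mutex, registration state changes are serialized per connector instead of leaning on the global mode-config lock. A simplified sketch of how registration might check and flip @registered under the lock (hypothetical function, not the actual drm_connector_register() body):

    #include <drm/drm_connector.h>

    int connector_register_sketch(struct drm_connector *connector)
    {
        int ret = 0;

        mutex_lock(&connector->mutex);
        if (connector->registered)      /* already visible to userspace */
            goto out_unlock;

        /* ... sysfs/debugfs setup, late_register() callback ... */

        connector->registered = true;   /* protected by @mutex */
    out_unlock:
        mutex_unlock(&connector->mutex);
        return ret;
    }
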
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index bf9991b20611..137432386310 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -488,7 +488,7 @@ struct drm_mode_config {
488 /** 488 /**
489 * @prop_out_fence_ptr: Sync File fd pointer representing the 489 * @prop_out_fence_ptr: Sync File fd pointer representing the
490 * outgoing fences for a CRTC. Userspace should provide a pointer to a 490 * outgoing fences for a CRTC. Userspace should provide a pointer to a
491 * value of type s64, and then cast that pointer to u64. 491 * value of type s32, and then cast that pointer to u64.
492 */ 492 */
493 struct drm_property *prop_out_fence_ptr; 493 struct drm_property *prop_out_fence_ptr;
494 /** 494 /**
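
The s64 to s32 change in the two hunks above is visible to userspace: sync-file handles are ordinary ints, so OUT_FENCE_PTR must point at a 32-bit signed slot that the kernel fills with the new fd. A userspace-side sketch (the property id and request variables are placeholders):

    #include <stdint.h>

    int32_t out_fence_fd = -1;  /* kernel writes the sync_file fd here */

    /* The property value is the pointer itself, cast to u64: */
    uint64_t prop_value = (uint64_t)(uintptr_t)&out_fence_fd;

    /*
     * Handed to the atomic commit via, e.g., libdrm:
     *
     *   drmModeAtomicAddProperty(req, crtc_id,
     *                            out_fence_ptr_prop_id, prop_value);
     *
     * After a successful commit, out_fence_fd is a pollable fence fd.
     */
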
diff --git a/include/dt-bindings/mfd/tps65217.h b/include/dt-bindings/mfd/tps65217.h
deleted file mode 100644
index cafb9e60cf12..000000000000
--- a/include/dt-bindings/mfd/tps65217.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * This header provides macros for TI TPS65217 DT bindings.
3 *
4 * Copyright (C) 2016 Texas Instruments
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef __DT_BINDINGS_TPS65217_H__
20#define __DT_BINDINGS_TPS65217_H__
21
22#define TPS65217_IRQ_USB 0
23#define TPS65217_IRQ_AC 1
24#define TPS65217_IRQ_PB 2
25
26#endif
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index b717ed9d2b75..5c970ce67949 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -76,4 +76,5 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
76 76
77void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); 77void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
78 78
79void kvm_timer_init_vhe(void);
79#endif 80#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 83695641bd5e..1ca8e8fd1078 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -739,7 +739,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
739 } 739 }
740} 740}
741 741
742static inline unsigned int blk_queue_zone_size(struct request_queue *q) 742static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
743{ 743{
744 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; 744 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
745} 745}
@@ -1000,6 +1000,19 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
1000 return blk_rq_cur_bytes(rq) >> 9; 1000 return blk_rq_cur_bytes(rq) >> 9;
1001} 1001}
1002 1002
1003/*
1004 * Some commands like WRITE SAME have a payload or data transfer size which
1005 * is different from the size of the request. Any driver that supports such
1006 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
1007 * calculate the data transfer size.
1008 */
1009static inline unsigned int blk_rq_payload_bytes(struct request *rq)
1010{
1011 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1012 return rq->special_vec.bv_len;
1013 return blk_rq_bytes(rq);
1014}
1015
1003static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, 1016static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
1004 int op) 1017 int op)
1005{ 1018{
@@ -1536,12 +1549,12 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
1536 return false; 1549 return false;
1537} 1550}
1538 1551
1539static inline unsigned int bdev_zone_size(struct block_device *bdev) 1552static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
1540{ 1553{
1541 struct request_queue *q = bdev_get_queue(bdev); 1554 struct request_queue *q = bdev_get_queue(bdev);
1542 1555
1543 if (q) 1556 if (q)
1544 return blk_queue_zone_size(q); 1557 return blk_queue_zone_sectors(q);
1545 1558
1546 return 0; 1559 return 0;
1547} 1560}
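
blk_rq_payload_bytes(), added above, exists because for commands such as WRITE SAME the logical request size and the wire payload diverge: blk_rq_bytes() may report megabytes while only one block of payload actually moves. A sketch of a driver sizing its data transfer with the new helper (hypothetical function name):

    #include <linux/blkdev.h>

    /* Program the transfer with the payload size, not the logical
     * request size; the helper returns special_vec.bv_len for
     * RQF_SPECIAL_PAYLOAD requests and blk_rq_bytes() otherwise. */
    static unsigned int sketch_dma_len(struct request *rq)
    {
        return blk_rq_payload_bytes(rq);
    }
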
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 92bc89ae7e20..c970a25d2a49 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -21,20 +21,19 @@ struct cgroup_bpf {
21 */ 21 */
22 struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE]; 22 struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
23 struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE]; 23 struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
24 bool disallow_override[MAX_BPF_ATTACH_TYPE];
24}; 25};
25 26
26void cgroup_bpf_put(struct cgroup *cgrp); 27void cgroup_bpf_put(struct cgroup *cgrp);
27void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent); 28void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
28 29
29void __cgroup_bpf_update(struct cgroup *cgrp, 30int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
30 struct cgroup *parent, 31 struct bpf_prog *prog, enum bpf_attach_type type,
31 struct bpf_prog *prog, 32 bool overridable);
32 enum bpf_attach_type type);
33 33
34/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */ 34/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
35void cgroup_bpf_update(struct cgroup *cgrp, 35int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
36 struct bpf_prog *prog, 36 enum bpf_attach_type type, bool overridable);
37 enum bpf_attach_type type);
38 37
39int __cgroup_bpf_run_filter_skb(struct sock *sk, 38int __cgroup_bpf_run_filter_skb(struct sock *sk,
40 struct sk_buff *skb, 39 struct sk_buff *skb,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f74ae68086dc..3ed1f3b1d594 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -216,7 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
216u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 216u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
217 217
218bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); 218bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
219int bpf_prog_calc_digest(struct bpf_prog *fp); 219int bpf_prog_calc_tag(struct bpf_prog *fp);
220 220
221const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 221const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
222 222
@@ -247,6 +247,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
247void bpf_map_put_with_uref(struct bpf_map *map); 247void bpf_map_put_with_uref(struct bpf_map *map);
248void bpf_map_put(struct bpf_map *map); 248void bpf_map_put(struct bpf_map *map);
249int bpf_map_precharge_memlock(u32 pages); 249int bpf_map_precharge_memlock(u32 pages);
250void *bpf_map_area_alloc(size_t size);
251void bpf_map_area_free(void *base);
250 252
251extern int sysctl_unprivileged_bpf_disabled; 253extern int sysctl_unprivileged_bpf_disabled;
252 254
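
bpf_map_area_alloc()/bpf_map_area_free() are declared here so map implementations can allocate large areas without high-order page-allocator warnings; the customary shape of such helpers is kmalloc-first with a quiet vmalloc fallback. A sketch under that assumption (the in-tree implementation may differ in flags and NUMA handling):

    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    /* Sketch: try physically contiguous first, fall back quietly. */
    static void *map_area_alloc_sketch(size_t size)
    {
        void *area = kmalloc(size, GFP_USER | __GFP_NOWARN);

        if (area)
            return area;
        return vmalloc(size);       /* virtually contiguous fallback */
    }

    static void map_area_free_sketch(void *base)
    {
        kvfree(base);               /* handles both allocation kinds */
    }
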
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index d67ab83823ad..79591c3660cc 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -243,12 +243,10 @@ static inline int block_page_mkwrite_return(int err)
243{ 243{
244 if (err == 0) 244 if (err == 0)
245 return VM_FAULT_LOCKED; 245 return VM_FAULT_LOCKED;
246 if (err == -EFAULT) 246 if (err == -EFAULT || err == -EAGAIN)
247 return VM_FAULT_NOPAGE; 247 return VM_FAULT_NOPAGE;
248 if (err == -ENOMEM) 248 if (err == -ENOMEM)
249 return VM_FAULT_OOM; 249 return VM_FAULT_OOM;
250 if (err == -EAGAIN)
251 return VM_FAULT_RETRY;
252 /* -ENOSPC, -EDQUOT, -EIO ... */ 250 /* -ENOSPC, -EDQUOT, -EIO ... */
253 return VM_FAULT_SIGBUS; 251 return VM_FAULT_SIGBUS;
254} 252}
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index a0875001b13c..df08a41d5be5 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -45,10 +45,9 @@ struct can_proto {
45extern int can_proto_register(const struct can_proto *cp); 45extern int can_proto_register(const struct can_proto *cp);
46extern void can_proto_unregister(const struct can_proto *cp); 46extern void can_proto_unregister(const struct can_proto *cp);
47 47
48extern int can_rx_register(struct net_device *dev, canid_t can_id, 48int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
49 canid_t mask, 49 void (*func)(struct sk_buff *, void *),
50 void (*func)(struct sk_buff *, void *), 50 void *data, char *ident, struct sock *sk);
51 void *data, char *ident);
52 51
53extern void can_rx_unregister(struct net_device *dev, canid_t can_id, 52extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
54 canid_t mask, 53 canid_t mask,
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index d016a121a8c4..28ffa94aed6b 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -14,6 +14,7 @@ struct coredump_params;
14extern int dump_skip(struct coredump_params *cprm, size_t nr); 14extern int dump_skip(struct coredump_params *cprm, size_t nr);
15extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); 15extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
16extern int dump_align(struct coredump_params *cprm, int align); 16extern int dump_align(struct coredump_params *cprm, int align);
17extern void dump_truncate(struct coredump_params *cprm);
17#ifdef CONFIG_COREDUMP 18#ifdef CONFIG_COREDUMP
18extern void do_coredump(const siginfo_t *siginfo); 19extern void do_coredump(const siginfo_t *siginfo);
19#else 20#else
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 20bfefbe7594..921acaaa1601 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -8,9 +8,7 @@ enum cpuhp_state {
8 CPUHP_CREATE_THREADS, 8 CPUHP_CREATE_THREADS,
9 CPUHP_PERF_PREPARE, 9 CPUHP_PERF_PREPARE,
10 CPUHP_PERF_X86_PREPARE, 10 CPUHP_PERF_X86_PREPARE,
11 CPUHP_PERF_X86_UNCORE_PREP,
12 CPUHP_PERF_X86_AMD_UNCORE_PREP, 11 CPUHP_PERF_X86_AMD_UNCORE_PREP,
13 CPUHP_PERF_X86_RAPL_PREP,
14 CPUHP_PERF_BFIN, 12 CPUHP_PERF_BFIN,
15 CPUHP_PERF_POWER, 13 CPUHP_PERF_POWER,
16 CPUHP_PERF_SUPERH, 14 CPUHP_PERF_SUPERH,
@@ -74,6 +72,8 @@ enum cpuhp_state {
74 CPUHP_ZCOMP_PREPARE, 72 CPUHP_ZCOMP_PREPARE,
75 CPUHP_TIMERS_DEAD, 73 CPUHP_TIMERS_DEAD,
76 CPUHP_MIPS_SOC_PREPARE, 74 CPUHP_MIPS_SOC_PREPARE,
75 CPUHP_BP_PREPARE_DYN,
76 CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
77 CPUHP_BRINGUP_CPU, 77 CPUHP_BRINGUP_CPU,
78 CPUHP_AP_IDLE_DEAD, 78 CPUHP_AP_IDLE_DEAD,
79 CPUHP_AP_OFFLINE, 79 CPUHP_AP_OFFLINE,
@@ -84,7 +84,6 @@ enum cpuhp_state {
84 CPUHP_AP_IRQ_ARMADA_XP_STARTING, 84 CPUHP_AP_IRQ_ARMADA_XP_STARTING,
85 CPUHP_AP_IRQ_BCM2836_STARTING, 85 CPUHP_AP_IRQ_BCM2836_STARTING,
86 CPUHP_AP_ARM_MVEBU_COHERENCY, 86 CPUHP_AP_ARM_MVEBU_COHERENCY,
87 CPUHP_AP_PERF_X86_UNCORE_STARTING,
88 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, 87 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
89 CPUHP_AP_PERF_X86_STARTING, 88 CPUHP_AP_PERF_X86_STARTING,
90 CPUHP_AP_PERF_X86_AMD_IBS_STARTING, 89 CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
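
CPUHP_BP_PREPARE_DYN ... CPUHP_BP_PREPARE_DYN_END reserves 20 dynamically allocatable hotplug states in the prepare phase, mirroring the dynamic range that already exists for the online phase. A sketch of how a subsystem could claim one (cpuhp_setup_state() is the real API; the callbacks and name string are placeholders):

    #include <linux/cpuhotplug.h>

    static int my_prepare_cpu(unsigned int cpu) { return 0; }
    static int my_dead_cpu(unsigned int cpu)    { return 0; }

    static int register_dyn_prepare_state(void)
    {
        int state;

        /* Passing the _DYN constant asks for a free slot in range. */
        state = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "subsys:prepare",
                                  my_prepare_cpu, my_dead_cpu);
        return state < 0 ? state : 0;  /* >= 0 is the allocated state */
    }
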
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index c717f5ea88cb..b3d2c1a89ac4 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -560,7 +560,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
560static inline int cpumask_parse_user(const char __user *buf, int len, 560static inline int cpumask_parse_user(const char __user *buf, int len,
561 struct cpumask *dstp) 561 struct cpumask *dstp)
562{ 562{
563 return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids); 563 return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
564} 564}
565 565
566/** 566/**
@@ -575,7 +575,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
575 struct cpumask *dstp) 575 struct cpumask *dstp)
576{ 576{
577 return bitmap_parselist_user(buf, len, cpumask_bits(dstp), 577 return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
578 nr_cpu_ids); 578 nr_cpumask_bits);
579} 579}
580 580
581/** 581/**
@@ -590,7 +590,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
590 char *nl = strchr(buf, '\n'); 590 char *nl = strchr(buf, '\n');
591 unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); 591 unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
592 592
593 return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids); 593 return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
594} 594}
595 595
596/** 596/**
@@ -602,7 +602,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
602 */ 602 */
603static inline int cpulist_parse(const char *buf, struct cpumask *dstp) 603static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
604{ 604{
605 return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids); 605 return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
606} 606}
607 607
608/** 608/**
diff --git a/include/linux/dax.h b/include/linux/dax.h
index f97bcfe79472..24ad71173995 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -41,6 +41,9 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
41int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, 41int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
42 struct iomap_ops *ops); 42 struct iomap_ops *ops);
43int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); 43int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
44int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
45int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
46 pgoff_t index);
44void dax_wake_mapping_entry_waiter(struct address_space *mapping, 47void dax_wake_mapping_entry_waiter(struct address_space *mapping,
45 pgoff_t index, void *entry, bool wake_all); 48 pgoff_t index, void *entry, bool wake_all);
46 49
diff --git a/include/linux/efi.h b/include/linux/efi.h
index a07a476178cd..5b1af30ece55 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -103,6 +103,7 @@ typedef struct {
103 103
104#define EFI_PAGE_SHIFT 12 104#define EFI_PAGE_SHIFT 12
105#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT) 105#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
106#define EFI_PAGES_MAX (U64_MAX >> EFI_PAGE_SHIFT)
106 107
107typedef struct { 108typedef struct {
108 u32 type; 109 u32 type;
@@ -950,6 +951,7 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
950#endif 951#endif
951extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); 952extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
952 953
954extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
953extern int __init efi_memmap_init_early(struct efi_memory_map_data *data); 955extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
954extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size); 956extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
955extern void __init efi_memmap_unmap(void); 957extern void __init efi_memmap_unmap(void);
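
EFI_PAGES_MAX caps the number of 4 KiB EFI pages whose byte size still fits in a u64, so page counts coming from firmware can be validated before shifting. A one-line sketch (hypothetical helper name):

    #include <linux/efi.h>

    /* num_pages << EFI_PAGE_SHIFT cannot overflow if this holds. */
    static bool efi_num_pages_valid(u64 num_pages)
    {
        return num_pages <= EFI_PAGES_MAX;
    }
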
diff --git a/include/linux/export.h b/include/linux/export.h
index 2a0f61fbc731..1a1dfdb2a5c6 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -43,12 +43,19 @@ extern struct module __this_module;
43#ifdef CONFIG_MODVERSIONS 43#ifdef CONFIG_MODVERSIONS
44/* Mark the CRC weak since genksyms apparently decides not to 44/* Mark the CRC weak since genksyms apparently decides not to
45 * generate a checksums for some symbols */ 45 * generate a checksums for some symbols */
46#if defined(CONFIG_MODULE_REL_CRCS)
46#define __CRC_SYMBOL(sym, sec) \ 47#define __CRC_SYMBOL(sym, sec) \
47 extern __visible void *__crc_##sym __attribute__((weak)); \ 48 asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
48 static const unsigned long __kcrctab_##sym \ 49 " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
49 __used \ 50 " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n" \
50 __attribute__((section("___kcrctab" sec "+" #sym), used)) \ 51 " .previous \n");
51 = (unsigned long) &__crc_##sym; 52#else
53#define __CRC_SYMBOL(sym, sec) \
54 asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
55 " .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
56 " .long " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
57 " .previous \n");
58#endif
52#else 59#else
53#define __CRC_SYMBOL(sym, sec) 60#define __CRC_SYMBOL(sym, sec)
54#endif 61#endif
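
Under CONFIG_MODULE_REL_CRCS the kcrctab entry stores __crc_sym minus the entry's own address, a place-relative 32-bit offset, instead of the symbol's absolute address; that keeps the table position-independent (no relocation needed, and it survives KASLR with only 32 bits per entry). The consumer recovers the value by adding the entry's address back, sketched here (illustrative arithmetic, not the module loader code):

    #include <stdint.h>

    /* entry holds (target - &entry); invert that to get the target. */
    static uintptr_t resolve_rel32(const int32_t *entry)
    {
        return (uintptr_t)entry + *entry;
    }
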
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 702314253797..e4eb2546339a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -57,6 +57,8 @@ struct bpf_prog_aux;
57/* BPF program can access up to 512 bytes of stack space. */ 57/* BPF program can access up to 512 bytes of stack space. */
58#define MAX_BPF_STACK 512 58#define MAX_BPF_STACK 512
59 59
60#define BPF_TAG_SIZE 8
61
60/* Helper macros for filter block array initializers. */ 62/* Helper macros for filter block array initializers. */
61 63
62/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ 64/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -408,7 +410,7 @@ struct bpf_prog {
408 kmemcheck_bitfield_end(meta); 410 kmemcheck_bitfield_end(meta);
409 enum bpf_prog_type type; /* Type of BPF program */ 411 enum bpf_prog_type type; /* Type of BPF program */
410 u32 len; /* Number of filter blocks */ 412 u32 len; /* Number of filter blocks */
411 u32 digest[SHA_DIGEST_WORDS]; /* Program digest */ 413 u8 tag[BPF_TAG_SIZE];
412 struct bpf_prog_aux *aux; /* Auxiliary fields */ 414 struct bpf_prog_aux *aux; /* Auxiliary fields */
413 struct sock_fprog_kern *orig_prog; /* Original BPF program */ 415 struct sock_fprog_kern *orig_prog; /* Original BPF program */
414 unsigned int (*bpf_func)(const void *ctx, 416 unsigned int (*bpf_func)(const void *ctx,
@@ -519,7 +521,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
519 return prog->len * sizeof(struct bpf_insn); 521 return prog->len * sizeof(struct bpf_insn);
520} 522}
521 523
522static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog) 524static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
523{ 525{
524 return round_up(bpf_prog_insn_size(prog) + 526 return round_up(bpf_prog_insn_size(prog) +
525 sizeof(__be64) + 1, SHA_MESSAGE_BYTES); 527 sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
@@ -610,7 +612,6 @@ bool bpf_helper_changes_pkt_data(void *func);
610struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, 612struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
611 const struct bpf_insn *patch, u32 len); 613 const struct bpf_insn *patch, u32 len);
612void bpf_warn_invalid_xdp_action(u32 act); 614void bpf_warn_invalid_xdp_action(u32 act);
613void bpf_warn_invalid_xdp_buffer(void);
614 615
615#ifdef CONFIG_BPF_JIT 616#ifdef CONFIG_BPF_JIT
616extern int bpf_jit_enable; 617extern int bpf_jit_enable;
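
The program digest shrinks to an 8-byte tag: still derived from a SHA-1 over the instruction stream, but only a truncated prefix is kept, making it an identifier rather than a full digest. The truncation step, assuming a 20-byte SHA-1 has already been computed (the hashing itself elided; function name is a placeholder):

    #include <stdint.h>
    #include <string.h>

    #define BPF_TAG_SIZE 8

    /* Keep the leading 8 of the 20 SHA-1 bytes as the program tag. */
    static void prog_set_tag(uint8_t tag[BPF_TAG_SIZE],
                             const uint8_t sha1[20])
    {
        memcpy(tag, sha1, BPF_TAG_SIZE);
    }
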
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 13ba552e6c09..4c467ef50159 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -360,6 +360,7 @@ struct fscache_object {
360#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ 360#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
361#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ 361#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
362#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ 362#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
363#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
363 364
364 struct list_head cache_link; /* link in cache->object_list */ 365 struct list_head cache_link; /* link in cache->object_list */
365 struct hlist_node cookie_link; /* link in cookie->backing_objects */ 366 struct hlist_node cookie_link; /* link in cookie->backing_objects */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 0cf34d6cc253..487246546ebe 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -323,8 +323,6 @@ extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(str
323extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode); 323extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
324/* find (and take a reference) to a mark associated with group and vfsmount */ 324/* find (and take a reference) to a mark associated with group and vfsmount */
325extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt); 325extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
326/* copy the values from old into new */
327extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old);
328/* set the ignored_mask of a mark */ 326/* set the ignored_mask of a mark */
329extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask); 327extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
330/* set the mask of a mark (might pin the object into memory */ 328/* set the mask of a mark (might pin the object into memory */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index e0341af6950e..76f39754e7b0 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -146,15 +146,6 @@ enum {
146 DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */ 146 DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
147}; 147};
148 148
149#define BLK_SCSI_MAX_CMDS (256)
150#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
151
152struct blk_scsi_cmd_filter {
153 unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
154 unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
155 struct kobject kobj;
156};
157
158struct disk_part_tbl { 149struct disk_part_tbl {
159 struct rcu_head rcu_head; 150 struct rcu_head rcu_head;
160 int len; 151 int len;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 4175dca4ac39..0fe0b6295ab5 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -38,9 +38,8 @@ struct vm_area_struct;
38#define ___GFP_ACCOUNT 0x100000u 38#define ___GFP_ACCOUNT 0x100000u
39#define ___GFP_NOTRACK 0x200000u 39#define ___GFP_NOTRACK 0x200000u
40#define ___GFP_DIRECT_RECLAIM 0x400000u 40#define ___GFP_DIRECT_RECLAIM 0x400000u
41#define ___GFP_OTHER_NODE 0x800000u 41#define ___GFP_WRITE 0x800000u
42#define ___GFP_WRITE 0x1000000u 42#define ___GFP_KSWAPD_RECLAIM 0x1000000u
43#define ___GFP_KSWAPD_RECLAIM 0x2000000u
44/* If the above are modified, __GFP_BITS_SHIFT may need updating */ 43/* If the above are modified, __GFP_BITS_SHIFT may need updating */
45 44
46/* 45/*
@@ -172,11 +171,6 @@ struct vm_area_struct;
172 * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of 171 * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
173 * distinguishing in the source between false positives and allocations that 172 * distinguishing in the source between false positives and allocations that
174 * cannot be supported (e.g. page tables). 173 * cannot be supported (e.g. page tables).
175 *
176 * __GFP_OTHER_NODE is for allocations that are on a remote node but that
177 * should not be accounted for as a remote allocation in vmstat. A
178 * typical user would be khugepaged collapsing a huge page on a remote
179 * node.
180 */ 174 */
181#define __GFP_COLD ((__force gfp_t)___GFP_COLD) 175#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
182#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) 176#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
@@ -184,10 +178,9 @@ struct vm_area_struct;
184#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) 178#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
185#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) 179#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
186#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) 180#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
187#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
188 181
189/* Room for N __GFP_FOO bits */ 182/* Room for N __GFP_FOO bits */
190#define __GFP_BITS_SHIFT 26 183#define __GFP_BITS_SHIFT 25
191#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) 184#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
192 185
193/* 186/*
@@ -506,11 +499,10 @@ extern void free_hot_cold_page(struct page *page, bool cold);
506extern void free_hot_cold_page_list(struct list_head *list, bool cold); 499extern void free_hot_cold_page_list(struct list_head *list, bool cold);
507 500
508struct page_frag_cache; 501struct page_frag_cache;
509extern void __page_frag_drain(struct page *page, unsigned int order, 502extern void __page_frag_cache_drain(struct page *page, unsigned int count);
510 unsigned int count); 503extern void *page_frag_alloc(struct page_frag_cache *nc,
511extern void *__alloc_page_frag(struct page_frag_cache *nc, 504 unsigned int fragsz, gfp_t gfp_mask);
512 unsigned int fragsz, gfp_t gfp_mask); 505extern void page_frag_free(void *addr);
513extern void __free_page_frag(void *addr);
514 506
515#define __free_page(page) __free_pages((page), 0) 507#define __free_page(page) __free_pages((page), 0)
516#define free_page(addr) free_pages((addr), 0) 508#define free_page(addr) free_pages((addr), 0)
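
The page-fragment allocator keeps its semantics and loses the double-underscore spelling: fragments are carved out of a cached page and freed by address. A sketch with the renamed entry points (signatures as declared above; surrounding context simplified):

    #include <linux/gfp.h>
    #include <linux/mm_types.h>

    static struct page_frag_cache frag_cache;   /* zeroed at start */

    static void *grab_fragment(unsigned int size)
    {
        /* New name: page_frag_alloc(), was __alloc_page_frag(). */
        return page_frag_alloc(&frag_cache, size, GFP_ATOMIC);
    }

    static void drop_fragment(void *addr)
    {
        /* New name: page_frag_free(), was __free_page_frag(). */
        page_frag_free(addr);
    }
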
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c2748accea71..e973faba69dc 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -274,37 +274,67 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
274 struct irq_chip *irqchip, 274 struct irq_chip *irqchip,
275 int parent_irq); 275 int parent_irq);
276 276
277int _gpiochip_irqchip_add(struct gpio_chip *gpiochip, 277int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
278 struct irq_chip *irqchip,
279 unsigned int first_irq,
280 irq_flow_handler_t handler,
281 unsigned int type,
282 bool nested,
283 struct lock_class_key *lock_key);
284
285#ifdef CONFIG_LOCKDEP
286
287/*
288 * Lockdep requires that each irqchip instance be created with a
289 * unique key so as to avoid unnecessary warnings. This upfront
290 * boilerplate static inlines provides such a key for each
291 * unique instance.
292 */
293static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
294 struct irq_chip *irqchip,
295 unsigned int first_irq,
296 irq_flow_handler_t handler,
297 unsigned int type)
298{
299 static struct lock_class_key key;
300
301 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
302 handler, type, false, &key);
303}
304
305static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
278 struct irq_chip *irqchip, 306 struct irq_chip *irqchip,
279 unsigned int first_irq, 307 unsigned int first_irq,
280 irq_flow_handler_t handler, 308 irq_flow_handler_t handler,
281 unsigned int type, 309 unsigned int type)
282 bool nested, 310{
283 struct lock_class_key *lock_key); 311
312 static struct lock_class_key key;
313
314 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
315 handler, type, true, &key);
316}
317#else
318static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
319 struct irq_chip *irqchip,
320 unsigned int first_irq,
321 irq_flow_handler_t handler,
322 unsigned int type)
323{
324 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
325 handler, type, false, NULL);
326}
284 327
285/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
286static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, 328static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
287 struct irq_chip *irqchip, 329 struct irq_chip *irqchip,
288 unsigned int first_irq, 330 unsigned int first_irq,
289 irq_flow_handler_t handler, 331 irq_flow_handler_t handler,
290 unsigned int type) 332 unsigned int type)
291{ 333{
292 return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq, 334 return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
293 handler, type, true, NULL); 335 handler, type, true, NULL);
294} 336}
295 337#endif /* CONFIG_LOCKDEP */
296#ifdef CONFIG_LOCKDEP
297#define gpiochip_irqchip_add(...) \
298( \
299 ({ \
300 static struct lock_class_key _key; \
301 _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
302 }) \
303)
304#else
305#define gpiochip_irqchip_add(...) \
306 _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
307#endif
308 338
309#endif /* CONFIG_GPIOLIB_IRQCHIP */ 339#endif /* CONFIG_GPIOLIB_IRQCHIP */
310 340
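
The refactor replaces the per-call-site macro trick with static inlines: under CONFIG_LOCKDEP each inline carries its own static struct lock_class_key, so every translation unit calling it still gets a distinct lockdep class, while driver code is untouched. A sketch of the (unchanged) driver-side call, with placeholder arguments:

    #include <linux/gpio/driver.h>
    #include <linux/irq.h>

    static int wire_up_irqchip(struct gpio_chip *chip,
                               struct irq_chip *irqchip)
    {
        /* The lockdep key is minted inside the inline helper. */
        return gpiochip_irqchip_add(chip, irqchip,
                                    0,                 /* first_irq */
                                    handle_simple_irq, /* flow handler */
                                    IRQ_TYPE_NONE);
    }
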
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 42fe43fb0c80..183efde54269 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -128,6 +128,7 @@ struct hv_ring_buffer_info {
128 u32 ring_data_startoffset; 128 u32 ring_data_startoffset;
129 u32 priv_write_index; 129 u32 priv_write_index;
130 u32 priv_read_index; 130 u32 priv_read_index;
131 u32 cached_read_index;
131}; 132};
132 133
133/* 134/*
@@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
180 return write; 181 return write;
181} 182}
182 183
184static inline u32 hv_get_cached_bytes_to_write(
185 const struct hv_ring_buffer_info *rbi)
186{
187 u32 read_loc, write_loc, dsize, write;
188
189 dsize = rbi->ring_datasize;
190 read_loc = rbi->cached_read_index;
191 write_loc = rbi->ring_buffer->write_index;
192
193 write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
194 read_loc - write_loc;
195 return write;
196}
183/* 197/*
184 * VMBUS version is 32 bit entity broken up into 198 * VMBUS version is 32 bit entity broken up into
185 * two 16 bit quantities: major_number. minor_number. 199 * two 16 bit quantities: major_number. minor_number.
@@ -1488,7 +1502,7 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
1488 1502
1489static inline void hv_signal_on_read(struct vmbus_channel *channel) 1503static inline void hv_signal_on_read(struct vmbus_channel *channel)
1490{ 1504{
1491 u32 cur_write_sz; 1505 u32 cur_write_sz, cached_write_sz;
1492 u32 pending_sz; 1506 u32 pending_sz;
1493 struct hv_ring_buffer_info *rbi = &channel->inbound; 1507 struct hv_ring_buffer_info *rbi = &channel->inbound;
1494 1508
@@ -1512,12 +1526,24 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
1512 1526
1513 cur_write_sz = hv_get_bytes_to_write(rbi); 1527 cur_write_sz = hv_get_bytes_to_write(rbi);
1514 1528
1515 if (cur_write_sz >= pending_sz) 1529 if (cur_write_sz < pending_sz)
1530 return;
1531
1532 cached_write_sz = hv_get_cached_bytes_to_write(rbi);
1533 if (cached_write_sz < pending_sz)
1516 vmbus_setevent(channel); 1534 vmbus_setevent(channel);
1517 1535
1518 return; 1536 return;
1519} 1537}
1520 1538
1539static inline void
1540init_cached_read_index(struct vmbus_channel *channel)
1541{
1542 struct hv_ring_buffer_info *rbi = &channel->inbound;
1543
1544 rbi->cached_read_index = rbi->ring_buffer->read_index;
1545}
1546
1521/* 1547/*
1522 * An API to support in-place processing of incoming VMBUS packets. 1548 * An API to support in-place processing of incoming VMBUS packets.
1523 */ 1549 */
@@ -1569,6 +1595,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
1569 * This call commits the read index and potentially signals the host. 1595 * This call commits the read index and potentially signals the host.
1570 * Here is the pattern for using the "in-place" consumption APIs: 1596 * Here is the pattern for using the "in-place" consumption APIs:
1571 * 1597 *
1598 * init_cached_read_index();
1599 *
1572 * while (get_next_pkt_raw() { 1600 * while (get_next_pkt_raw() {
1573 * process the packet "in-place"; 1601 * process the packet "in-place";
1574 * put_pkt_raw(); 1602 * put_pkt_raw();
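
The cached read index snapshots where the host last saw the guest before a batch of in-place reads, so hv_signal_on_read() can suppress a signal unless even the pre-batch free space was below the host's pending_sz. The consumption loop from the comment above, spelled out as a sketch (error handling and ring details elided; commit_rd_index() is the existing publish step):

    #include <linux/hyperv.h>

    static void drain_channel_sketch(struct vmbus_channel *channel)
    {
        struct vmpacket_descriptor *desc;

        init_cached_read_index(channel);    /* snapshot read_index */

        while ((desc = get_next_pkt_raw(channel)) != NULL) {
            /* ... process the packet in place ... */
            put_pkt_raw(channel, desc);
        }

        commit_rd_index(channel);   /* publish, maybe signal the host */
    }
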
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b2109c522dec..4b45ec46161f 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -665,6 +665,7 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
665#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ 665#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
666 /* Must equal I2C_M_TEN below */ 666 /* Must equal I2C_M_TEN below */
667#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */ 667#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
668#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */
668#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */ 669#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
669#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */ 670#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
670 /* Must match I2C_M_STOP|IGNORE_NAK */ 671 /* Must match I2C_M_STOP|IGNORE_NAK */
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 228bd44efa4c..497f2b3a5a62 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -116,6 +116,16 @@ struct st_sensor_bdu {
116}; 116};
117 117
118/** 118/**
119 * struct st_sensor_das - ST sensor device data alignment selection
120 * @addr: address of the register.
121 * @mask: mask to write the das flag for left alignment.
122 */
123struct st_sensor_das {
124 u8 addr;
125 u8 mask;
126};
127
128/**
119 * struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt 129 * struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
120 * @addr: address of the register. 130 * @addr: address of the register.
121 * @mask_int1: mask to enable/disable IRQ on INT1 pin. 131 * @mask_int1: mask to enable/disable IRQ on INT1 pin.
@@ -185,6 +195,7 @@ struct st_sensor_transfer_function {
185 * @enable_axis: Enable one or more axis of the sensor. 195 * @enable_axis: Enable one or more axis of the sensor.
186 * @fs: Full scale register and full scale list available. 196 * @fs: Full scale register and full scale list available.
187 * @bdu: Block data update register. 197 * @bdu: Block data update register.
198 * @das: Data Alignment Selection register.
188 * @drdy_irq: Data ready register of the sensor. 199 * @drdy_irq: Data ready register of the sensor.
189 * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. 200 * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
190 * @bootime: samples to discard when sensor passing from power-down to power-up. 201 * @bootime: samples to discard when sensor passing from power-down to power-up.
@@ -200,6 +211,7 @@ struct st_sensor_settings {
200 struct st_sensor_axis enable_axis; 211 struct st_sensor_axis enable_axis;
201 struct st_sensor_fullscale fs; 212 struct st_sensor_fullscale fs;
202 struct st_sensor_bdu bdu; 213 struct st_sensor_bdu bdu;
214 struct st_sensor_das das;
203 struct st_sensor_data_ready_irq drdy_irq; 215 struct st_sensor_data_ready_irq drdy_irq;
204 bool multi_read_bit; 216 bool multi_read_bit;
205 unsigned int bootime; 217 unsigned int bootime;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index e79875574b39..39e3254e5769 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -184,6 +184,7 @@ struct irq_data {
184 * 184 *
185 * IRQD_TRIGGER_MASK - Mask for the trigger type bits 185 * IRQD_TRIGGER_MASK - Mask for the trigger type bits
186 * IRQD_SETAFFINITY_PENDING - Affinity setting is pending 186 * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
187 * IRQD_ACTIVATED - Interrupt has already been activated
187 * IRQD_NO_BALANCING - Balancing disabled for this IRQ 188 * IRQD_NO_BALANCING - Balancing disabled for this IRQ
188 * IRQD_PER_CPU - Interrupt is per cpu 189 * IRQD_PER_CPU - Interrupt is per cpu
189 * IRQD_AFFINITY_SET - Interrupt affinity was set 190 * IRQD_AFFINITY_SET - Interrupt affinity was set
@@ -202,6 +203,7 @@ struct irq_data {
202enum { 203enum {
203 IRQD_TRIGGER_MASK = 0xf, 204 IRQD_TRIGGER_MASK = 0xf,
204 IRQD_SETAFFINITY_PENDING = (1 << 8), 205 IRQD_SETAFFINITY_PENDING = (1 << 8),
206 IRQD_ACTIVATED = (1 << 9),
205 IRQD_NO_BALANCING = (1 << 10), 207 IRQD_NO_BALANCING = (1 << 10),
206 IRQD_PER_CPU = (1 << 11), 208 IRQD_PER_CPU = (1 << 11),
207 IRQD_AFFINITY_SET = (1 << 12), 209 IRQD_AFFINITY_SET = (1 << 12),
@@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
312 return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; 314 return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
313} 315}
314 316
317static inline bool irqd_is_activated(struct irq_data *d)
318{
319 return __irqd_to_state(d) & IRQD_ACTIVATED;
320}
321
322static inline void irqd_set_activated(struct irq_data *d)
323{
324 __irqd_to_state(d) |= IRQD_ACTIVATED;
325}
326
327static inline void irqd_clr_activated(struct irq_data *d)
328{
329 __irqd_to_state(d) &= ~IRQD_ACTIVATED;
330}
331
315#undef __irqd_to_state 332#undef __irqd_to_state
316 333
317static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 334static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index 089f70f83e97..23da3af459fe 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -14,6 +14,7 @@ struct static_key_deferred {
14 14
15#ifdef HAVE_JUMP_LABEL 15#ifdef HAVE_JUMP_LABEL
16extern void static_key_slow_dec_deferred(struct static_key_deferred *key); 16extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
17extern void static_key_deferred_flush(struct static_key_deferred *key);
17extern void 18extern void
18jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); 19jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
19 20
@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
26 STATIC_KEY_CHECK_USE(); 27 STATIC_KEY_CHECK_USE();
27 static_key_slow_dec(&key->key); 28 static_key_slow_dec(&key->key);
28} 29}
30static inline void static_key_deferred_flush(struct static_key_deferred *key)
31{
32 STATIC_KEY_CHECK_USE();
33}
29static inline void 34static inline void
30jump_label_rate_limit(struct static_key_deferred *key, 35jump_label_rate_limit(struct static_key_deferred *key,
31 unsigned long rl) 36 unsigned long rl)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 56aec84237ad..cb09238f6d32 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -514,8 +514,8 @@ extern enum system_states {
514#define TAINT_FLAGS_COUNT 16 514#define TAINT_FLAGS_COUNT 16
515 515
516struct taint_flag { 516struct taint_flag {
517 char true; /* character printed when tainted */ 517 char c_true; /* character printed when tainted */
518 char false; /* character printed when not tainted */ 518 char c_false; /* character printed when not tainted */
519 bool module; /* also show as a per-module taint flag */ 519 bool module; /* also show as a per-module taint flag */
520}; 520};
521 521
diff --git a/include/linux/log2.h b/include/linux/log2.h
index fd7ff3d91e6a..ef3d4f67118c 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -203,6 +203,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
203 * ... and so on. 203 * ... and so on.
204 */ 204 */
205 205
206#define order_base_2(n) ilog2(roundup_pow_of_two(n)) 206static inline __attribute_const__
207int __order_base_2(unsigned long n)
208{
209 return n > 1 ? ilog2(n - 1) + 1 : 0;
210}
207 211
212#define order_base_2(n) \
213( \
214 __builtin_constant_p(n) ? ( \
215 ((n) == 0 || (n) == 1) ? 0 : \
216 ilog2((n) - 1) + 1) : \
217 __order_base_2(n) \
218)
208#endif /* _LINUX_LOG2_H */ 219#endif /* _LINUX_LOG2_H */
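
The rework keeps order_base_2() usable in constant expressions while sending runtime values through __order_base_2(), and it pins down the n == 0 and n == 1 edge cases that ilog2(roundup_pow_of_two(n)) only handled by accident (ilog2(0) is undefined). A user-space reference sketch of the intended mapping:

    #include <assert.h>

    /* order_base_2(n): smallest k with 2^k >= n; 0 and 1 map to 0. */
    static int order_base_2_ref(unsigned long n)
    {
        int k = 0;

        while ((1UL << k) < n)
            k++;
        return k;
    }

    int main(void)
    {
        /*  n: 0 1 2 3 4 5 8 9   ->   k: 0 0 1 2 2 3 3 4  */
        assert(order_base_2_ref(0) == 0);
        assert(order_base_2_ref(1) == 0);
        assert(order_base_2_ref(3) == 2);
        assert(order_base_2_ref(8) == 3);
        assert(order_base_2_ref(9) == 4);
        return 0;
    }
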
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index ec819e9a115a..b6e048e1045f 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -13,34 +13,10 @@
13#ifndef MDEV_H 13#ifndef MDEV_H
14#define MDEV_H 14#define MDEV_H
15 15
16/* Parent device */ 16struct mdev_device;
17struct parent_device {
18 struct device *dev;
19 const struct parent_ops *ops;
20
21 /* internal */
22 struct kref ref;
23 struct mutex lock;
24 struct list_head next;
25 struct kset *mdev_types_kset;
26 struct list_head type_list;
27};
28
29/* Mediated device */
30struct mdev_device {
31 struct device dev;
32 struct parent_device *parent;
33 uuid_le uuid;
34 void *driver_data;
35
36 /* internal */
37 struct kref ref;
38 struct list_head next;
39 struct kobject *type_kobj;
40};
41 17
42/** 18/**
43 * struct parent_ops - Structure to be registered for each parent device to 19 * struct mdev_parent_ops - Structure to be registered for each parent device to
44 * register the device to mdev module. 20 * register the device to mdev module.
45 * 21 *
46 * @owner: The module owner. 22 * @owner: The module owner.
@@ -86,10 +62,9 @@ struct mdev_device {
86 * @mdev: mediated device structure 62 * @mdev: mediated device structure
87 * @vma: vma structure 63 * @vma: vma structure
88 * Parent device that support mediated device should be registered with mdev 64 * Parent device that support mediated device should be registered with mdev
89 * module with parent_ops structure. 65 * module with mdev_parent_ops structure.
90 **/ 66 **/
91 67struct mdev_parent_ops {
92struct parent_ops {
93 struct module *owner; 68 struct module *owner;
94 const struct attribute_group **dev_attr_groups; 69 const struct attribute_group **dev_attr_groups;
95 const struct attribute_group **mdev_attr_groups; 70 const struct attribute_group **mdev_attr_groups;
@@ -103,7 +78,7 @@ struct parent_ops {
103 size_t count, loff_t *ppos); 78 size_t count, loff_t *ppos);
104 ssize_t (*write)(struct mdev_device *mdev, const char __user *buf, 79 ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
105 size_t count, loff_t *ppos); 80 size_t count, loff_t *ppos);
106 ssize_t (*ioctl)(struct mdev_device *mdev, unsigned int cmd, 81 long (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
107 unsigned long arg); 82 unsigned long arg);
108 int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma); 83 int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
109}; 84};
@@ -142,27 +117,22 @@ struct mdev_driver {
142}; 117};
143 118
144#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver) 119#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver)
145#define to_mdev_device(dev) container_of(dev, struct mdev_device, dev)
146
147static inline void *mdev_get_drvdata(struct mdev_device *mdev)
148{
149 return mdev->driver_data;
150}
151 120
152static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data) 121extern void *mdev_get_drvdata(struct mdev_device *mdev);
153{ 122extern void mdev_set_drvdata(struct mdev_device *mdev, void *data);
154 mdev->driver_data = data; 123extern uuid_le mdev_uuid(struct mdev_device *mdev);
155}
156 124
157extern struct bus_type mdev_bus_type; 125extern struct bus_type mdev_bus_type;
158 126
159#define dev_is_mdev(d) ((d)->bus == &mdev_bus_type)
160
161extern int mdev_register_device(struct device *dev, 127extern int mdev_register_device(struct device *dev,
162 const struct parent_ops *ops); 128 const struct mdev_parent_ops *ops);
163extern void mdev_unregister_device(struct device *dev); 129extern void mdev_unregister_device(struct device *dev);
164 130
165extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner); 131extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner);
166extern void mdev_unregister_driver(struct mdev_driver *drv); 132extern void mdev_unregister_driver(struct mdev_driver *drv);
167 133
134extern struct device *mdev_parent_dev(struct mdev_device *mdev);
135extern struct device *mdev_dev(struct mdev_device *mdev);
136extern struct mdev_device *mdev_from_dev(struct device *dev);
137
168#endif /* MDEV_H */ 138#endif /* MDEV_H */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 61d20c17f3b7..254698856b8f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -120,7 +120,7 @@ struct mem_cgroup_reclaim_iter {
120 */ 120 */
121struct mem_cgroup_per_node { 121struct mem_cgroup_per_node {
122 struct lruvec lruvec; 122 struct lruvec lruvec;
123 unsigned long lru_size[NR_LRU_LISTS]; 123 unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
124 124
125 struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1]; 125 struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
126 126
@@ -432,7 +432,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
432int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); 432int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
433 433
434void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 434void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
435 int nr_pages); 435 int zid, int nr_pages);
436 436
437unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 437unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
438 int nid, unsigned int lru_mask); 438 int nid, unsigned int lru_mask);
@@ -441,9 +441,23 @@ static inline
441unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru) 441unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
442{ 442{
443 struct mem_cgroup_per_node *mz; 443 struct mem_cgroup_per_node *mz;
444 unsigned long nr_pages = 0;
445 int zid;
444 446
445 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 447 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
446 return mz->lru_size[lru]; 448 for (zid = 0; zid < MAX_NR_ZONES; zid++)
449 nr_pages += mz->lru_zone_size[zid][lru];
450 return nr_pages;
451}
452
453static inline
454unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
455 enum lru_list lru, int zone_idx)
456{
457 struct mem_cgroup_per_node *mz;
458
459 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
460 return mz->lru_zone_size[zone_idx][lru];
447} 461}
448 462
449void mem_cgroup_handle_over_high(void); 463void mem_cgroup_handle_over_high(void);
@@ -671,6 +685,12 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
671{ 685{
672 return 0; 686 return 0;
673} 687}
688static inline
689unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
690 enum lru_list lru, int zone_idx)
691{
692 return 0;
693}
674 694
675static inline unsigned long 695static inline unsigned long
676mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 696mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 01033fadea47..134a2f69c21a 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
 /* VM interface that may be used by firmware interface */
 extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long, unsigned long);
+extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+	unsigned long *valid_start, unsigned long *valid_end);
 extern void __offline_isolated_pages(unsigned long, unsigned long);
 
 typedef void (*online_page_callback_t)(struct page *page);
@@ -284,7 +285,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 		unsigned long map_offset);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
 					  unsigned long pnum);
-extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-			  enum zone_type target);
+extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+			   enum zone_type target, int *zone_shift);
 
 #endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 257173e0095e..f541da68d1e7 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -35,6 +35,8 @@
 #define PHY_ID_KSZ886X		0x00221430
 #define PHY_ID_KSZ8863		0x00221435
 
+#define PHY_ID_KSZ8795		0x00221550
+
 /* struct phy_device dev_flags definitions */
 #define MICREL_PHY_50MHZ_CLK	0x00000001
 #define MICREL_PHY_FXEN		0x00000002
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 93bdb3485192..6533c16e27ad 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1384,6 +1384,8 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
 int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
 int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
 				      bool *vlan_offload_disabled);
+void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+				       struct _rule_hw *eth_header);
 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
1389int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 1391int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 9f489365b3d3..52b437431c6a 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1071,11 +1071,6 @@ enum {
 	MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
 };
 
-enum {
-	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
-	MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
-};
-
 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
 {
 	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 0ae55361e674..735b36335f29 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -123,7 +123,6 @@ enum {
 	MLX5_REG_HOST_ENDIANNESS = 0x7004,
 	MLX5_REG_MCIA		 = 0x9014,
 	MLX5_REG_MLCR		 = 0x902b,
-	MLX5_REG_MPCNT		 = 0x9051,
 };
 
 enum mlx5_dcbx_oper_mode {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 57bec544e20a..a852e9db6f0d 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1757,80 +1757,6 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
 	u8         reserved_at_4c0[0x300];
 };
 
-struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
-	u8         life_time_counter_high[0x20];
-
-	u8         life_time_counter_low[0x20];
-
-	u8         rx_errors[0x20];
-
-	u8         tx_errors[0x20];
-
-	u8         l0_to_recovery_eieos[0x20];
-
-	u8         l0_to_recovery_ts[0x20];
-
-	u8         l0_to_recovery_framing[0x20];
-
-	u8         l0_to_recovery_retrain[0x20];
-
-	u8         crc_error_dllp[0x20];
-
-	u8         crc_error_tlp[0x20];
-
-	u8         reserved_at_140[0x680];
-};
-
-struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits {
-	u8         life_time_counter_high[0x20];
-
-	u8         life_time_counter_low[0x20];
-
-	u8         time_to_boot_image_start[0x20];
-
-	u8         time_to_link_image[0x20];
-
-	u8         calibration_time[0x20];
-
-	u8         time_to_first_perst[0x20];
-
-	u8         time_to_detect_state[0x20];
-
-	u8         time_to_l0[0x20];
-
-	u8         time_to_crs_en[0x20];
-
-	u8         time_to_plastic_image_start[0x20];
-
-	u8         time_to_iron_image_start[0x20];
-
-	u8         perst_handler[0x20];
-
-	u8         times_in_l1[0x20];
-
-	u8         times_in_l23[0x20];
-
-	u8         dl_down[0x20];
-
-	u8         config_cycle1usec[0x20];
-
-	u8         config_cycle2to7usec[0x20];
-
-	u8         config_cycle_8to15usec[0x20];
-
-	u8         config_cycle_16_to_63usec[0x20];
-
-	u8         config_cycle_64usec[0x20];
-
-	u8         correctable_err_msg_sent[0x20];
-
-	u8         non_fatal_err_msg_sent[0x20];
-
-	u8         fatal_err_msg_sent[0x20];
-
-	u8         reserved_at_2e0[0x4e0];
-};
-
 struct mlx5_ifc_cmd_inter_comp_event_bits {
 	u8         command_completion_vector[0x20];
 
@@ -2995,12 +2921,6 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
 	u8         reserved_at_0[0x7c0];
 };
 
-union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
-	struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
-	struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout;
-	u8         reserved_at_0[0x7c0];
-};
-
 union mlx5_ifc_event_auto_bits {
 	struct mlx5_ifc_comp_event_bits comp_event;
 	struct mlx5_ifc_dct_events_bits dct_events;
@@ -7320,18 +7240,6 @@ struct mlx5_ifc_ppcnt_reg_bits {
 	union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
 };
 
-struct mlx5_ifc_mpcnt_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         pcie_index[0x8];
-	u8         reserved_at_10[0xa];
-	u8         grp[0x6];
-
-	u8         clr[0x1];
-	u8         reserved_at_21[0x1f];
-
-	union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
-};
-
 struct mlx5_ifc_ppad_reg_bits {
 	u8         reserved_at_0[0x3];
 	u8         single_mac[0x1];
@@ -7937,7 +7845,6 @@ union mlx5_ifc_ports_control_registers_document_bits {
 	struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
 	struct mlx5_ifc_ppad_reg_bits ppad_reg;
 	struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
-	struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
 	struct mlx5_ifc_pplm_reg_bits pplm_reg;
 	struct mlx5_ifc_pplr_reg_bits pplr_reg;
 	struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fe6b4036664a..b84615b0f64c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1210,8 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
-	       spinlock_t **ptlp);
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 	unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 71613e8a720f..41d376e7116d 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -39,7 +39,7 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
 {
 	__update_lru_size(lruvec, lru, zid, nr_pages);
 #ifdef CONFIG_MEMCG
-	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
 #endif
 }
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 36d9896fbc1e..f4aac87adcc3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
  * @zonelist - The zonelist to search for a suitable zone
  * @highest_zoneidx - The zone index of the highest zone to return
  * @nodes - An optional nodemask to filter the zonelist with
- * @zone - The first suitable zone found is returned via this parameter
+ * @return - Zoneref pointer for the first suitable zone found (see below)
  *
  * This function returns the first zone at or below a given zone index that is
  * within the allowed nodemask. The zoneref returned is a cursor that can be
  * used to iterate the zonelist with next_zones_zonelist by advancing it by
  * one before calling.
+ *
+ * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
+ * never NULL). This may happen either genuinely, or due to concurrent nodemask
+ * update due to cpuset modification.
  */
 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 					enum zone_type highest_zoneidx,
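
Note: given the return contract clarified above, a caller tests zoneref->zone rather than the returned pointer itself. A minimal sketch, assuming nothing beyond the declarations in this header:

---8<--- illustrative sketch, not part of the patch ---
#include <linux/mmzone.h>

static struct zone *pick_first_zone(struct zonelist *zl,
				    enum zone_type highest_zoneidx,
				    nodemask_t *nodes)
{
	struct zoneref *z = first_zones_zonelist(zl, highest_zoneidx, nodes);

	return z->zone;	/* may be NULL: no eligible zone was found */
}
--->8---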
diff --git a/include/linux/module.h b/include/linux/module.h
index 7c84273d60b9..cc7cba219b20 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -346,7 +346,7 @@ struct module {
 
 	/* Exported symbols */
 	const struct kernel_symbol *syms;
-	const unsigned long *crcs;
+	const s32 *crcs;
 	unsigned int num_syms;
 
 	/* Kernel parameters. */
@@ -359,18 +359,18 @@ struct module {
 	/* GPL-only exported symbols. */
 	unsigned int num_gpl_syms;
 	const struct kernel_symbol *gpl_syms;
-	const unsigned long *gpl_crcs;
+	const s32 *gpl_crcs;
 
 #ifdef CONFIG_UNUSED_SYMBOLS
 	/* unused exported symbols. */
 	const struct kernel_symbol *unused_syms;
-	const unsigned long *unused_crcs;
+	const s32 *unused_crcs;
 	unsigned int num_unused_syms;
 
 	/* GPL-only, unused exported symbols. */
 	unsigned int num_unused_gpl_syms;
 	const struct kernel_symbol *unused_gpl_syms;
-	const unsigned long *unused_gpl_crcs;
+	const s32 *unused_gpl_crcs;
 #endif
 
 #ifdef CONFIG_MODULE_SIG
@@ -382,7 +382,7 @@ struct module {
 
 	/* symbols that will be GPL-only in the near future. */
 	const struct kernel_symbol *gpl_future_syms;
-	const unsigned long *gpl_future_crcs;
+	const s32 *gpl_future_crcs;
 	unsigned int num_gpl_future_syms;
 
 	/* Exception table */
@@ -523,7 +523,7 @@ struct module *find_module(const char *name);
 
 struct symsearch {
 	const struct kernel_symbol *start, *stop;
-	const unsigned long *crcs;
+	const s32 *crcs;
 	enum {
 		NOT_GPL_ONLY,
 		GPL_ONLY,
@@ -539,7 +539,7 @@ struct symsearch {
  */
 const struct kernel_symbol *find_symbol(const char *name,
 					struct module **owner,
-					const unsigned long **crc,
+					const s32 **crc,
 					bool gplok,
 					bool warn);
 
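
Note: narrowing the symbol CRC arrays from unsigned long to s32 allows a slot to hold either an absolute CRC value or a signed place-relative reference. A sketch of how a consumer might resolve one; whether the CONFIG_MODULE_REL_CRCS branch applies here is an assumption about how these values are consumed, not something this hunk shows.

---8<--- illustrative sketch, not part of the patch ---
static inline u32 resolve_symbol_crc(const s32 *crc)
{
#ifdef CONFIG_MODULE_REL_CRCS
	/* the stored value is an offset from its own location */
	return *(const u32 *)((const char *)crc + *crc);
#else
	return (u32)*crc;	/* the stored value is the CRC itself */
#endif
}
--->8---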
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 994f7423a74b..27914672602d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -866,11 +866,15 @@ struct netdev_xdp {
  *	of useless work if you return NETDEV_TX_BUSY.
  *	Required; cannot be NULL.
  *
- * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
- *	netdev_features_t features);
- *	Adjusts the requested feature flags according to device-specific
- *	constraints, and returns the resulting flags. Must not modify
- *	the device state.
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ *					   struct net_device *dev
+ *					   netdev_features_t features);
+ *	Called by core transmit path to determine if device is capable of
+ *	performing offload operations on a given packet. This is to give
+ *	the device an opportunity to implement any restrictions that cannot
+ *	be otherwise expressed by feature flags. The check is called with
+ *	the set of features that the stack has calculated and it returns
+ *	those the driver believes to be appropriate.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
  *                         void *accel_priv, select_queue_fallback_t fallback);
@@ -1028,6 +1032,12 @@ struct netdev_xdp {
  *	Called to release previously enslaved netdev.
  *
  * Feature/offload setting functions.
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ *	netdev_features_t features);
+ *	Adjusts the requested feature flags according to device-specific
+ *	constraints, and returns the resulting flags. Must not modify
+ *	the device state.
+ *
  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
  *	Called to update device configuration to new features. Passed
  *	feature set might be less than what was returned by ndo_fix_features()).
@@ -1100,15 +1110,6 @@ struct netdev_xdp {
  *	Callback to use for xmit over the accelerated station. This
  *	is used in place of ndo_start_xmit on accelerated net
  *	devices.
- * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
- *					   struct net_device *dev
- *					   netdev_features_t features);
- *	Called by core transmit path to determine if device is capable of
- *	performing offload operations on a given packet. This is to give
- *	the device an opportunity to implement any restrictions that cannot
- *	be otherwise expressed by feature flags. The check is called with
- *	the set of features that the stack has calculated and it returns
- *	those the driver believes to be appropriate.
  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
  *			     int queue_index, u32 maxrate);
  *	Called when a user wants to set a max-rate limitation of specific
@@ -1510,6 +1511,7 @@ enum netdev_priv_flags {
  *	@max_mtu:	Interface Maximum MTU value
  *	@type:		Interface hardware type
  *	@hard_header_len: Maximum hardware header length.
+ *	@min_header_len:  Minimum hardware header length
  *
  *	@needed_headroom: Extra headroom the hardware may need, but not in all
  *			  cases can this be guaranteed
@@ -1727,6 +1729,7 @@ struct net_device {
 	unsigned int		max_mtu;
 	unsigned short		type;
 	unsigned short		hard_header_len;
+	unsigned short		min_header_len;
 
 	unsigned short		needed_headroom;
 	unsigned short		needed_tailroom;
@@ -2477,14 +2480,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
 	return NAPI_GRO_CB(skb)->frag0_len < hlen;
 }
 
+static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
+{
+	NAPI_GRO_CB(skb)->frag0 = NULL;
+	NAPI_GRO_CB(skb)->frag0_len = 0;
+}
+
 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
 					unsigned int offset)
 {
 	if (!pskb_may_pull(skb, hlen))
 		return NULL;
 
-	NAPI_GRO_CB(skb)->frag0 = NULL;
-	NAPI_GRO_CB(skb)->frag0_len = 0;
+	skb_gro_frag0_invalidate(skb);
 	return skb->data + offset;
 }
 
@@ -2688,6 +2696,8 @@ static inline bool dev_validate_header(const struct net_device *dev,
 {
 	if (likely(len >= dev->hard_header_len))
 		return true;
+	if (len < dev->min_header_len)
+		return false;
 
 	if (capable(CAP_SYS_RAWIO)) {
 		memset(ll_header + len, 0, dev->hard_header_len - len);
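
Note: the relocated ndo_features_check documentation above describes a per-packet veto on offloads. A sketch of a driver-side implementation; the function name and MY_HW_MAX_HDR_LEN limit are assumptions, not from any real driver.

---8<--- illustrative sketch, not part of the patch ---
#include <linux/netdevice.h>

#define MY_HW_MAX_HDR_LEN	128	/* assumed hardware parser limit */

static netdev_features_t my_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	/* Drop segmentation offloads the assumed hardware cannot honour. */
	if (skb_transport_offset(skb) > MY_HW_MAX_HDR_LEN)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
	return features;
}
--->8---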
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index bca536341d1a..1b1ca04820a3 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -282,7 +282,7 @@ enum nfsstat4 {
 
 static inline bool seqid_mutating_err(u32 err)
 {
-	/* rfc 3530 section 8.1.5: */
+	/* See RFC 7530, section 9.1.7 */
 	switch (err) {
 	case NFS4ERR_STALE_CLIENTID:
 	case NFS4ERR_STALE_STATEID:
@@ -291,6 +291,7 @@ static inline bool seqid_mutating_err(u32 err)
 	case NFS4ERR_BADXDR:
 	case NFS4ERR_RESOURCE:
 	case NFS4ERR_NOFILEHANDLE:
+	case NFS4ERR_MOVED:
 		return false;
 	};
 	return true;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index aacca824a6ae..0a3fadc32693 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 extern unsigned long watchdog_enabled;
 extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
 #ifdef CONFIG_SMP
 extern int sysctl_softlockup_all_cpu_backtrace;
 extern int sysctl_hardlockup_all_cpu_backtrace;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c56b39890a41..6b5818d6de32 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -73,13 +73,13 @@
  */
 enum pageflags {
 	PG_locked,		/* Page is locked. Don't touch. */
-	PG_waiters,		/* Page has waiters, check its waitqueue */
 	PG_error,
 	PG_referenced,
 	PG_uptodate,
 	PG_dirty,
 	PG_lru,
 	PG_active,
+	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
 	PG_slab,
 	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use*/
 	PG_arch_1,
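
Note: the comment above insists PG_waiters sit at bit 7, in the same byte as PG_locked (bit 0). A sketch of why that placement matters, assuming little-endian layout and a byte-wide atomic; this is not the kernel's actual unlock path, just the shape of the trick.

---8<--- illustrative sketch, not part of the patch ---
/* Clear the lock bit and test the waiters bit in one byte-wide atomic;
 * PG_waiters at bit 7 is the sign bit of that byte.
 */
static bool unlock_byte_and_test_waiters(unsigned long *flags)
{
	unsigned char *byte = (unsigned char *)flags;	/* assumes LE */
	unsigned char old = __sync_fetch_and_and(byte,
						 (unsigned char)~(1u << 0));

	return old & (1u << 7);	/* PG_waiters */
}
--->8---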
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 1c7eec09e5eb..3a481a49546e 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
 	unsigned long __percpu *percpu_count;
-	int ret;
+	bool ret;
 
 	rcu_read_lock_sched();
 
@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
 	unsigned long __percpu *percpu_count;
-	int ret = false;
+	bool ret = false;
 
 	rcu_read_lock_sched();
 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4741ecdb9817..78ed8105e64d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1259,6 +1259,7 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_disable_local(struct perf_event *event);
 extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
+extern int perf_event_account_interrupt(struct perf_event *event);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
 perf_aux_output_begin(struct perf_output_handle *handle,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index f7d95f644eed..7fc1105605bf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -25,7 +25,6 @@
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/mod_devicetable.h>
-#include <linux/phy_led_triggers.h>
 
 #include <linux/atomic.h>
 
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index a2daea0a37d2..b37b05bfd1a6 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -18,11 +18,11 @@ struct phy_device;
 #ifdef CONFIG_LED_TRIGGER_PHY
 
 #include <linux/leds.h>
+#include <linux/phy.h>
 
 #define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE	10
-#define PHY_MII_BUS_ID_SIZE	(20 - 3)
 
-#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \
+#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
				       FIELD_SIZEOF(struct mdio_device, addr)+\
				       PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
 
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 5dea8f6440e4..52bda854593b 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -306,7 +306,9 @@ void radix_tree_iter_replace(struct radix_tree_root *,
 void radix_tree_replace_slot(struct radix_tree_root *root,
			     void **slot, void *item);
 void __radix_tree_delete_node(struct radix_tree_root *root,
-			      struct radix_tree_node *node);
+			      struct radix_tree_node *node,
+			      radix_tree_update_node_t update_node,
+			      void *private);
 void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
 void *radix_tree_delete(struct radix_tree_root *, unsigned long);
 void radix_tree_clear_tags(struct radix_tree_root *root,
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 321f9ed552a9..01f71e1d2e94 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
 #error "Unknown RCU implementation specified to kernel configuration"
 #endif
 
+#define RCU_SCHEDULER_INACTIVE	0
+#define RCU_SCHEDULER_INIT	1
+#define RCU_SCHEDULER_RUNNING	2
+
 /*
  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
  * initialization and destruction of rcu_head on the stack. rcu_head structures
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index e2f3a3281d8f..8265d351c9f0 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -408,7 +408,8 @@ enum rproc_crash_type {
  * @crash_comp: completion used to sync crash handler and the rproc reload
  * @recovery_disabled: flag that state if recovery was disabled
  * @max_notifyid: largest allocated notify id.
- * @table_ptr: our copy of the resource table
+ * @table_ptr: pointer to the resource table in effect
+ * @cached_table: copy of the resource table
  * @has_iommu: flag to indicate if remote processor is behind an MMU
  */
 struct rproc {
@@ -440,6 +441,7 @@ struct rproc {
 	bool recovery_disabled;
 	int max_notifyid;
 	struct resource_table *table_ptr;
+	struct resource_table *cached_table;
 	bool has_iommu;
 	bool auto_boot;
 };
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4d1905245c7a..ad3ec9ec61f7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -854,6 +854,16 @@ struct signal_struct {
 
 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
 
+#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
+			  SIGNAL_STOP_CONTINUED)
+
+static inline void signal_set_stop_flags(struct signal_struct *sig,
+					 unsigned int flags)
+{
+	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
+}
+
 /* If true, all threads except ->group_exit_task have pending SIGKILL */
 static inline int signal_group_exit(const struct signal_struct *sig)
 {
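
Note: the new signal_set_stop_flags() helper centralizes stop-flag updates so the exit flags can no longer be clobbered by a plain assignment. A sketch of a converted call site; the wrapper name is hypothetical and the real callers live in kernel/signal.c.

---8<--- illustrative sketch, not part of the patch ---
static void mark_group_stopped(struct signal_struct *sig)
{
	/* replaces a direct "sig->flags = SIGNAL_STOP_STOPPED;" */
	signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
}
--->8---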
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b53c0cfd417e..a410715bbef8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2480,7 +2480,7 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 
 static inline void skb_free_frag(void *addr)
 {
-	__free_page_frag(addr);
+	page_frag_free(addr);
 }
 
 void *napi_alloc_frag(unsigned int fragsz);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 084b12bad198..4c5363566815 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr,
  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
@@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr,
  * be allocated from the same page.
  */
 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX	30
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 85cc819676e8..333ad11b3dd9 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -216,5 +216,6 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
 void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
 bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
			const struct sockaddr *sap);
+void rpc_cleanup_clids(void);
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index e5d193440374..7440290f64ac 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -66,6 +66,7 @@ struct svc_xprt {
 #define	XPT_LISTENER	10		/* listening endpoint */
 #define XPT_CACHE_AUTH	11		/* cache auth info */
 #define XPT_LOCAL	12		/* connection from loopback interface */
+#define XPT_KILL_TEMP	13		/* call xpo_kill_temp_xprt before closing */
 
 	struct svc_serv		*xpt_server;	/* service for transport */
 	atomic_t		xpt_reserved;	/* space on outq that is rsvd */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0c729c3c8549..d9718378a8be 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -194,8 +194,6 @@ struct platform_freeze_ops {
 };
 
 #ifdef CONFIG_SUSPEND
-extern suspend_state_t mem_sleep_default;
-
 /**
  * suspend_set_ops - set platform dependent suspend operations
  * @ops: The new suspend operations to set.
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 09f4be179ff3..7f47b7098b1b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -150,8 +150,9 @@ enum {
 	SWP_FILE	= (1 << 7),	/* set after swap_activate success */
 	SWP_AREA_DISCARD = (1 << 8),	/* single-time swap area discards */
 	SWP_PAGE_DISCARD = (1 << 9),	/* freed swap page-cluster discards */
+	SWP_STABLE_WRITES = (1 << 10),	/* no overwrite PG_writeback pages */
 					/* add others here before... */
-	SWP_SCANNING	= (1 << 10),	/* refcount in scan_swap_map */
+	SWP_SCANNING	= (1 << 11),	/* refcount in scan_swap_map */
 };
 
 #define SWAP_CLUSTER_MAX 32UL
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 183f37c8a5e1..4ee479f2f355 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -9,7 +9,13 @@ struct device;
 struct page;
 struct scatterlist;
 
-extern int swiotlb_force;
+enum swiotlb_force {
+	SWIOTLB_NORMAL,		/* Default - depending on HW DMA mask etc. */
+	SWIOTLB_FORCE,		/* swiotlb=force */
+	SWIOTLB_NO_FORCE,	/* swiotlb=noforce */
+};
+
+extern enum swiotlb_force swiotlb_force;
 
 /*
  * Maximum allowable number of contiguous slabs to map,
@@ -108,11 +114,14 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
 #ifdef CONFIG_SWIOTLB
 extern void __init swiotlb_free(void);
+unsigned int swiotlb_max_segment(void);
 #else
 static inline void swiotlb_free(void) { }
+static inline unsigned int swiotlb_max_segment(void) { return 0; }
 #endif
 
 extern void swiotlb_print_info(void);
 extern int is_swiotlb_buffer(phys_addr_t paddr);
+extern void swiotlb_set_max_segment(unsigned int);
 
 #endif /* __LINUX_SWIOTLB_H */
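
Note: swiotlb_force changes from an int to a tristate enum so "swiotlb=force" and "swiotlb=noforce" are distinct states. A sketch of how boot code might map the kernel parameter onto the enum; this is not the actual kernel parser, just its shape.

---8<--- illustrative sketch, not part of the patch ---
#include <linux/init.h>
#include <linux/string.h>
#include <linux/swiotlb.h>

static int __init swiotlb_param_setup(char *str)
{
	if (!strcmp(str, "force"))
		swiotlb_force = SWIOTLB_FORCE;
	else if (!strcmp(str, "noforce"))
		swiotlb_force = SWIOTLB_NO_FORCE;
	else
		swiotlb_force = SWIOTLB_NORMAL;
	return 0;
}
early_param("swiotlb", swiotlb_param_setup);
--->8---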
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fc5848dad7a4..c93f4b3a59cb 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
 
 /* TCP Fast Open Cookie as stored in memory */
 struct tcp_fastopen_cookie {
+	union {
+		u8	val[TCP_FASTOPEN_COOKIE_MAX];
+#if IS_ENABLED(CONFIG_IPV6)
+		struct in6_addr addr;
+#endif
+	};
 	s8	len;
-	u8	val[TCP_FASTOPEN_COOKIE_MAX];
 	bool	exp;	/* In RFC6994 experimental option format */
 };
 
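
Note: the anonymous union above lets the cookie bytes alias an IPv6 address in place, so a cookie derived from the peer address needs no memcpy. A sketch, assuming CONFIG_IPV6 is enabled; the function here is hypothetical, not a call site from this patch.

---8<--- illustrative sketch, not part of the patch ---
#include <linux/tcp.h>

static void cookie_from_saddr(struct tcp_fastopen_cookie *c,
			      const struct in6_addr *saddr)
{
	c->addr = *saddr;		/* writes the same storage as c->val */
	c->len = TCP_FASTOPEN_COOKIE_SIZE;
}
--->8---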
diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h
index bd36ce431e32..bab0b1ad0613 100644
--- a/include/linux/timerfd.h
+++ b/include/linux/timerfd.h
@@ -8,23 +8,7 @@
 #ifndef _LINUX_TIMERFD_H
 #define _LINUX_TIMERFD_H
 
-/* For O_CLOEXEC and O_NONBLOCK */
-#include <linux/fcntl.h>
-
-/* For _IO helpers */
-#include <linux/ioctl.h>
-
-/*
- * CAREFUL: Check include/asm-generic/fcntl.h when defining
- * new flags, since they might collide with O_* ones. We want
- * to re-use O_* flags that couldn't possibly have a meaning
- * from eventfd, in order to leave a free define-space for
- * shared O_* flags.
- */
-#define TFD_TIMER_ABSTIME (1 << 0)
-#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
-#define TFD_CLOEXEC O_CLOEXEC
-#define TFD_NONBLOCK O_NONBLOCK
+#include <uapi/linux/timerfd.h>
 
 #define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK)
 /* Flags for timerfd_create. */
@@ -32,6 +16,4 @@
 /* Flags for timerfd_settime. */
 #define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
 
-#define TFD_IOC_SET_TICKS _IOW('T', 0, u64)
-
 #endif /* _LINUX_TIMERFD_H */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 66204007d7ac..5209b5ed2a64 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 
 static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
 					   struct virtio_net_hdr *hdr,
-					   bool little_endian)
+					   bool little_endian,
+					   bool has_data_valid)
 {
 	memset(hdr, 0, sizeof(*hdr));	/* no info leak */
 
@@ -91,7 +92,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
 						skb_checksum_start_offset(skb));
 		hdr->csum_offset = __cpu_to_virtio16(little_endian,
 				skb->csum_offset);
-	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+	} else if (has_data_valid &&
+		   skb->ip_summed == CHECKSUM_UNNECESSARY) {
 		hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
 	} /* else everything is zero */
 
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index 3ebb168b9afc..a34b141f125f 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -309,6 +309,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
 	}
 
 	for (opt_iter = 6; opt_iter < opt_len;) {
+		if (opt_iter + 1 == opt_len) {
+			err_offset = opt_iter;
+			goto out;
+		}
 		tag_len = opt[opt_iter + 1];
 		if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
 			err_offset = opt_iter + 1;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 487e57391664..dbf0abba33b8 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -776,6 +776,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
 {
 	u32 hash;
 
+	/* @flowlabel may include more than a flow label, eg, the traffic class.
+	 * Here we want only the flow label value.
+	 */
+	flowlabel &= IPV6_FLOWLABEL_MASK;
+
 	if (flowlabel ||
 	    net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
 	    (!autolabel &&
@@ -871,7 +876,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
  * upper-layer output functions
  */
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-	     struct ipv6_txoptions *opt, int tclass);
+	     __u32 mark, struct ipv6_txoptions *opt, int tclass);
 
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
 
877 882
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index d4c1c75b8862..0388b9c5f5e2 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -44,6 +44,8 @@ struct lwtunnel_encap_ops {
 	int (*get_encap_size)(struct lwtunnel_state *lwtstate);
 	int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
 	int (*xmit)(struct sk_buff *skb);
+
+	struct module *owner;
 };
 
 #ifdef CONFIG_LWTUNNEL
@@ -105,6 +107,8 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
 			   unsigned int num);
 int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
 			   unsigned int num);
+int lwtunnel_valid_encap_type(u16 encap_type);
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len);
 int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
 			 struct nlattr *encap,
 			 unsigned int family, const void *cfg,
@@ -168,6 +172,18 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
 	return -EOPNOTSUPP;
 }
 
+static inline int lwtunnel_valid_encap_type(u16 encap_type)
+{
+	return -EOPNOTSUPP;
+}
+static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
+{
+	/* return 0 since we are not walking attr looking for
+	 * RTA_ENCAP_TYPE attribute on nexthops.
+	 */
+	return 0;
+}
+
 static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
 				       struct nlattr *encap,
 				       unsigned int family, const void *cfg,
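
Note: the new .owner field lets the lwtunnel core take a module reference while encap state is live, so a provider module cannot be unloaded underneath it. A sketch of how a provider would fill it in; the ops instance and stub callback here are placeholders, not from any real encap module.

---8<--- illustrative sketch, not part of the patch ---
static int my_encap_xmit(struct sk_buff *skb)
{
	kfree_skb(skb);		/* stub */
	return -EOPNOTSUPP;
}

static const struct lwtunnel_encap_ops my_encap_ops = {
	.xmit	= my_encap_xmit,
	.owner	= THIS_MODULE,	/* lets the core pin this module */
};
--->8---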
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 924325c46aab..7dfdb517f0be 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -207,9 +207,9 @@ struct nft_set_iter {
 	unsigned int		skip;
 	int			err;
 	int			(*fn)(const struct nft_ctx *ctx,
-					  const struct nft_set *set,
+					  struct nft_set *set,
 					  const struct nft_set_iter *iter,
-					  const struct nft_set_elem *elem);
+					  struct nft_set_elem *elem);
 };
 
 /**
@@ -301,7 +301,7 @@ struct nft_set_ops {
 	void				(*remove)(const struct nft_set *set,
 						  const struct nft_set_elem *elem);
 	void				(*walk)(const struct nft_ctx *ctx,
-						const struct nft_set *set,
+						struct nft_set *set,
 						struct nft_set_iter *iter);
 
 	unsigned int			(*privsize)(const struct nlattr * const nla[]);
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index cbedda077db2..5ceb2205e4e3 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -9,6 +9,12 @@ struct nft_fib {
 
 extern const struct nla_policy nft_fib_policy[];
 
+static inline bool
+nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
+{
+	return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
+}
+
 int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr);
 int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 		 const struct nlattr * const tb[]);
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index f0cf5a1b777e..0378e88f6fd3 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -110,6 +110,7 @@ struct netns_ipv4 {
 	int sysctl_tcp_orphan_retries;
 	int sysctl_tcp_fin_timeout;
 	unsigned int sysctl_tcp_notsent_lowat;
+	int sysctl_tcp_tw_reuse;
 
 	int sysctl_igmp_max_memberships;
 	int sysctl_igmp_max_msf;
diff --git a/include/net/sock.h b/include/net/sock.h
index f0e867f58722..c4f5e6fca17c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2006,7 +2006,9 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
 
 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
-			unsigned int flags);
+			unsigned int flags,
+			void (*destructor)(struct sock *sk,
+					   struct sk_buff *skb));
 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 207147b4c6b2..6061963cca98 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -252,7 +252,6 @@ extern int sysctl_tcp_wmem[3];
 extern int sysctl_tcp_rmem[3];
 extern int sysctl_tcp_app_win;
 extern int sysctl_tcp_adv_win_scale;
-extern int sysctl_tcp_tw_reuse;
 extern int sysctl_tcp_frto;
 extern int sysctl_tcp_low_latency;
 extern int sysctl_tcp_nometrics_save;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 958a24d8fae7..b567e4452a47 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -352,6 +352,20 @@ static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
 	}
 }
 
+static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
+{
+	if (mtu >= 4096)
+		return IB_MTU_4096;
+	else if (mtu >= 2048)
+		return IB_MTU_2048;
+	else if (mtu >= 1024)
+		return IB_MTU_1024;
+	else if (mtu >= 512)
+		return IB_MTU_512;
+	else
+		return IB_MTU_256;
+}
+
 enum ib_port_state {
 	IB_PORT_NOP = 0,
 	IB_PORT_DOWN = 1,
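
Note: ib_mtu_int_to_enum() rounds a byte MTU down to the nearest IB enum step, the inverse direction of ib_mtu_enum_to_int() above it. A usage sketch; the wrapper name is hypothetical.

---8<--- illustrative sketch, not part of the patch ---
static enum ib_mtu port_mtu_from_netdev(int netdev_mtu)
{
	/* e.g. 1500 -> IB_MTU_1024, 9000 -> IB_MTU_4096 */
	return ib_mtu_int_to_enum(netdev_mtu);
}
--->8---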
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 96dd0b3f70d7..da5033dd8cbc 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -809,11 +809,11 @@ static inline void fc_set_wwnn(struct fc_lport *lport, u64 wwnn)
 /**
  * fc_set_wwpn() - Set the World Wide Port Name of a local port
  * @lport: The local port whose WWPN is to be set
- * @wwnn: The new WWPN
+ * @wwpn: The new WWPN
  */
-static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwnn)
+static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwpn)
 {
-	lport->wwpn = wwnn;
+	lport->wwpn = wwpn;
 }
 
 /**
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
index 6902c2a8bd23..4b6b489a8d7c 100644
--- a/include/soc/arc/mcip.h
+++ b/include/soc/arc/mcip.h
@@ -55,17 +55,17 @@ struct mcip_cmd {
 
 struct mcip_bcr {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int pad3:8,
-		     idu:1, llm:1, num_cores:6,
-		     iocoh:1, gfrc:1, dbg:1, pad2:1,
-		     msg:1, sem:1, ipi:1, pad:1,
+	unsigned int pad4:6, pw_dom:1, pad3:1,
+		     idu:1, pad2:1, num_cores:6,
+		     pad:1, gfrc:1, dbg:1, pw:1,
+		     msg:1, sem:1, ipi:1, slv:1,
 		     ver:8;
 #else
 	unsigned int ver:8,
-		     pad:1, ipi:1, sem:1, msg:1,
-		     pad2:1, dbg:1, gfrc:1, iocoh:1,
-		     num_cores:6, llm:1, idu:1,
-		     pad3:8;
+		     slv:1, ipi:1, sem:1, msg:1,
+		     pw:1, dbg:1, gfrc:1, pad:1,
+		     num_cores:6, pad2:1, idu:1,
+		     pad3:1, pw_dom:1, pad4:6;
 #endif
 };
 
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
index 530c57bdefa0..915c4357945c 100644
--- a/include/sound/hdmi-codec.h
+++ b/include/sound/hdmi-codec.h
@@ -36,10 +36,10 @@ struct hdmi_codec_daifmt {
 		HDMI_AC97,
 		HDMI_SPDIF,
 	} fmt;
-	int bit_clk_inv:1;
-	int frame_clk_inv:1;
-	int bit_clk_master:1;
-	int frame_clk_master:1;
+	unsigned int bit_clk_inv:1;
+	unsigned int frame_clk_inv:1;
+	unsigned int bit_clk_master:1;
+	unsigned int frame_clk_master:1;
 };
 
 /*
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 2b502f6cc6d0..b86168a21d56 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -813,6 +813,7 @@ struct snd_soc_component {
 	unsigned int suspended:1; /* is in suspend PM state */
 
 	struct list_head list;
+	struct list_head card_aux_list; /* for auxiliary bound components */
 	struct list_head card_list;
 
 	struct snd_soc_dai_driver *dai_drv;
@@ -1152,6 +1153,7 @@ struct snd_soc_card {
 	 */
 	struct snd_soc_aux_dev *aux_dev;
 	int num_aux_devs;
+	struct list_head aux_comp_list;
 
 	const struct snd_kcontrol_new *controls;
 	int num_controls;
@@ -1547,6 +1549,7 @@ static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
 	INIT_LIST_HEAD(&card->widgets);
 	INIT_LIST_HEAD(&card->paths);
 	INIT_LIST_HEAD(&card->dapm_list);
+	INIT_LIST_HEAD(&card->aux_comp_list);
 	INIT_LIST_HEAD(&card->component_dev_list);
 }
 
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 29e6858bb164..da854fb4530f 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -174,6 +174,10 @@ enum tcm_sense_reason_table {
 	TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED	= R(0x16),
 	TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED	= R(0x17),
 	TCM_COPY_TARGET_DEVICE_NOT_REACHABLE	= R(0x18),
+	TCM_TOO_MANY_TARGET_DESCS		= R(0x19),
+	TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE	= R(0x1a),
+	TCM_TOO_MANY_SEGMENT_DESCS		= R(0x1b),
+	TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE	= R(0x1c),
 #undef R
 };
 
@@ -534,6 +538,7 @@ struct se_node_acl {
 	char			initiatorname[TRANSPORT_IQN_LEN];
 	/* Used to signal demo mode created ACL, disabled by default */
 	bool			dynamic_node_acl;
+	bool			dynamic_stop;
 	u32			queue_depth;
 	u32			acl_index;
 	enum target_prot_type	saved_prot_type;
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index c14bed4ab097..88d18a8ceb59 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -130,8 +130,8 @@ DECLARE_EVENT_CLASS(btrfs__inode,
 			BTRFS_I(inode)->root->root_key.objectid;
 	),
 
-	TP_printk_btrfs("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
-		  "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
+	TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%lu blocks=%llu "
+		  "disk_i_size=%llu last_trans=%llu logged_trans=%llu",
 		  show_root_type(__entry->root_objectid),
 		  (unsigned long long)__entry->generation,
 		  (unsigned long)__entry->ino,
@@ -184,14 +184,16 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
 
 TRACE_EVENT_CONDITION(btrfs_get_extent,
 
-	TP_PROTO(struct btrfs_root *root, struct extent_map *map),
+	TP_PROTO(struct btrfs_root *root, struct inode *inode,
+		 struct extent_map *map),
 
-	TP_ARGS(root, map),
+	TP_ARGS(root, inode, map),
 
 	TP_CONDITION(map),
 
 	TP_STRUCT__entry_btrfs(
 		__field(	u64,  root_objectid	)
+		__field(	u64,  ino		)
 		__field(	u64,  start		)
 		__field(	u64,  len		)
 		__field(	u64,  orig_start	)
@@ -204,7 +206,8 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
 
 	TP_fast_assign_btrfs(root->fs_info,
 		__entry->root_objectid = root->root_key.objectid;
-		__entry->start		= map->start;
+		__entry->ino		= btrfs_ino(inode);
+		__entry->start		= map->start;
 		__entry->len		= map->len;
 		__entry->orig_start	= map->orig_start;
 		__entry->block_start	= map->block_start;
@@ -214,11 +217,12 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
 		__entry->compress_type	= map->compress_type;
 	),
 
-	TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu, "
-		  "orig_start = %llu, block_start = %llu(%s), "
-		  "block_len = %llu, flags = %s, refs = %u, "
-		  "compress_type = %u",
+	TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
+		  "orig_start=%llu block_start=%llu(%s) "
+		  "block_len=%llu flags=%s refs=%u "
+		  "compress_type=%u",
 		  show_root_type(__entry->root_objectid),
+		  (unsigned long long)__entry->ino,
 		  (unsigned long long)__entry->start,
 		  (unsigned long long)__entry->len,
 		  (unsigned long long)__entry->orig_start,
@@ -259,6 +263,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
 		__field(	int,  compress_type	)
 		__field(	int,  refs		)
 		__field(	u64,  root_objectid	)
+		__field(	u64,  truncated_len	)
 	),
 
 	TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
@@ -273,18 +278,21 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
 		__entry->refs		= atomic_read(&ordered->refs);
 		__entry->root_objectid	=
 				BTRFS_I(inode)->root->root_key.objectid;
+		__entry->truncated_len	= ordered->truncated_len;
 	),
 
-	TP_printk_btrfs("root = %llu(%s), ino = %llu, file_offset = %llu, "
-		  "start = %llu, len = %llu, disk_len = %llu, "
-		  "bytes_left = %llu, flags = %s, compress_type = %d, "
-		  "refs = %d",
+	TP_printk_btrfs("root=%llu(%s) ino=%llu file_offset=%llu "
+		  "start=%llu len=%llu disk_len=%llu "
+		  "truncated_len=%llu "
+		  "bytes_left=%llu flags=%s compress_type=%d "
+		  "refs=%d",
 		  show_root_type(__entry->root_objectid),
 		  (unsigned long long)__entry->ino,
 		  (unsigned long long)__entry->file_offset,
 		  (unsigned long long)__entry->start,
 		  (unsigned long long)__entry->len,
 		  (unsigned long long)__entry->disk_len,
+		  (unsigned long long)__entry->truncated_len,
 		  (unsigned long long)__entry->bytes_left,
 		  show_ordered_flags(__entry->flags),
 		  __entry->compress_type, __entry->refs)
@@ -354,10 +362,10 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
 			BTRFS_I(inode)->root->root_key.objectid;
 	),
 
-	TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, "
-		  "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
-		  "range_end = %llu, for_kupdate = %d, "
-		  "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
+	TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu "
+		  "nr_to_write=%ld pages_skipped=%ld range_start=%llu "
+		  "range_end=%llu for_kupdate=%d "
+		  "for_reclaim=%d range_cyclic=%d writeback_index=%lu",
 		  show_root_type(__entry->root_objectid),
 		  (unsigned long)__entry->ino, __entry->index,
 		  __entry->nr_to_write, __entry->pages_skipped,
@@ -400,8 +408,8 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
 			BTRFS_I(page->mapping->host)->root->root_key.objectid;
 	),
 
-	TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
+	TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu start=%llu "
404 "end = %llu, uptodate = %d", 412 "end=%llu uptodate=%d",
405 show_root_type(__entry->root_objectid), 413 show_root_type(__entry->root_objectid),
406 (unsigned long)__entry->ino, (unsigned long)__entry->index, 414 (unsigned long)__entry->ino, (unsigned long)__entry->index,
407 (unsigned long long)__entry->start, 415 (unsigned long long)__entry->start,
@@ -433,7 +441,7 @@ TRACE_EVENT(btrfs_sync_file,
433 BTRFS_I(inode)->root->root_key.objectid; 441 BTRFS_I(inode)->root->root_key.objectid;
434 ), 442 ),
435 443
436 TP_printk_btrfs("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d", 444 TP_printk_btrfs("root=%llu(%s) ino=%ld parent=%ld datasync=%d",
437 show_root_type(__entry->root_objectid), 445 show_root_type(__entry->root_objectid),
438 (unsigned long)__entry->ino, (unsigned long)__entry->parent, 446 (unsigned long)__entry->ino, (unsigned long)__entry->parent,
439 __entry->datasync) 447 __entry->datasync)
@@ -484,9 +492,9 @@ TRACE_EVENT(btrfs_add_block_group,
484 __entry->create = create; 492 __entry->create = create;
485 ), 493 ),
486 494
487 TP_printk("%pU: block_group offset = %llu, size = %llu, " 495 TP_printk("%pU: block_group offset=%llu size=%llu "
488 "flags = %llu(%s), bytes_used = %llu, bytes_super = %llu, " 496 "flags=%llu(%s) bytes_used=%llu bytes_super=%llu "
489 "create = %d", __entry->fsid, 497 "create=%d", __entry->fsid,
490 (unsigned long long)__entry->offset, 498 (unsigned long long)__entry->offset,
491 (unsigned long long)__entry->size, 499 (unsigned long long)__entry->size,
492 (unsigned long long)__entry->flags, 500 (unsigned long long)__entry->flags,
@@ -535,9 +543,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
535 __entry->seq = ref->seq; 543 __entry->seq = ref->seq;
536 ), 544 ),
537 545
538 TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, " 546 TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
539 "parent = %llu(%s), ref_root = %llu(%s), level = %d, " 547 "parent=%llu(%s) ref_root=%llu(%s) level=%d "
540 "type = %s, seq = %llu", 548 "type=%s seq=%llu",
541 (unsigned long long)__entry->bytenr, 549 (unsigned long long)__entry->bytenr,
542 (unsigned long long)__entry->num_bytes, 550 (unsigned long long)__entry->num_bytes,
543 show_ref_action(__entry->action), 551 show_ref_action(__entry->action),
@@ -600,9 +608,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
600 __entry->seq = ref->seq; 608 __entry->seq = ref->seq;
601 ), 609 ),
602 610
603 TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, " 611 TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
604 "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, " 612 "parent=%llu(%s) ref_root=%llu(%s) owner=%llu "
605 "offset = %llu, type = %s, seq = %llu", 613 "offset=%llu type=%s seq=%llu",
606 (unsigned long long)__entry->bytenr, 614 (unsigned long long)__entry->bytenr,
607 (unsigned long long)__entry->num_bytes, 615 (unsigned long long)__entry->num_bytes,
608 show_ref_action(__entry->action), 616 show_ref_action(__entry->action),
@@ -657,7 +665,7 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
657 __entry->is_data = head_ref->is_data; 665 __entry->is_data = head_ref->is_data;
658 ), 666 ),
659 667
660 TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d", 668 TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s is_data=%d",
661 (unsigned long long)__entry->bytenr, 669 (unsigned long long)__entry->bytenr,
662 (unsigned long long)__entry->num_bytes, 670 (unsigned long long)__entry->num_bytes,
663 show_ref_action(__entry->action), 671 show_ref_action(__entry->action),
@@ -721,8 +729,8 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
721 __entry->root_objectid = fs_info->chunk_root->root_key.objectid; 729 __entry->root_objectid = fs_info->chunk_root->root_key.objectid;
722 ), 730 ),
723 731
724 TP_printk_btrfs("root = %llu(%s), offset = %llu, size = %llu, " 732 TP_printk_btrfs("root=%llu(%s) offset=%llu size=%llu "
725 "num_stripes = %d, sub_stripes = %d, type = %s", 733 "num_stripes=%d sub_stripes=%d type=%s",
726 show_root_type(__entry->root_objectid), 734 show_root_type(__entry->root_objectid),
727 (unsigned long long)__entry->offset, 735 (unsigned long long)__entry->offset,
728 (unsigned long long)__entry->size, 736 (unsigned long long)__entry->size,
@@ -771,8 +779,8 @@ TRACE_EVENT(btrfs_cow_block,
771 __entry->cow_level = btrfs_header_level(cow); 779 __entry->cow_level = btrfs_header_level(cow);
772 ), 780 ),
773 781
774 TP_printk_btrfs("root = %llu(%s), refs = %d, orig_buf = %llu " 782 TP_printk_btrfs("root=%llu(%s) refs=%d orig_buf=%llu "
775 "(orig_level = %d), cow_buf = %llu (cow_level = %d)", 783 "(orig_level=%d) cow_buf=%llu (cow_level=%d)",
776 show_root_type(__entry->root_objectid), 784 show_root_type(__entry->root_objectid),
777 __entry->refs, 785 __entry->refs,
778 (unsigned long long)__entry->buf_start, 786 (unsigned long long)__entry->buf_start,
@@ -836,7 +844,7 @@ TRACE_EVENT(btrfs_trigger_flush,
836 __assign_str(reason, reason) 844 __assign_str(reason, reason)
837 ), 845 ),
838 846
839 TP_printk("%pU: %s: flush = %d(%s), flags = %llu(%s), bytes = %llu", 847 TP_printk("%pU: %s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
840 __entry->fsid, __get_str(reason), __entry->flush, 848 __entry->fsid, __get_str(reason), __entry->flush,
841 show_flush_action(__entry->flush), 849 show_flush_action(__entry->flush),
842 (unsigned long long)__entry->flags, 850 (unsigned long long)__entry->flags,
@@ -879,8 +887,8 @@ TRACE_EVENT(btrfs_flush_space,
879 __entry->ret = ret; 887 __entry->ret = ret;
880 ), 888 ),
881 889
882 TP_printk("%pU: state = %d(%s), flags = %llu(%s), num_bytes = %llu, " 890 TP_printk("%pU: state=%d(%s) flags=%llu(%s) num_bytes=%llu "
883 "orig_bytes = %llu, ret = %d", __entry->fsid, __entry->state, 891 "orig_bytes=%llu ret=%d", __entry->fsid, __entry->state,
884 show_flush_state(__entry->state), 892 show_flush_state(__entry->state),
885 (unsigned long long)__entry->flags, 893 (unsigned long long)__entry->flags,
886 __print_flags((unsigned long)__entry->flags, "|", 894 __print_flags((unsigned long)__entry->flags, "|",
@@ -905,7 +913,7 @@ DECLARE_EVENT_CLASS(btrfs__reserved_extent,
905 __entry->len = len; 913 __entry->len = len;
906 ), 914 ),
907 915
908 TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu", 916 TP_printk_btrfs("root=%llu(%s) start=%llu len=%llu",
909 show_root_type(BTRFS_EXTENT_TREE_OBJECTID), 917 show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
910 (unsigned long long)__entry->start, 918 (unsigned long long)__entry->start,
911 (unsigned long long)__entry->len) 919 (unsigned long long)__entry->len)
@@ -944,7 +952,7 @@ TRACE_EVENT(find_free_extent,
944 __entry->data = data; 952 __entry->data = data;
945 ), 953 ),
946 954
947 TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, flags = %Lu(%s)", 955 TP_printk_btrfs("root=%Lu(%s) len=%Lu empty_size=%Lu flags=%Lu(%s)",
948 show_root_type(BTRFS_EXTENT_TREE_OBJECTID), 956 show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
949 __entry->num_bytes, __entry->empty_size, __entry->data, 957 __entry->num_bytes, __entry->empty_size, __entry->data,
950 __print_flags((unsigned long)__entry->data, "|", 958 __print_flags((unsigned long)__entry->data, "|",
@@ -973,8 +981,8 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
973 __entry->len = len; 981 __entry->len = len;
974 ), 982 ),
975 983
976 TP_printk_btrfs("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), " 984 TP_printk_btrfs("root=%Lu(%s) block_group=%Lu flags=%Lu(%s) "
977 "start = %Lu, len = %Lu", 985 "start=%Lu len=%Lu",
978 show_root_type(BTRFS_EXTENT_TREE_OBJECTID), 986 show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
979 __entry->bg_objectid, 987 __entry->bg_objectid,
980 __entry->flags, __print_flags((unsigned long)__entry->flags, 988 __entry->flags, __print_flags((unsigned long)__entry->flags,
@@ -1025,8 +1033,8 @@ TRACE_EVENT(btrfs_find_cluster,
1025 __entry->min_bytes = min_bytes; 1033 __entry->min_bytes = min_bytes;
1026 ), 1034 ),
1027 1035
1028 TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu," 1036 TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) start=%Lu len=%Lu "
1029 " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid, 1037 "empty_size=%Lu min_bytes=%Lu", __entry->bg_objectid,
1030 __entry->flags, 1038 __entry->flags,
1031 __print_flags((unsigned long)__entry->flags, "|", 1039 __print_flags((unsigned long)__entry->flags, "|",
1032 BTRFS_GROUP_FLAGS), __entry->start, 1040 BTRFS_GROUP_FLAGS), __entry->start,
@@ -1047,7 +1055,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
1047 __entry->bg_objectid = block_group->key.objectid; 1055 __entry->bg_objectid = block_group->key.objectid;
1048 ), 1056 ),
1049 1057
1050 TP_printk_btrfs("block_group = %Lu", __entry->bg_objectid) 1058 TP_printk_btrfs("block_group=%Lu", __entry->bg_objectid)
1051); 1059);
1052 1060
1053TRACE_EVENT(btrfs_setup_cluster, 1061TRACE_EVENT(btrfs_setup_cluster,
@@ -1075,8 +1083,8 @@ TRACE_EVENT(btrfs_setup_cluster,
1075 __entry->bitmap = bitmap; 1083 __entry->bitmap = bitmap;
1076 ), 1084 ),
1077 1085
1078 TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, " 1086 TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) window_start=%Lu "
1079 "size = %Lu, max_size = %Lu, bitmap = %d", 1087 "size=%Lu max_size=%Lu bitmap=%d",
1080 __entry->bg_objectid, 1088 __entry->bg_objectid,
1081 __entry->flags, 1089 __entry->flags,
1082 __print_flags((unsigned long)__entry->flags, "|", 1090 __print_flags((unsigned long)__entry->flags, "|",
@@ -1103,7 +1111,7 @@ TRACE_EVENT(alloc_extent_state,
1103 __entry->ip = IP 1111 __entry->ip = IP
1104 ), 1112 ),
1105 1113
1106 TP_printk("state=%p; mask = %s; caller = %pS", __entry->state, 1114 TP_printk("state=%p mask=%s caller=%pS", __entry->state,
1107 show_gfp_flags(__entry->mask), (void *)__entry->ip) 1115 show_gfp_flags(__entry->mask), (void *)__entry->ip)
1108); 1116);
1109 1117
@@ -1123,7 +1131,7 @@ TRACE_EVENT(free_extent_state,
1123 __entry->ip = IP 1131 __entry->ip = IP
1124 ), 1132 ),
1125 1133
1126 TP_printk(" state=%p; caller = %pS", __entry->state, 1134 TP_printk("state=%p caller=%pS", __entry->state,
1127 (void *)__entry->ip) 1135 (void *)__entry->ip)
1128); 1136);
1129 1137
@@ -1151,28 +1159,32 @@ DECLARE_EVENT_CLASS(btrfs__work,
1151 __entry->normal_work = &work->normal_work; 1159 __entry->normal_work = &work->normal_work;
1152 ), 1160 ),
1153 1161
1154 TP_printk_btrfs("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p," 1162 TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%pf ordered_func=%p "
1155 " ordered_free=%p", 1163 "ordered_free=%p",
1156 __entry->work, __entry->normal_work, __entry->wq, 1164 __entry->work, __entry->normal_work, __entry->wq,
1157 __entry->func, __entry->ordered_func, __entry->ordered_free) 1165 __entry->func, __entry->ordered_func, __entry->ordered_free)
1158); 1166);
1159 1167
1160/* For situiations that the work is freed */ 1168/*
 1169 * For situations when the work is freed, we pass fs_info and a tag that
 1170 * matches the address of the work structure so it can be paired with the
1171 * scheduling event.
1172 */
1161DECLARE_EVENT_CLASS(btrfs__work__done, 1173DECLARE_EVENT_CLASS(btrfs__work__done,
1162 1174
1163 TP_PROTO(struct btrfs_work *work), 1175 TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
1164 1176
1165 TP_ARGS(work), 1177 TP_ARGS(fs_info, wtag),
1166 1178
1167 TP_STRUCT__entry_btrfs( 1179 TP_STRUCT__entry_btrfs(
1168 __field( void *, work ) 1180 __field( void *, wtag )
1169 ), 1181 ),
1170 1182
1171 TP_fast_assign_btrfs(btrfs_work_owner(work), 1183 TP_fast_assign_btrfs(fs_info,
1172 __entry->work = work; 1184 __entry->wtag = wtag;
1173 ), 1185 ),
1174 1186
1175 TP_printk_btrfs("work->%p", __entry->work) 1187 TP_printk_btrfs("work->%p", __entry->wtag)
1176); 1188);
1177 1189
1178DEFINE_EVENT(btrfs__work, btrfs_work_queued, 1190DEFINE_EVENT(btrfs__work, btrfs_work_queued,
@@ -1191,9 +1203,9 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched,
1191 1203
1192DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done, 1204DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
1193 1205
1194 TP_PROTO(struct btrfs_work *work), 1206 TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
1195 1207
1196 TP_ARGS(work) 1208 TP_ARGS(fs_info, wtag)
1197); 1209);
1198 1210
1199DEFINE_EVENT(btrfs__work, btrfs_ordered_sched, 1211DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
@@ -1221,7 +1233,7 @@ DECLARE_EVENT_CLASS(btrfs__workqueue,
1221 __entry->high = high; 1233 __entry->high = high;
1222 ), 1234 ),
1223 1235
1224 TP_printk_btrfs("name=%s%s, wq=%p", __get_str(name), 1236 TP_printk_btrfs("name=%s%s wq=%p", __get_str(name),
1225 __print_flags(__entry->high, "", 1237 __print_flags(__entry->high, "",
1226 {(WQ_HIGHPRI), "-high"}), 1238 {(WQ_HIGHPRI), "-high"}),
1227 __entry->wq) 1239 __entry->wq)
@@ -1276,7 +1288,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_data_map,
1276 __entry->free_reserved = free_reserved; 1288 __entry->free_reserved = free_reserved;
1277 ), 1289 ),
1278 1290
1279 TP_printk_btrfs("rootid=%llu, ino=%lu, free_reserved=%llu", 1291 TP_printk_btrfs("rootid=%llu ino=%lu free_reserved=%llu",
1280 __entry->rootid, __entry->ino, __entry->free_reserved) 1292 __entry->rootid, __entry->ino, __entry->free_reserved)
1281); 1293);
1282 1294
@@ -1323,7 +1335,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
1323 __entry->op = op; 1335 __entry->op = op;
1324 ), 1336 ),
1325 1337
1326 TP_printk_btrfs("root=%llu, ino=%lu, start=%llu, len=%llu, reserved=%llu, op=%s", 1338 TP_printk_btrfs("root=%llu ino=%lu start=%llu len=%llu reserved=%llu op=%s",
1327 __entry->rootid, __entry->ino, __entry->start, __entry->len, 1339 __entry->rootid, __entry->ino, __entry->start, __entry->len,
1328 __entry->reserved, 1340 __entry->reserved,
1329 __print_flags((unsigned long)__entry->op, "", 1341 __print_flags((unsigned long)__entry->op, "",
@@ -1361,7 +1373,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_delayed_ref,
1361 __entry->reserved = reserved; 1373 __entry->reserved = reserved;
1362 ), 1374 ),
1363 1375
1364 TP_printk_btrfs("root=%llu, reserved=%llu, op=free", 1376 TP_printk_btrfs("root=%llu reserved=%llu op=free",
1365 __entry->ref_root, __entry->reserved) 1377 __entry->ref_root, __entry->reserved)
1366); 1378);
1367 1379
@@ -1388,7 +1400,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
1388 __entry->num_bytes = rec->num_bytes; 1400 __entry->num_bytes = rec->num_bytes;
1389 ), 1401 ),
1390 1402
1391 TP_printk_btrfs("bytenr = %llu, num_bytes = %llu", 1403 TP_printk_btrfs("bytenr=%llu num_bytes=%llu",
1392 (unsigned long long)__entry->bytenr, 1404 (unsigned long long)__entry->bytenr,
1393 (unsigned long long)__entry->num_bytes) 1405 (unsigned long long)__entry->num_bytes)
1394); 1406);
@@ -1430,8 +1442,8 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
1430 __entry->nr_new_roots = nr_new_roots; 1442 __entry->nr_new_roots = nr_new_roots;
1431 ), 1443 ),
1432 1444
1433 TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, " 1445 TP_printk_btrfs("bytenr=%llu num_bytes=%llu nr_old_roots=%llu "
1434 "nr_new_roots = %llu", 1446 "nr_new_roots=%llu",
1435 __entry->bytenr, 1447 __entry->bytenr,
1436 __entry->num_bytes, 1448 __entry->num_bytes,
1437 __entry->nr_old_roots, 1449 __entry->nr_old_roots,
@@ -1457,7 +1469,7 @@ TRACE_EVENT(qgroup_update_counters,
1457 __entry->cur_new_count = cur_new_count; 1469 __entry->cur_new_count = cur_new_count;
1458 ), 1470 ),
1459 1471
1460 TP_printk_btrfs("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu", 1472 TP_printk_btrfs("qgid=%llu cur_old_count=%llu cur_new_count=%llu",
1461 __entry->qgid, 1473 __entry->qgid,
1462 __entry->cur_old_count, 1474 __entry->cur_old_count,
1463 __entry->cur_new_count) 1475 __entry->cur_new_count)
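
The btrfs hunks above are mostly mechanical: every TP_printk format string drops the old "key = value, " style in favor of space-separated key=value pairs, which tokenizes cleanly for trace post-processing. An illustrative before/after for btrfs_sync_file, using the format strings shown above (field values invented):

    before: root = 5(FS_TREE), ino = 257, parent = 256, datasync = 1
    after:  root=5(FS_TREE) ino=257 parent=256 datasync=1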
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 9e687ca9a307..15bf875d0e4a 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -47,8 +47,7 @@
47 {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \ 47 {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
48 {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \ 48 {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
49 {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\ 49 {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
50 {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\ 50 {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}\
51 {(unsigned long)__GFP_OTHER_NODE, "__GFP_OTHER_NODE"} \
52 51
53#define show_gfp_flags(flags) \ 52#define show_gfp_flags(flags) \
54 (flags) ? __print_flags(flags, "|", \ 53 (flags) ? __print_flags(flags, "|", \
diff --git a/include/trace/events/swiotlb.h b/include/trace/events/swiotlb.h
index 7ea4c5e7c448..288c0c54a2b4 100644
--- a/include/trace/events/swiotlb.h
+++ b/include/trace/events/swiotlb.h
@@ -11,16 +11,16 @@ TRACE_EVENT(swiotlb_bounced,
11 TP_PROTO(struct device *dev, 11 TP_PROTO(struct device *dev,
12 dma_addr_t dev_addr, 12 dma_addr_t dev_addr,
13 size_t size, 13 size_t size,
14 int swiotlb_force), 14 enum swiotlb_force swiotlb_force),
15 15
16 TP_ARGS(dev, dev_addr, size, swiotlb_force), 16 TP_ARGS(dev, dev_addr, size, swiotlb_force),
17 17
18 TP_STRUCT__entry( 18 TP_STRUCT__entry(
19 __string( dev_name, dev_name(dev) ) 19 __string( dev_name, dev_name(dev) )
20 __field( u64, dma_mask ) 20 __field( u64, dma_mask )
21 __field( dma_addr_t, dev_addr ) 21 __field( dma_addr_t, dev_addr )
22 __field( size_t, size ) 22 __field( size_t, size )
23 __field( int, swiotlb_force ) 23 __field( enum swiotlb_force, swiotlb_force )
24 ), 24 ),
25 25
26 TP_fast_assign( 26 TP_fast_assign(
@@ -37,7 +37,10 @@ TRACE_EVENT(swiotlb_bounced,
37 __entry->dma_mask, 37 __entry->dma_mask,
38 (unsigned long long)__entry->dev_addr, 38 (unsigned long long)__entry->dev_addr,
39 __entry->size, 39 __entry->size,
40 __entry->swiotlb_force ? "swiotlb_force" : "" ) 40 __print_symbolic(__entry->swiotlb_force,
41 { SWIOTLB_NORMAL, "NORMAL" },
42 { SWIOTLB_FORCE, "FORCE" },
43 { SWIOTLB_NO_FORCE, "NO_FORCE" }))
41); 44);
42 45
43#endif /* _TRACE_SWIOTLB_H */ 46#endif /* _TRACE_SWIOTLB_H */
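
With the field typed as enum swiotlb_force, __print_symbolic() renders the active policy by name instead of the old "swiotlb_force"-or-empty suffix. An illustrative trace line (device name and values invented; the full format string is outside this hunk):

    swiotlb_bounced: dev_name: 0000:00:1d.0 dma_mask=ffffffff dev_addr=0x00000000fffff000 size=65536 NO_FORCE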
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index a8b93e685239..f330ba4547cf 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -414,6 +414,7 @@ header-y += telephony.h
414header-y += termios.h 414header-y += termios.h
415header-y += thermal.h 415header-y += thermal.h
416header-y += time.h 416header-y += time.h
417header-y += timerfd.h
417header-y += times.h 418header-y += times.h
418header-y += timex.h 419header-y += timex.h
419header-y += tiocl.h 420header-y += tiocl.h
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 0eb0e87dbe9f..d2b0ac799d03 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -116,6 +116,12 @@ enum bpf_attach_type {
116 116
117#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE 117#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
118 118
 119/* If the BPF_F_ALLOW_OVERRIDE flag is used in the BPF_PROG_ATTACH command
 120 * for the given target_fd cgroup, descendant cgroups will be able to
 121 * override the effective bpf program that was inherited from this cgroup
 122 */
123#define BPF_F_ALLOW_OVERRIDE (1U << 0)
124
119#define BPF_PSEUDO_MAP_FD 1 125#define BPF_PSEUDO_MAP_FD 1
120 126
121/* flags for BPF_MAP_UPDATE_ELEM command */ 127/* flags for BPF_MAP_UPDATE_ELEM command */
@@ -171,6 +177,7 @@ union bpf_attr {
171 __u32 target_fd; /* container object to attach to */ 177 __u32 target_fd; /* container object to attach to */
172 __u32 attach_bpf_fd; /* eBPF program to attach */ 178 __u32 attach_bpf_fd; /* eBPF program to attach */
173 __u32 attach_type; 179 __u32 attach_type;
180 __u32 attach_flags;
174 }; 181 };
175} __attribute__((aligned(8))); 182} __attribute__((aligned(8)));
176 183
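
A short userspace sketch of the new attach flag, assuming prog_fd and cgroup_fd were obtained elsewhere (program loading and error handling omitted; BPF_CGROUP_INET_INGRESS is just one possible attach type):

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Attach an already-loaded program to a cgroup while still allowing
     * descendant cgroups to override it. */
    static int attach_overridable(int cgroup_fd, int prog_fd)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.target_fd = cgroup_fd;
            attr.attach_bpf_fd = prog_fd;
            attr.attach_type = BPF_CGROUP_INET_INGRESS;
            attr.attach_flags = BPF_F_ALLOW_OVERRIDE;

            return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
    }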
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 3cbc327801d6..c451eec42a83 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -1665,14 +1665,15 @@ static inline void cec_msg_report_current_latency(struct cec_msg *msg,
1665 __u8 audio_out_compensated, 1665 __u8 audio_out_compensated,
1666 __u8 audio_out_delay) 1666 __u8 audio_out_delay)
1667{ 1667{
1668 msg->len = 7; 1668 msg->len = 6;
1669 msg->msg[0] |= 0xf; /* broadcast */ 1669 msg->msg[0] |= 0xf; /* broadcast */
1670 msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY; 1670 msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
1671 msg->msg[2] = phys_addr >> 8; 1671 msg->msg[2] = phys_addr >> 8;
1672 msg->msg[3] = phys_addr & 0xff; 1672 msg->msg[3] = phys_addr & 0xff;
1673 msg->msg[4] = video_latency; 1673 msg->msg[4] = video_latency;
1674 msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated; 1674 msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
1675 msg->msg[6] = audio_out_delay; 1675 if (audio_out_compensated == 3)
1676 msg->msg[msg->len++] = audio_out_delay;
1676} 1677}
1677 1678
1678static inline void cec_ops_report_current_latency(const struct cec_msg *msg, 1679static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
@@ -1686,7 +1687,10 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
1686 *video_latency = msg->msg[4]; 1687 *video_latency = msg->msg[4];
1687 *low_latency_mode = (msg->msg[5] >> 2) & 1; 1688 *low_latency_mode = (msg->msg[5] >> 2) & 1;
1688 *audio_out_compensated = msg->msg[5] & 3; 1689 *audio_out_compensated = msg->msg[5] & 3;
1689 *audio_out_delay = msg->msg[6]; 1690 if (*audio_out_compensated == 3 && msg->len >= 7)
1691 *audio_out_delay = msg->msg[6];
1692 else
1693 *audio_out_delay = 0;
1690} 1694}
1691 1695
1692static inline void cec_msg_request_current_latency(struct cec_msg *msg, 1696static inline void cec_msg_request_current_latency(struct cec_msg *msg,
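
The fix makes the message length match its payload: the audio_out_delay operand is only transmitted (and parsed) when audio_out_compensated is 3. An illustrative call — the leading parameters are cut off in the hunk above, so their order here is inferred from the function body:

    struct cec_msg msg = {};

    cec_msg_report_current_latency(&msg, /* phys_addr */ 0x1000,
                                   /* video_latency */ 1,
                                   /* low_latency_mode */ 0,
                                   /* audio_out_compensated */ 0,
                                   /* audio_out_delay */ 0);
    /* msg.len == 6 here; only audio_out_compensated == 3 appends the
     * delay byte and makes msg.len == 7 */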
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index f0db7788f887..3dc91a46e8b8 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1384,6 +1384,8 @@ enum ethtool_link_mode_bit_indices {
1384 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, 1384 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
1385 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, 1385 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
1386 ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, 1386 ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
1387 ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47,
1388 ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48,
1387 1389
1388 1390
1389 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit 1391 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
@@ -1393,7 +1395,7 @@ enum ethtool_link_mode_bit_indices {
1393 */ 1395 */
1394 1396
1395 __ETHTOOL_LINK_MODE_LAST 1397 __ETHTOOL_LINK_MODE_LAST
1396 = ETHTOOL_LINK_MODE_10000baseER_Full_BIT, 1398 = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
1397}; 1399};
1398 1400
1399#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ 1401#define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
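
Drivers pick the new 2.5G/5G modes up through the ksettings bitmap helpers. A kernel-side sketch (ethtool_link_ksettings_add_link_mode() lives in include/linux/ethtool.h, not in this uapi hunk; ks is assumed to be a struct ethtool_link_ksettings * already in scope):

    ethtool_link_ksettings_add_link_mode(ks, supported, 2500baseT_Full);
    ethtool_link_ksettings_add_link_mode(ks, advertising, 2500baseT_Full);
    ethtool_link_ksettings_add_link_mode(ks, supported, 5000baseT_Full);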
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 85ddb74fcd1c..b23c1914a182 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -9,9 +9,8 @@
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/socket.h> 11#include <linux/socket.h>
12#ifndef __KERNEL__ 12#include <linux/in.h>
13#include <netinet/in.h> 13#include <linux/in6.h>
14#endif
15 14
16#define IPPROTO_L2TP 115 15#define IPPROTO_L2TP 115
17 16
@@ -31,7 +30,7 @@ struct sockaddr_l2tpip {
31 __u32 l2tp_conn_id; /* Connection ID of tunnel */ 30 __u32 l2tp_conn_id; /* Connection ID of tunnel */
32 31
33 /* Pad to size of `struct sockaddr'. */ 32 /* Pad to size of `struct sockaddr'. */
34 unsigned char __pad[sizeof(struct sockaddr) - 33 unsigned char __pad[__SOCK_SIZE__ -
35 sizeof(__kernel_sa_family_t) - 34 sizeof(__kernel_sa_family_t) -
36 sizeof(__be16) - sizeof(struct in_addr) - 35 sizeof(__be16) - sizeof(struct in_addr) -
37 sizeof(__u32)]; 36 sizeof(__u32)];
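
Switching to __SOCK_SIZE__ (the fixed 16-byte size struct sockaddr pads to, from linux/socket.h) removes the need for the struct sockaddr definition itself, which is what forced the old netinet/in.h include. The layout is unchanged; a quick C11 userspace check (hypothetical, not part of the patch):

    #include <assert.h>             /* static_assert (C11) */
    #include <sys/socket.h>         /* struct sockaddr */
    #include <linux/l2tp.h>         /* struct sockaddr_l2tpip */

    static_assert(sizeof(struct sockaddr_l2tpip) == sizeof(struct sockaddr),
                  "l2tp IPv4 sockaddr must stay sockaddr-sized");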
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
index 8be21e02387d..d0b5fa91ff54 100644
--- a/include/uapi/linux/netfilter/nf_log.h
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -9,4 +9,6 @@
9#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */ 9#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */
10#define NF_LOG_MASK 0x2f 10#define NF_LOG_MASK 0x2f
11 11
12#define NF_LOG_PREFIXLEN 128
13
12#endif /* _NETFILTER_NF_LOG_H */ 14#endif /* _NETFILTER_NF_LOG_H */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 881d49e94569..e3f27e09eb2b 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -235,7 +235,7 @@ enum nft_rule_compat_flags {
235/** 235/**
236 * enum nft_rule_compat_attributes - nf_tables rule compat attributes 236 * enum nft_rule_compat_attributes - nf_tables rule compat attributes
237 * 237 *
238 * @NFTA_RULE_COMPAT_PROTO: numerice value of handled protocol (NLA_U32) 238 * @NFTA_RULE_COMPAT_PROTO: numeric value of handled protocol (NLA_U32)
239 * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32) 239 * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32)
240 */ 240 */
241enum nft_rule_compat_attributes { 241enum nft_rule_compat_attributes {
@@ -499,7 +499,7 @@ enum nft_bitwise_attributes {
499 * enum nft_byteorder_ops - nf_tables byteorder operators 499 * enum nft_byteorder_ops - nf_tables byteorder operators
500 * 500 *
501 * @NFT_BYTEORDER_NTOH: network to host operator 501 * @NFT_BYTEORDER_NTOH: network to host operator
502 * @NFT_BYTEORDER_HTON: host to network opertaor 502 * @NFT_BYTEORDER_HTON: host to network operator
503 */ 503 */
504enum nft_byteorder_ops { 504enum nft_byteorder_ops {
505 NFT_BYTEORDER_NTOH, 505 NFT_BYTEORDER_NTOH,
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 6b76e3b0c18e..bea982af9cfb 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1772,7 +1772,9 @@ enum nl80211_commands {
1772 * 1772 *
1773 * @NL80211_ATTR_OPMODE_NOTIF: Operating mode field from Operating Mode 1773 * @NL80211_ATTR_OPMODE_NOTIF: Operating mode field from Operating Mode
1774 * Notification Element based on association request when used with 1774 * Notification Element based on association request when used with
1775 * %NL80211_CMD_NEW_STATION; u8 attribute. 1775 * %NL80211_CMD_NEW_STATION or %NL80211_CMD_SET_STATION (only when
1776 * %NL80211_FEATURE_FULL_AP_CLIENT_STATE is supported, or with TDLS);
1777 * u8 attribute.
1776 * 1778 *
1777 * @NL80211_ATTR_VENDOR_ID: The vendor ID, either a 24-bit OUI or, if 1779 * @NL80211_ATTR_VENDOR_ID: The vendor ID, either a 24-bit OUI or, if
1778 * %NL80211_VENDOR_ID_IS_LINUX is set, a special Linux ID (not used yet) 1780 * %NL80211_VENDOR_ID_IS_LINUX is set, a special Linux ID (not used yet)
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index cb4bcdc58543..a4dcd88ec271 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -397,7 +397,7 @@ enum {
397 TCA_BPF_NAME, 397 TCA_BPF_NAME,
398 TCA_BPF_FLAGS, 398 TCA_BPF_FLAGS,
399 TCA_BPF_FLAGS_GEN, 399 TCA_BPF_FLAGS_GEN,
400 TCA_BPF_DIGEST, 400 TCA_BPF_TAG,
401 __TCA_BPF_MAX, 401 __TCA_BPF_MAX,
402}; 402};
403 403
diff --git a/include/uapi/linux/seg6.h b/include/uapi/linux/seg6.h
index c396a8052f73..052799e4d751 100644
--- a/include/uapi/linux/seg6.h
+++ b/include/uapi/linux/seg6.h
@@ -23,14 +23,12 @@ struct ipv6_sr_hdr {
23 __u8 type; 23 __u8 type;
24 __u8 segments_left; 24 __u8 segments_left;
25 __u8 first_segment; 25 __u8 first_segment;
26 __u8 flag_1; 26 __u8 flags;
27 __u8 flag_2; 27 __u16 reserved;
28 __u8 reserved;
29 28
30 struct in6_addr segments[0]; 29 struct in6_addr segments[0];
31}; 30};
32 31
33#define SR6_FLAG1_CLEANUP (1 << 7)
34#define SR6_FLAG1_PROTECTED (1 << 6) 32#define SR6_FLAG1_PROTECTED (1 << 6)
35#define SR6_FLAG1_OAM (1 << 5) 33#define SR6_FLAG1_OAM (1 << 5)
36#define SR6_FLAG1_ALERT (1 << 4) 34#define SR6_FLAG1_ALERT (1 << 4)
@@ -42,8 +40,7 @@ struct ipv6_sr_hdr {
42#define SR6_TLV_PADDING 4 40#define SR6_TLV_PADDING 4
43#define SR6_TLV_HMAC 5 41#define SR6_TLV_HMAC 5
44 42
45#define sr_has_cleanup(srh) ((srh)->flag_1 & SR6_FLAG1_CLEANUP) 43#define sr_has_hmac(srh) ((srh)->flags & SR6_FLAG1_HMAC)
46#define sr_has_hmac(srh) ((srh)->flag_1 & SR6_FLAG1_HMAC)
47 44
48struct sr6_tlv { 45struct sr6_tlv {
49 __u8 type; 46 __u8 type;
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index a6b88a6f7f71..975b50dc8d1d 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -27,7 +27,7 @@ enum {
27 TCA_ACT_BPF_FD, 27 TCA_ACT_BPF_FD,
28 TCA_ACT_BPF_NAME, 28 TCA_ACT_BPF_NAME,
29 TCA_ACT_BPF_PAD, 29 TCA_ACT_BPF_PAD,
30 TCA_ACT_BPF_DIGEST, 30 TCA_ACT_BPF_TAG,
31 __TCA_ACT_BPF_MAX, 31 __TCA_ACT_BPF_MAX,
32}; 32};
33#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1) 33#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
diff --git a/include/uapi/linux/timerfd.h b/include/uapi/linux/timerfd.h
new file mode 100644
index 000000000000..6fcfaa8da173
--- /dev/null
+++ b/include/uapi/linux/timerfd.h
@@ -0,0 +1,36 @@
1/*
2 * include/linux/timerfd.h
3 *
4 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
5 *
6 */
7
8#ifndef _UAPI_LINUX_TIMERFD_H
9#define _UAPI_LINUX_TIMERFD_H
10
11#include <linux/types.h>
12
13/* For O_CLOEXEC and O_NONBLOCK */
14#include <linux/fcntl.h>
15
16/* For _IO helpers */
17#include <linux/ioctl.h>
18
19/*
20 * CAREFUL: Check include/asm-generic/fcntl.h when defining
21 * new flags, since they might collide with O_* ones. We want
22 * to re-use O_* flags that couldn't possibly have a meaning
23 * from eventfd, in order to leave a free define-space for
24 * shared O_* flags.
25 *
26 * Also make sure to update the masks in include/linux/timerfd.h
27 * when adding new flags.
28 */
29#define TFD_TIMER_ABSTIME (1 << 0)
30#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
31#define TFD_CLOEXEC O_CLOEXEC
32#define TFD_NONBLOCK O_NONBLOCK
33
34#define TFD_IOC_SET_TICKS _IOW('T', 0, __u64)
35
36#endif /* _UAPI_LINUX_TIMERFD_H */
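
The new header moves the existing userspace-visible timerfd flags out of include/linux/timerfd.h without changing their values. A minimal userspace sketch exercising the same flags through glibc (assuming a glibc that ships <sys/timerfd.h>):

    #include <sys/timerfd.h>        /* timerfd_create(), TFD_* flags */
    #include <time.h>               /* CLOCK_MONOTONIC */
    #include <unistd.h>

    int main(void)
    {
            /* TFD_CLOEXEC/TFD_NONBLOCK reuse the O_* values, as the
             * header comment above describes */
            int fd = timerfd_create(CLOCK_MONOTONIC,
                                    TFD_CLOEXEC | TFD_NONBLOCK);

            if (fd < 0)
                    return 1;
            close(fd);
            return 0;
    }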
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index acc63697a0cc..b2a31a55a612 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -93,6 +93,7 @@ struct usb_ext_prop_desc {
93 * | 0 | magic | LE32 | FUNCTIONFS_DESCRIPTORS_MAGIC_V2 | 93 * | 0 | magic | LE32 | FUNCTIONFS_DESCRIPTORS_MAGIC_V2 |
94 * | 4 | length | LE32 | length of the whole data chunk | 94 * | 4 | length | LE32 | length of the whole data chunk |
95 * | 8 | flags | LE32 | combination of functionfs_flags | 95 * | 8 | flags | LE32 | combination of functionfs_flags |
96 * | | eventfd | LE32 | eventfd file descriptor |
96 * | | fs_count | LE32 | number of full-speed descriptors | 97 * | | fs_count | LE32 | number of full-speed descriptors |
97 * | | hs_count | LE32 | number of high-speed descriptors | 98 * | | hs_count | LE32 | number of high-speed descriptors |
98 * | | ss_count | LE32 | number of super-speed descriptors | 99 * | | ss_count | LE32 | number of super-speed descriptors |
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 46e8a2e369f9..45184a2ef66c 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -362,8 +362,8 @@ enum v4l2_quantization {
362 /* 362 /*
363 * The default for R'G'B' quantization is always full range, except 363 * The default for R'G'B' quantization is always full range, except
364 * for the BT2020 colorspace. For Y'CbCr the quantization is always 364 * for the BT2020 colorspace. For Y'CbCr the quantization is always
365 * limited range, except for COLORSPACE_JPEG, SRGB, ADOBERGB, 365 * limited range, except for COLORSPACE_JPEG, XV601 or XV709: those
366 * XV601 or XV709: those are full range. 366 * are full range.
367 */ 367 */
368 V4L2_QUANTIZATION_DEFAULT = 0, 368 V4L2_QUANTIZATION_DEFAULT = 0,
369 V4L2_QUANTIZATION_FULL_RANGE = 1, 369 V4L2_QUANTIZATION_FULL_RANGE = 1,
@@ -379,8 +379,7 @@ enum v4l2_quantization {
379 (((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \ 379 (((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
380 V4L2_QUANTIZATION_LIM_RANGE : \ 380 V4L2_QUANTIZATION_LIM_RANGE : \
381 (((is_rgb_or_hsv) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \ 381 (((is_rgb_or_hsv) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
382 (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) || \ 382 (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) ? \
383 (colsp) == V4L2_COLORSPACE_ADOBERGB || (colsp) == V4L2_COLORSPACE_SRGB ? \
384 V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE)) 383 V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
385 384
386enum v4l2_priority { 385enum v4l2_priority {
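
The macro update matches the revised comment above it: V4L2_COLORSPACE_ADOBERGB and V4L2_COLORSPACE_SRGB no longer map Y'CbCr to full range by default; only JPEG, XV601 and XV709 do. An illustrative call:

    enum v4l2_quantization q =
            V4L2_MAP_QUANTIZATION_DEFAULT(0 /* Y'CbCr, not RGB/HSV */,
                                          V4L2_COLORSPACE_SRGB,
                                          V4L2_YCBCR_ENC_601);
    /* q == V4L2_QUANTIZATION_LIM_RANGE with this patch applied;
     * the same call previously yielded V4L2_QUANTIZATION_FULL_RANGE */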
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index 82bdf5626859..bb68cb1b04ed 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -16,3 +16,4 @@ header-y += nes-abi.h
16header-y += ocrdma-abi.h 16header-y += ocrdma-abi.h
17header-y += hns-abi.h 17header-y += hns-abi.h
18header-y += vmw_pvrdma-abi.h 18header-y += vmw_pvrdma-abi.h
19header-y += qedr-abi.h
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
index 48a19bda071b..d24eee12128f 100644
--- a/include/uapi/rdma/cxgb3-abi.h
+++ b/include/uapi/rdma/cxgb3-abi.h
@@ -30,7 +30,7 @@
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31 */
32#ifndef CXGB3_ABI_USER_H 32#ifndef CXGB3_ABI_USER_H
33#define CXBG3_ABI_USER_H 33#define CXGB3_ABI_USER_H
34 34
35#include <linux/types.h> 35#include <linux/types.h>
36 36
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index dfdfe4e92d31..f4f87cff6dc6 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -37,7 +37,6 @@
37#define IB_USER_VERBS_H 37#define IB_USER_VERBS_H
38 38
39#include <linux/types.h> 39#include <linux/types.h>
40#include <rdma/ib_verbs.h>
41 40
42/* 41/*
43 * Increment this value if any changes that break userspace ABI 42 * Increment this value if any changes that break userspace ABI
@@ -548,11 +547,17 @@ enum {
548}; 547};
549 548
550enum { 549enum {
551 IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN 550 /*
551 * This value is equal to IB_QP_DEST_QPN.
552 */
553 IB_USER_LEGACY_LAST_QP_ATTR_MASK = 1ULL << 20,
552}; 554};
553 555
554enum { 556enum {
555 IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT 557 /*
558 * This value is equal to IB_QP_RATE_LIMIT.
559 */
560 IB_USER_LAST_QP_ATTR_MASK = 1ULL << 25,
556}; 561};
557 562
558struct ib_uverbs_ex_create_qp { 563struct ib_uverbs_ex_create_qp {
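
The literal masks keep the uapi header free of the rdma/ib_verbs.h dependency removed above. A hypothetical kernel-side guard (not part of this patch) could pin the literals to the enum values they mirror, since BUILD_BUG_ON() and the IB_QP_* enum are both visible in kernel-only code:

    /* in any kernel function that also includes rdma/ib_verbs.h */
    BUILD_BUG_ON(IB_USER_LEGACY_LAST_QP_ATTR_MASK != IB_QP_DEST_QPN);
    BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK != IB_QP_RATE_LIMIT);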
diff --git a/init/Kconfig b/init/Kconfig
index 223b734abccd..4dd8bd232a1d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1176,6 +1176,10 @@ config CGROUP_DEBUG
1176 1176
1177 Say N. 1177 Say N.
1178 1178
1179config SOCK_CGROUP_DATA
1180 bool
1181 default n
1182
1179endif # CGROUPS 1183endif # CGROUPS
1180 1184
1181config CHECKPOINT_RESTORE 1185config CHECKPOINT_RESTORE
@@ -1983,6 +1987,10 @@ config MODVERSIONS
1983 make them incompatible with the kernel you are running. If 1987 make them incompatible with the kernel you are running. If
1984 unsure, say N. 1988 unsure, say N.
1985 1989
1990config MODULE_REL_CRCS
1991 bool
1992 depends on MODVERSIONS
1993
1986config MODULE_SRCVERSION_ALL 1994config MODULE_SRCVERSION_ALL
1987 bool "Source checksum for all modules" 1995 bool "Source checksum for all modules"
1988 help 1996 help
diff --git a/ipc/sem.c b/ipc/sem.c
index e08b94851922..3ec5742b5640 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1977,7 +1977,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1977 } 1977 }
1978 1978
1979 rcu_read_lock(); 1979 rcu_read_lock();
1980 sem_lock(sma, sops, nsops); 1980 locknum = sem_lock(sma, sops, nsops);
1981 1981
1982 if (!ipc_valid_object(&sma->sem_perm)) 1982 if (!ipc_valid_object(&sma->sem_perm))
1983 goto out_unlock_free; 1983 goto out_unlock_free;
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 8b1dde96a0fa..7b44195da81b 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -231,9 +231,11 @@ static void untag_chunk(struct node *p)
231 if (size) 231 if (size)
232 new = alloc_chunk(size); 232 new = alloc_chunk(size);
233 233
234 mutex_lock(&entry->group->mark_mutex);
234 spin_lock(&entry->lock); 235 spin_lock(&entry->lock);
235 if (chunk->dead || !entry->inode) { 236 if (chunk->dead || !entry->inode) {
236 spin_unlock(&entry->lock); 237 spin_unlock(&entry->lock);
238 mutex_unlock(&entry->group->mark_mutex);
237 if (new) 239 if (new)
238 free_chunk(new); 240 free_chunk(new);
239 goto out; 241 goto out;
@@ -251,6 +253,7 @@ static void untag_chunk(struct node *p)
251 list_del_rcu(&chunk->hash); 253 list_del_rcu(&chunk->hash);
252 spin_unlock(&hash_lock); 254 spin_unlock(&hash_lock);
253 spin_unlock(&entry->lock); 255 spin_unlock(&entry->lock);
256 mutex_unlock(&entry->group->mark_mutex);
254 fsnotify_destroy_mark(entry, audit_tree_group); 257 fsnotify_destroy_mark(entry, audit_tree_group);
255 goto out; 258 goto out;
256 } 259 }
@@ -258,8 +261,8 @@ static void untag_chunk(struct node *p)
258 if (!new) 261 if (!new)
259 goto Fallback; 262 goto Fallback;
260 263
261 fsnotify_duplicate_mark(&new->mark, entry); 264 if (fsnotify_add_mark_locked(&new->mark, entry->group, entry->inode,
262 if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) { 265 NULL, 1)) {
263 fsnotify_put_mark(&new->mark); 266 fsnotify_put_mark(&new->mark);
264 goto Fallback; 267 goto Fallback;
265 } 268 }
@@ -293,6 +296,7 @@ static void untag_chunk(struct node *p)
293 owner->root = new; 296 owner->root = new;
294 spin_unlock(&hash_lock); 297 spin_unlock(&hash_lock);
295 spin_unlock(&entry->lock); 298 spin_unlock(&entry->lock);
299 mutex_unlock(&entry->group->mark_mutex);
296 fsnotify_destroy_mark(entry, audit_tree_group); 300 fsnotify_destroy_mark(entry, audit_tree_group);
297 fsnotify_put_mark(&new->mark); /* drop initial reference */ 301 fsnotify_put_mark(&new->mark); /* drop initial reference */
298 goto out; 302 goto out;
@@ -309,6 +313,7 @@ Fallback:
309 put_tree(owner); 313 put_tree(owner);
310 spin_unlock(&hash_lock); 314 spin_unlock(&hash_lock);
311 spin_unlock(&entry->lock); 315 spin_unlock(&entry->lock);
316 mutex_unlock(&entry->group->mark_mutex);
312out: 317out:
313 fsnotify_put_mark(entry); 318 fsnotify_put_mark(entry);
314 spin_lock(&hash_lock); 319 spin_lock(&hash_lock);
@@ -386,18 +391,21 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
386 391
387 chunk_entry = &chunk->mark; 392 chunk_entry = &chunk->mark;
388 393
394 mutex_lock(&old_entry->group->mark_mutex);
389 spin_lock(&old_entry->lock); 395 spin_lock(&old_entry->lock);
390 if (!old_entry->inode) { 396 if (!old_entry->inode) {
391 /* old_entry is being shot, lets just lie */ 397 /* old_entry is being shot, lets just lie */
392 spin_unlock(&old_entry->lock); 398 spin_unlock(&old_entry->lock);
399 mutex_unlock(&old_entry->group->mark_mutex);
393 fsnotify_put_mark(old_entry); 400 fsnotify_put_mark(old_entry);
394 free_chunk(chunk); 401 free_chunk(chunk);
395 return -ENOENT; 402 return -ENOENT;
396 } 403 }
397 404
398 fsnotify_duplicate_mark(chunk_entry, old_entry); 405 if (fsnotify_add_mark_locked(chunk_entry, old_entry->group,
399 if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) { 406 old_entry->inode, NULL, 1)) {
400 spin_unlock(&old_entry->lock); 407 spin_unlock(&old_entry->lock);
408 mutex_unlock(&old_entry->group->mark_mutex);
401 fsnotify_put_mark(chunk_entry); 409 fsnotify_put_mark(chunk_entry);
402 fsnotify_put_mark(old_entry); 410 fsnotify_put_mark(old_entry);
403 return -ENOSPC; 411 return -ENOSPC;
@@ -413,6 +421,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
413 chunk->dead = 1; 421 chunk->dead = 1;
414 spin_unlock(&chunk_entry->lock); 422 spin_unlock(&chunk_entry->lock);
415 spin_unlock(&old_entry->lock); 423 spin_unlock(&old_entry->lock);
424 mutex_unlock(&old_entry->group->mark_mutex);
416 425
417 fsnotify_destroy_mark(chunk_entry, audit_tree_group); 426 fsnotify_destroy_mark(chunk_entry, audit_tree_group);
418 427
@@ -445,6 +454,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
445 spin_unlock(&hash_lock); 454 spin_unlock(&hash_lock);
446 spin_unlock(&chunk_entry->lock); 455 spin_unlock(&chunk_entry->lock);
447 spin_unlock(&old_entry->lock); 456 spin_unlock(&old_entry->lock);
457 mutex_unlock(&old_entry->group->mark_mutex);
448 fsnotify_destroy_mark(old_entry, audit_tree_group); 458 fsnotify_destroy_mark(old_entry, audit_tree_group);
449 fsnotify_put_mark(chunk_entry); /* drop initial reference */ 459 fsnotify_put_mark(chunk_entry); /* drop initial reference */
450 fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */ 460 fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index a2ac051c342f..3d55d95dcf49 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -11,7 +11,6 @@
11 */ 11 */
12#include <linux/bpf.h> 12#include <linux/bpf.h>
13#include <linux/err.h> 13#include <linux/err.h>
14#include <linux/vmalloc.h>
15#include <linux/slab.h> 14#include <linux/slab.h>
16#include <linux/mm.h> 15#include <linux/mm.h>
17#include <linux/filter.h> 16#include <linux/filter.h>
@@ -56,7 +55,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
56 attr->value_size == 0 || attr->map_flags) 55 attr->value_size == 0 || attr->map_flags)
57 return ERR_PTR(-EINVAL); 56 return ERR_PTR(-EINVAL);
58 57
59 if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1)) 58 if (attr->value_size > KMALLOC_MAX_SIZE)
60 /* if value_size is bigger, the user space won't be able to 59 /* if value_size is bigger, the user space won't be able to
61 * access the elements. 60 * access the elements.
62 */ 61 */
@@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
74 if (array_size >= U32_MAX - PAGE_SIZE) 73 if (array_size >= U32_MAX - PAGE_SIZE)
75 return ERR_PTR(-ENOMEM); 74 return ERR_PTR(-ENOMEM);
76 75
77
78 /* allocate all map elements and zero-initialize them */ 76 /* allocate all map elements and zero-initialize them */
79 array = kzalloc(array_size, GFP_USER | __GFP_NOWARN); 77 array = bpf_map_area_alloc(array_size);
80 if (!array) { 78 if (!array)
81 array = vzalloc(array_size); 79 return ERR_PTR(-ENOMEM);
82 if (!array)
83 return ERR_PTR(-ENOMEM);
84 }
85 80
86 /* copy mandatory map attributes */ 81 /* copy mandatory map attributes */
87 array->map.map_type = attr->map_type; 82 array->map.map_type = attr->map_type;
@@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
97 92
98 if (array_size >= U32_MAX - PAGE_SIZE || 93 if (array_size >= U32_MAX - PAGE_SIZE ||
99 elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) { 94 elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
100 kvfree(array); 95 bpf_map_area_free(array);
101 return ERR_PTR(-ENOMEM); 96 return ERR_PTR(-ENOMEM);
102 } 97 }
103out: 98out:
@@ -262,7 +257,7 @@ static void array_map_free(struct bpf_map *map)
262 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) 257 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
263 bpf_array_free_percpu(array); 258 bpf_array_free_percpu(array);
264 259
265 kvfree(array); 260 bpf_map_area_free(array);
266} 261}
267 262
268static const struct bpf_map_ops array_ops = { 263static const struct bpf_map_ops array_ops = {
@@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf_map *map)
319 /* make sure it's empty */ 314 /* make sure it's empty */
320 for (i = 0; i < array->map.max_entries; i++) 315 for (i = 0; i < array->map.max_entries; i++)
321 BUG_ON(array->ptrs[i] != NULL); 316 BUG_ON(array->ptrs[i] != NULL);
322 kvfree(array); 317
318 bpf_map_area_free(array);
323} 319}
324 320
325static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) 321static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index a515f7b007c6..da0f53690295 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -52,6 +52,7 @@ void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
52 e = rcu_dereference_protected(parent->bpf.effective[type], 52 e = rcu_dereference_protected(parent->bpf.effective[type],
53 lockdep_is_held(&cgroup_mutex)); 53 lockdep_is_held(&cgroup_mutex));
54 rcu_assign_pointer(cgrp->bpf.effective[type], e); 54 rcu_assign_pointer(cgrp->bpf.effective[type], e);
55 cgrp->bpf.disallow_override[type] = parent->bpf.disallow_override[type];
55 } 56 }
56} 57}
57 58
@@ -82,30 +83,63 @@ void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
82 * 83 *
83 * Must be called with cgroup_mutex held. 84 * Must be called with cgroup_mutex held.
84 */ 85 */
85void __cgroup_bpf_update(struct cgroup *cgrp, 86int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
86 struct cgroup *parent, 87 struct bpf_prog *prog, enum bpf_attach_type type,
87 struct bpf_prog *prog, 88 bool new_overridable)
88 enum bpf_attach_type type)
89{ 89{
90 struct bpf_prog *old_prog, *effective; 90 struct bpf_prog *old_prog, *effective = NULL;
91 struct cgroup_subsys_state *pos; 91 struct cgroup_subsys_state *pos;
92 bool overridable = true;
92 93
93 old_prog = xchg(cgrp->bpf.prog + type, prog); 94 if (parent) {
95 overridable = !parent->bpf.disallow_override[type];
96 effective = rcu_dereference_protected(parent->bpf.effective[type],
97 lockdep_is_held(&cgroup_mutex));
98 }
99
100 if (prog && effective && !overridable)
101 /* if parent has non-overridable prog attached, disallow
102 * attaching new programs to descendent cgroup
103 */
104 return -EPERM;
105
106 if (prog && effective && overridable != new_overridable)
107 /* if parent has overridable prog attached, only
108 * allow overridable programs in descendent cgroup
109 */
110 return -EPERM;
94 111
95 effective = (!prog && parent) ? 112 old_prog = cgrp->bpf.prog[type];
96 rcu_dereference_protected(parent->bpf.effective[type], 113
97 lockdep_is_held(&cgroup_mutex)) : 114 if (prog) {
98 prog; 115 overridable = new_overridable;
116 effective = prog;
117 if (old_prog &&
118 cgrp->bpf.disallow_override[type] == new_overridable)
119 /* disallow attaching non-overridable on top
120 * of existing overridable in this cgroup
121 * and vice versa
122 */
123 return -EPERM;
124 }
125
126 if (!prog && !old_prog)
127 /* report error when trying to detach and nothing is attached */
128 return -ENOENT;
129
130 cgrp->bpf.prog[type] = prog;
99 131
100 css_for_each_descendant_pre(pos, &cgrp->self) { 132 css_for_each_descendant_pre(pos, &cgrp->self) {
101 struct cgroup *desc = container_of(pos, struct cgroup, self); 133 struct cgroup *desc = container_of(pos, struct cgroup, self);
102 134
103 /* skip the subtree if the descendant has its own program */ 135 /* skip the subtree if the descendant has its own program */
104 if (desc->bpf.prog[type] && desc != cgrp) 136 if (desc->bpf.prog[type] && desc != cgrp) {
105 pos = css_rightmost_descendant(pos); 137 pos = css_rightmost_descendant(pos);
106 else 138 } else {
107 rcu_assign_pointer(desc->bpf.effective[type], 139 rcu_assign_pointer(desc->bpf.effective[type],
108 effective); 140 effective);
141 desc->bpf.disallow_override[type] = !overridable;
142 }
109 } 143 }
110 144
111 if (prog) 145 if (prog)
@@ -115,6 +149,7 @@ void __cgroup_bpf_update(struct cgroup *cgrp,
115 bpf_prog_put(old_prog); 149 bpf_prog_put(old_prog);
116 static_branch_dec(&cgroup_bpf_enabled_key); 150 static_branch_dec(&cgroup_bpf_enabled_key);
117 } 151 }
152 return 0;
118} 153}
119 154
120/** 155/**
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1eb4f1303756..503d4211988a 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -146,10 +146,11 @@ void __bpf_prog_free(struct bpf_prog *fp)
146 vfree(fp); 146 vfree(fp);
147} 147}
148 148
149int bpf_prog_calc_digest(struct bpf_prog *fp) 149int bpf_prog_calc_tag(struct bpf_prog *fp)
150{ 150{
151 const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64); 151 const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
152 u32 raw_size = bpf_prog_digest_scratch_size(fp); 152 u32 raw_size = bpf_prog_tag_scratch_size(fp);
153 u32 digest[SHA_DIGEST_WORDS];
153 u32 ws[SHA_WORKSPACE_WORDS]; 154 u32 ws[SHA_WORKSPACE_WORDS];
154 u32 i, bsize, psize, blocks; 155 u32 i, bsize, psize, blocks;
155 struct bpf_insn *dst; 156 struct bpf_insn *dst;
@@ -162,7 +163,7 @@ int bpf_prog_calc_digest(struct bpf_prog *fp)
162 if (!raw) 163 if (!raw)
163 return -ENOMEM; 164 return -ENOMEM;
164 165
165 sha_init(fp->digest); 166 sha_init(digest);
166 memset(ws, 0, sizeof(ws)); 167 memset(ws, 0, sizeof(ws));
167 168
168 /* We need to take out the map fd for the digest calculation 169 /* We need to take out the map fd for the digest calculation
@@ -204,13 +205,14 @@ int bpf_prog_calc_digest(struct bpf_prog *fp)
204 *bits = cpu_to_be64((psize - 1) << 3); 205 *bits = cpu_to_be64((psize - 1) << 3);
205 206
206 while (blocks--) { 207 while (blocks--) {
207 sha_transform(fp->digest, todo, ws); 208 sha_transform(digest, todo, ws);
208 todo += SHA_MESSAGE_BYTES; 209 todo += SHA_MESSAGE_BYTES;
209 } 210 }
210 211
211 result = (__force __be32 *)fp->digest; 212 result = (__force __be32 *)digest;
212 for (i = 0; i < SHA_DIGEST_WORDS; i++) 213 for (i = 0; i < SHA_DIGEST_WORDS; i++)
213 result[i] = cpu_to_be32(fp->digest[i]); 214 result[i] = cpu_to_be32(digest[i]);
215 memcpy(fp->tag, result, sizeof(fp->tag));
214 216
215 vfree(raw); 217 vfree(raw);
216 return 0; 218 return 0;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 34debc1a9641..a753bbe7df0a 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,7 +13,6 @@
13#include <linux/bpf.h> 13#include <linux/bpf.h>
14#include <linux/jhash.h> 14#include <linux/jhash.h>
15#include <linux/filter.h> 15#include <linux/filter.h>
16#include <linux/vmalloc.h>
17#include "percpu_freelist.h" 16#include "percpu_freelist.h"
18#include "bpf_lru_list.h" 17#include "bpf_lru_list.h"
19 18
@@ -103,7 +102,7 @@ static void htab_free_elems(struct bpf_htab *htab)
103 free_percpu(pptr); 102 free_percpu(pptr);
104 } 103 }
105free_elems: 104free_elems:
106 vfree(htab->elems); 105 bpf_map_area_free(htab->elems);
107} 106}
108 107
109static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, 108static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
@@ -125,7 +124,8 @@ static int prealloc_init(struct bpf_htab *htab)
125{ 124{
126 int err = -ENOMEM, i; 125 int err = -ENOMEM, i;
127 126
128 htab->elems = vzalloc(htab->elem_size * htab->map.max_entries); 127 htab->elems = bpf_map_area_alloc(htab->elem_size *
128 htab->map.max_entries);
129 if (!htab->elems) 129 if (!htab->elems)
130 return -ENOMEM; 130 return -ENOMEM;
131 131
@@ -274,7 +274,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
274 */ 274 */
275 goto free_htab; 275 goto free_htab;
276 276
277 if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) - 277 if (htab->map.value_size >= KMALLOC_MAX_SIZE -
278 MAX_BPF_STACK - sizeof(struct htab_elem)) 278 MAX_BPF_STACK - sizeof(struct htab_elem))
279 /* if value_size is bigger, the user space won't be able to 279 /* if value_size is bigger, the user space won't be able to
280 * access the elements via bpf syscall. This check also makes 280 * access the elements via bpf syscall. This check also makes
@@ -320,14 +320,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
320 goto free_htab; 320 goto free_htab;
321 321
322 err = -ENOMEM; 322 err = -ENOMEM;
323 htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket), 323 htab->buckets = bpf_map_area_alloc(htab->n_buckets *
324 GFP_USER | __GFP_NOWARN); 324 sizeof(struct bucket));
325 325 if (!htab->buckets)
326 if (!htab->buckets) { 326 goto free_htab;
327 htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
328 if (!htab->buckets)
329 goto free_htab;
330 }
331 327
332 for (i = 0; i < htab->n_buckets; i++) { 328 for (i = 0; i < htab->n_buckets; i++) {
333 INIT_HLIST_HEAD(&htab->buckets[i].head); 329 INIT_HLIST_HEAD(&htab->buckets[i].head);
@@ -354,7 +350,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
354free_extra_elems: 350free_extra_elems:
355 free_percpu(htab->extra_elems); 351 free_percpu(htab->extra_elems);
356free_buckets: 352free_buckets:
357 kvfree(htab->buckets); 353 bpf_map_area_free(htab->buckets);
358free_htab: 354free_htab:
359 kfree(htab); 355 kfree(htab);
360 return ERR_PTR(err); 356 return ERR_PTR(err);
@@ -1014,7 +1010,7 @@ static void htab_map_free(struct bpf_map *map)
1014 prealloc_destroy(htab); 1010 prealloc_destroy(htab);
1015 1011
1016 free_percpu(htab->extra_elems); 1012 free_percpu(htab->extra_elems);
1017 kvfree(htab->buckets); 1013 bpf_map_area_free(htab->buckets);
1018 kfree(htab); 1014 kfree(htab);
1019} 1015}
1020 1016
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 732ae16d12b7..be8519148c25 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -7,7 +7,6 @@
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
-#include <linux/vmalloc.h>
 #include <linux/stacktrace.h>
 #include <linux/perf_event.h>
 #include "percpu_freelist.h"
@@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
 	int err;
 
-	smap->elems = vzalloc(elem_size * smap->map.max_entries);
+	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
 	if (!smap->elems)
 		return -ENOMEM;
 
@@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 	return 0;
 
 free_elems:
-	vfree(smap->elems);
+	bpf_map_area_free(smap->elems);
 	return err;
 }
 
@@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-E2BIG);
 
-	smap = kzalloc(cost, GFP_USER | __GFP_NOWARN);
-	if (!smap) {
-		smap = vzalloc(cost);
-		if (!smap)
-			return ERR_PTR(-ENOMEM);
-	}
+	smap = bpf_map_area_alloc(cost);
+	if (!smap)
+		return ERR_PTR(-ENOMEM);
 
 	err = -E2BIG;
 	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
@@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 put_buffers:
 	put_callchain_buffers();
 free_smap:
-	kvfree(smap);
+	bpf_map_area_free(smap);
 	return ERR_PTR(err);
 }
 
@@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_map *map)
 	/* wait for bpf programs to complete before freeing stack map */
 	synchronize_rcu();
 
-	vfree(smap->elems);
+	bpf_map_area_free(smap->elems);
 	pcpu_freelist_destroy(&smap->freelist);
-	kvfree(smap);
+	bpf_map_area_free(smap);
 	put_callchain_buffers();
 }
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index e89acea22ecf..bbb016adbaeb 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -12,6 +12,8 @@
 #include <linux/bpf.h>
 #include <linux/syscalls.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mmzone.h>
 #include <linux/anon_inodes.h>
 #include <linux/file.h>
 #include <linux/license.h>
@@ -49,6 +51,30 @@ void bpf_register_map_type(struct bpf_map_type_list *tl)
 	list_add(&tl->list_node, &bpf_map_types);
 }
 
+void *bpf_map_area_alloc(size_t size)
+{
+	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
+	 * trigger under memory pressure as we really just want to
+	 * fail instead.
+	 */
+	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
+	void *area;
+
+	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+		area = kmalloc(size, GFP_USER | flags);
+		if (area != NULL)
+			return area;
+	}
+
+	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
+			 PAGE_KERNEL);
+}
+
+void bpf_map_area_free(void *area)
+{
+	kvfree(area);
+}
+
 int bpf_map_precharge_memlock(u32 pages)
 {
 	struct user_struct *user = get_current_user();
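The helper pair above is the allocator that the hashtab and stackmap hunks earlier in this patch switch to. A minimal sketch of the intended call pattern follows; the struct and function names are hypothetical, for illustration only:

/* A map implementation using the new helpers: allocation failure is
 * reported to user space instead of waking the OOM killer, and
 * bpf_map_area_free() copes with both kmalloc'ed and vmalloc'ed areas
 * via kvfree().
 */
struct example_map {
	u32 elem_size;
	u32 max_entries;
	void *elems;
};

static int example_map_populate(struct example_map *map)
{
	map->elems = bpf_map_area_alloc(map->elem_size * map->max_entries);
	if (!map->elems)
		return -ENOMEM;		/* fail cleanly under pressure */
	return 0;
}

static void example_map_destroy(struct example_map *map)
{
	bpf_map_area_free(map->elems);
}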
@@ -688,17 +714,17 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
 {
 	const struct bpf_prog *prog = filp->private_data;
-	char prog_digest[sizeof(prog->digest) * 2 + 1] = { };
+	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
 
-	bin2hex(prog_digest, prog->digest, sizeof(prog->digest));
+	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
 	seq_printf(m,
 		   "prog_type:\t%u\n"
 		   "prog_jited:\t%u\n"
-		   "prog_digest:\t%s\n"
+		   "prog_tag:\t%s\n"
 		   "memlock:\t%llu\n",
 		   prog->type,
 		   prog->jited,
-		   prog_digest,
+		   prog_tag,
 		   prog->pages * 1ULL << PAGE_SHIFT);
 }
 #endif
@@ -894,13 +920,14 @@ static int bpf_obj_get(const union bpf_attr *attr)
 
 #ifdef CONFIG_CGROUP_BPF
 
-#define BPF_PROG_ATTACH_LAST_FIELD attach_type
+#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 
 static int bpf_prog_attach(const union bpf_attr *attr)
 {
+	enum bpf_prog_type ptype;
 	struct bpf_prog *prog;
 	struct cgroup *cgrp;
-	enum bpf_prog_type ptype;
+	int ret;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -908,6 +935,9 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 	if (CHECK_ATTR(BPF_PROG_ATTACH))
 		return -EINVAL;
 
+	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
+		return -EINVAL;
+
 	switch (attr->attach_type) {
 	case BPF_CGROUP_INET_INGRESS:
 	case BPF_CGROUP_INET_EGRESS:
@@ -930,10 +960,13 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 		return PTR_ERR(cgrp);
 	}
 
-	cgroup_bpf_update(cgrp, prog, attr->attach_type);
+	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
+				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
+	if (ret)
+		bpf_prog_put(prog);
 	cgroup_put(cgrp);
 
-	return 0;
+	return ret;
 }
 
 #define BPF_PROG_DETACH_LAST_FIELD attach_type
@@ -941,6 +974,7 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 static int bpf_prog_detach(const union bpf_attr *attr)
 {
 	struct cgroup *cgrp;
+	int ret;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -956,7 +990,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 		if (IS_ERR(cgrp))
 			return PTR_ERR(cgrp);
 
-		cgroup_bpf_update(cgrp, NULL, attr->attach_type);
+		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
 		cgroup_put(cgrp);
 		break;
 
@@ -964,7 +998,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 		return -EINVAL;
 	}
 
-	return 0;
+	return ret;
 }
 #endif /* CONFIG_CGROUP_BPF */
 
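Taken together, the bpf_prog_attach()/bpf_prog_detach() changes make cgroup attachment return real errors and gate overriding behind an explicit flag. A hedged user-space sketch of the resulting API (descriptor values are assumed to be valid):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Attach prog_fd to cgroup_fd for ingress filtering. Omitting
 * BPF_F_ALLOW_OVERRIDE keeps the exclusive semantics; any other flag
 * bit is now rejected with -EINVAL, and errors from cgroup_bpf_update()
 * propagate back to the caller.
 */
static int attach_prog_to_cgroup(int cgroup_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cgroup_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = BPF_CGROUP_INET_INGRESS;
	attr.attach_flags = BPF_F_ALLOW_OVERRIDE;	/* descendants may override */

	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}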
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 83ed2f8f6f22..cdc43b899f28 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2936,7 +2936,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
 	int insn_cnt = env->prog->len;
 	int i, j, err;
 
-	err = bpf_prog_calc_digest(env->prog);
+	err = bpf_prog_calc_tag(env->prog);
 	if (err)
 		return err;
 
diff --git a/kernel/capability.c b/kernel/capability.c
index a98e814f216f..f97fe77ceb88 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -318,6 +318,7 @@ bool has_capability(struct task_struct *t, int cap)
 {
 	return has_ns_capability(t, &init_user_ns, cap);
 }
+EXPORT_SYMBOL(has_capability);
 
 /**
  * has_ns_capability_noaudit - Does a task have a capability (unaudited)
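With the symbol exported, modular code can also query another task's capabilities; a short hedged sketch (the wrapper function is hypothetical):

#include <linux/capability.h>
#include <linux/sched.h>

static bool example_task_may_rawio(struct task_struct *t)
{
	return has_capability(t, CAP_SYS_RAWIO);
}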
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2ee9ec3051b2..53bbca7c4859 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5221,6 +5221,11 @@ err_free_css:
 	return ERR_PTR(err);
 }
 
+/*
+ * The returned cgroup is fully initialized including its control mask, but
+ * it isn't associated with its kernfs_node and doesn't have the control
+ * mask applied.
+ */
 static struct cgroup *cgroup_create(struct cgroup *parent)
 {
 	struct cgroup_root *root = parent->root;
@@ -5288,11 +5293,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 
 	cgroup_propagate_control(cgrp);
 
-	/* @cgrp doesn't have dir yet so the following will only create csses */
-	ret = cgroup_apply_control_enable(cgrp);
-	if (ret)
-		goto out_destroy;
-
 	return cgrp;
 
 out_cancel_ref:
@@ -5300,9 +5300,6 @@ out_cancel_ref:
 out_free_cgrp:
 	kfree(cgrp);
 	return ERR_PTR(ret);
-out_destroy:
-	cgroup_destroy_locked(cgrp);
-	return ERR_PTR(ret);
 }
 
 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
@@ -6501,15 +6498,16 @@ static __init int cgroup_namespaces_init(void)
 subsys_initcall(cgroup_namespaces_init);
 
 #ifdef CONFIG_CGROUP_BPF
-void cgroup_bpf_update(struct cgroup *cgrp,
-		       struct bpf_prog *prog,
-		       enum bpf_attach_type type)
+int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
+		      enum bpf_attach_type type, bool overridable)
 {
 	struct cgroup *parent = cgroup_parent(cgrp);
+	int ret;
 
 	mutex_lock(&cgroup_mutex);
-	__cgroup_bpf_update(cgrp, parent, prog, type);
+	ret = __cgroup_bpf_update(cgrp, parent, prog, type, overridable);
 	mutex_unlock(&cgroup_mutex);
+	return ret;
 }
 #endif /* CONFIG_CGROUP_BPF */
 
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 042fd7e8e030..0a5f630f5c54 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -764,7 +764,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	int prev_state, ret = 0;
-	bool hasdied = false;
 
 	if (num_online_cpus() == 1)
 		return -EBUSY;
@@ -809,7 +808,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 		cpuhp_kick_ap_work(cpu);
 	}
 
-	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
 out:
 	cpu_hotplug_done();
 	return ret;
@@ -1302,10 +1300,24 @@ static int cpuhp_cb_check(enum cpuhp_state state)
  */
 static int cpuhp_reserve_state(enum cpuhp_state state)
 {
-	enum cpuhp_state i;
+	enum cpuhp_state i, end;
+	struct cpuhp_step *step;
 
-	for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
-		if (!cpuhp_ap_states[i].name)
+	switch (state) {
+	case CPUHP_AP_ONLINE_DYN:
+		step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
+		end = CPUHP_AP_ONLINE_DYN_END;
+		break;
+	case CPUHP_BP_PREPARE_DYN:
+		step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
+		end = CPUHP_BP_PREPARE_DYN_END;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	for (i = state; i <= end; i++, step++) {
+		if (!step->name)
 			return i;
 	}
 	WARN(1, "No more dynamic states available for CPU hotplug\n");
@@ -1323,7 +1335,7 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
 
 	mutex_lock(&cpuhp_state_mutex);
 
-	if (state == CPUHP_AP_ONLINE_DYN) {
+	if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
 		ret = cpuhp_reserve_state(state);
 		if (ret < 0)
 			goto out;
@@ -1471,6 +1483,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
 			bool multi_instance)
 {
 	int cpu, ret = 0;
+	bool dynstate;
 
 	if (cpuhp_cb_check(state) || !name)
 		return -EINVAL;
@@ -1480,6 +1493,12 @@ int __cpuhp_setup_state(enum cpuhp_state state,
 	ret = cpuhp_store_callbacks(state, name, startup, teardown,
 				    multi_instance);
 
+	dynstate = state == CPUHP_AP_ONLINE_DYN;
+	if (ret > 0 && dynstate) {
+		state = ret;
+		ret = 0;
+	}
+
 	if (ret || !invoke || !startup)
 		goto out;
 
@@ -1508,7 +1527,7 @@ out:
 	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
 	 * dynamically allocated state in case of success.
 	 */
-	if (!ret && state == CPUHP_AP_ONLINE_DYN)
+	if (!ret && dynstate)
 		return state;
 	return ret;
 }
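For reference, a sketch of how a dynamic hotplug state is typically registered; the callback names are hypothetical, and for CPUHP_AP_ONLINE_DYN requests cpuhp_setup_state() returns the dynamically reserved state on success:

#include <linux/cpuhotplug.h>

static int example_cpu_online(unsigned int cpu)
{
	/* set up per-CPU resources as each CPU comes online */
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	/* tear them down again on the way out */
	return 0;
}

static int __init example_init(void)
{
	int state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				      example_cpu_online, example_cpu_offline);
	if (state < 0)
		return state;
	/* keep 'state' around to pass to cpuhp_remove_state() later */
	return 0;
}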
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ab15509fab8c..e235bb991bdd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-
 	lockdep_assert_held(&ctx->lock);
 
 	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
 {
 	struct perf_event *group_leader = event->group_leader, *pos;
 
+	lockdep_assert_held(&event->ctx->lock);
+
 	/*
 	 * We can have double attach due to group movement in perf_event_open.
 	 */
@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
 	struct perf_event *sibling, *tmp;
 	struct list_head *list = NULL;
 
+	lockdep_assert_held(&event->ctx->lock);
+
 	/*
 	 * We can have double detach due to exit/hot-unplug + close.
 	 */
@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
  */
 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 {
-	lockdep_assert_held(&event->ctx->mutex);
+	struct perf_event_context *ctx = event->ctx;
+
+	lockdep_assert_held(&ctx->mutex);
 
 	event_function_call(event, __perf_remove_from_context, (void *)flags);
+
+	/*
+	 * The above event_function_call() can NO-OP when it hits
+	 * TASK_TOMBSTONE. In that case we must already have been detached
+	 * from the context (by perf_event_exit_event()) but the grouping
+	 * might still be in-tact.
+	 */
+	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
+	if ((flags & DETACH_GROUP) &&
+	    (event->attach_state & PERF_ATTACH_GROUP)) {
+		/*
+		 * Since in that case we cannot possibly be scheduled, simply
+		 * detach now.
+		 */
+		raw_spin_lock_irq(&ctx->lock);
+		perf_group_detach(event);
+		raw_spin_unlock_irq(&ctx->lock);
+	}
 }
 
 /*
@@ -2249,7 +2272,7 @@ static int __perf_install_in_context(void *info)
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
-	bool activate = true;
+	bool reprogram = true;
 	int ret = 0;
 
 	raw_spin_lock(&cpuctx->ctx.lock);
@@ -2257,27 +2280,26 @@ static int __perf_install_in_context(void *info)
 		raw_spin_lock(&ctx->lock);
 		task_ctx = ctx;
 
-		/* If we're on the wrong CPU, try again */
-		if (task_cpu(ctx->task) != smp_processor_id()) {
-			ret = -ESRCH;
-			goto unlock;
-		}
+		reprogram = (ctx->task == current);
 
 		/*
-		 * If we're on the right CPU, see if the task we target is
-		 * current, if not we don't have to activate the ctx, a future
-		 * context switch will do that for us.
+		 * If the task is running, it must be running on this CPU,
+		 * otherwise we cannot reprogram things.
+		 *
+		 * If its not running, we don't care, ctx->lock will
+		 * serialize against it becoming runnable.
 		 */
-		if (ctx->task != current)
-			activate = false;
-		else
-			WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
+		if (task_curr(ctx->task) && !reprogram) {
+			ret = -ESRCH;
+			goto unlock;
+		}
 
+		WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
 	} else if (task_ctx) {
 		raw_spin_lock(&task_ctx->lock);
 	}
 
-	if (activate) {
+	if (reprogram) {
 		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
 		add_event_to_ctx(event, ctx);
 		ctx_resched(cpuctx, task_ctx);
@@ -2328,13 +2350,36 @@ perf_install_in_context(struct perf_event_context *ctx,
 	/*
 	 * Installing events is tricky because we cannot rely on ctx->is_active
 	 * to be set in case this is the nr_events 0 -> 1 transition.
+	 *
+	 * Instead we use task_curr(), which tells us if the task is running.
+	 * However, since we use task_curr() outside of rq::lock, we can race
+	 * against the actual state. This means the result can be wrong.
+	 *
+	 * If we get a false positive, we retry, this is harmless.
+	 *
+	 * If we get a false negative, things are complicated. If we are after
+	 * perf_event_context_sched_in() ctx::lock will serialize us, and the
+	 * value must be correct. If we're before, it doesn't matter since
+	 * perf_event_context_sched_in() will program the counter.
+	 *
+	 * However, this hinges on the remote context switch having observed
+	 * our task->perf_event_ctxp[] store, such that it will in fact take
+	 * ctx::lock in perf_event_context_sched_in().
+	 *
+	 * We do this by task_function_call(), if the IPI fails to hit the task
+	 * we know any future context switch of task must see the
+	 * perf_event_ctpx[] store.
 	 */
-again:
+
 	/*
-	 * Cannot use task_function_call() because we need to run on the task's
-	 * CPU regardless of whether its current or not.
+	 * This smp_mb() orders the task->perf_event_ctxp[] store with the
+	 * task_cpu() load, such that if the IPI then does not find the task
+	 * running, a future context switch of that task must observe the
+	 * store.
 	 */
-	if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
+	smp_mb();
+again:
+	if (!task_function_call(task, __perf_install_in_context, event))
 		return;
 
 	raw_spin_lock_irq(&ctx->lock);
@@ -2348,12 +2393,16 @@ again:
 		raw_spin_unlock_irq(&ctx->lock);
 		return;
 	}
-	raw_spin_unlock_irq(&ctx->lock);
 	/*
-	 * Since !ctx->is_active doesn't mean anything, we must IPI
-	 * unconditionally.
+	 * If the task is not running, ctx->lock will avoid it becoming so,
+	 * thus we can safely install the event.
 	 */
-	goto again;
+	if (task_curr(task)) {
+		raw_spin_unlock_irq(&ctx->lock);
+		goto again;
+	}
+	add_event_to_ctx(event, ctx);
+	raw_spin_unlock_irq(&ctx->lock);
 }
 
 /*
@@ -3438,14 +3487,15 @@ struct perf_read_data {
 	int ret;
 };
 
-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
-	int event_cpu = event->oncpu;
 	u16 local_pkg, event_pkg;
 
 	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-		event_pkg = topology_physical_package_id(event_cpu);
-		local_pkg = topology_physical_package_id(local_cpu);
+		int local_cpu = smp_processor_id();
+
+		event_pkg = topology_physical_package_id(event_cpu);
+		local_pkg = topology_physical_package_id(local_cpu);
 
 		if (event_pkg == local_pkg)
 			return local_cpu;
@@ -3575,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event)
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
-	int ret = 0, cpu_to_read, local_cpu;
+	int event_cpu, ret = 0;
 
 	/*
 	 * If event is enabled and currently active on a CPU, update the
@@ -3588,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group)
 			.ret = 0,
 		};
 
-		local_cpu = get_cpu();
-		cpu_to_read = find_cpu_to_read(event, local_cpu);
-		put_cpu();
+		event_cpu = READ_ONCE(event->oncpu);
+		if ((unsigned)event_cpu >= nr_cpu_ids)
+			return 0;
+
+		preempt_disable();
+		event_cpu = __perf_event_read_cpu(event, event_cpu);
 
 		/*
 		 * Purposely ignore the smp_call_function_single() return
 		 * value.
 		 *
-		 * If event->oncpu isn't a valid CPU it means the event got
+		 * If event_cpu isn't a valid CPU it means the event got
 		 * scheduled out and that will have updated the event count.
 		 *
 		 * Therefore, either way, we'll have an up-to-date event count
 		 * after this.
 		 */
-		(void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+		preempt_enable();
 		ret = data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
@@ -6583,6 +6637,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 	char *buf = NULL;
 	char *name;
 
+	if (vma->vm_flags & VM_READ)
+		prot |= PROT_READ;
+	if (vma->vm_flags & VM_WRITE)
+		prot |= PROT_WRITE;
+	if (vma->vm_flags & VM_EXEC)
+		prot |= PROT_EXEC;
+
+	if (vma->vm_flags & VM_MAYSHARE)
+		flags = MAP_SHARED;
+	else
+		flags = MAP_PRIVATE;
+
+	if (vma->vm_flags & VM_DENYWRITE)
+		flags |= MAP_DENYWRITE;
+	if (vma->vm_flags & VM_MAYEXEC)
+		flags |= MAP_EXECUTABLE;
+	if (vma->vm_flags & VM_LOCKED)
+		flags |= MAP_LOCKED;
+	if (vma->vm_flags & VM_HUGETLB)
+		flags |= MAP_HUGETLB;
+
 	if (file) {
 		struct inode *inode;
 		dev_t dev;
@@ -6609,27 +6684,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 		maj = MAJOR(dev);
 		min = MINOR(dev);
 
-		if (vma->vm_flags & VM_READ)
-			prot |= PROT_READ;
-		if (vma->vm_flags & VM_WRITE)
-			prot |= PROT_WRITE;
-		if (vma->vm_flags & VM_EXEC)
-			prot |= PROT_EXEC;
-
-		if (vma->vm_flags & VM_MAYSHARE)
-			flags = MAP_SHARED;
-		else
-			flags = MAP_PRIVATE;
-
-		if (vma->vm_flags & VM_DENYWRITE)
-			flags |= MAP_DENYWRITE;
-		if (vma->vm_flags & VM_MAYEXEC)
-			flags |= MAP_EXECUTABLE;
-		if (vma->vm_flags & VM_LOCKED)
-			flags |= MAP_LOCKED;
-		if (vma->vm_flags & VM_HUGETLB)
-			flags |= MAP_HUGETLB;
-
 		goto got_name;
 	} else {
 		if (vma->vm_ops && vma->vm_ops->name) {
@@ -7034,25 +7088,12 @@ static void perf_log_itrace_start(struct perf_event *event)
 	perf_output_end(&handle);
 }
 
-/*
- * Generic event overflow handling, sampling.
- */
-
-static int __perf_event_overflow(struct perf_event *event,
-				 int throttle, struct perf_sample_data *data,
-				 struct pt_regs *regs)
+static int
+__perf_event_account_interrupt(struct perf_event *event, int throttle)
 {
-	int events = atomic_read(&event->event_limit);
 	struct hw_perf_event *hwc = &event->hw;
-	u64 seq;
 	int ret = 0;
-
-	/*
-	 * Non-sampling counters might still use the PMI to fold short
-	 * hardware counters, ignore those.
-	 */
-	if (unlikely(!is_sampling_event(event)))
-		return 0;
+	u64 seq;
 
 	seq = __this_cpu_read(perf_throttled_seq);
 	if (seq != hwc->interrupts_seq) {
@@ -7080,6 +7121,34 @@ static int __perf_event_overflow(struct perf_event *event,
 		perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
+	return ret;
+}
+
+int perf_event_account_interrupt(struct perf_event *event)
+{
+	return __perf_event_account_interrupt(event, 1);
+}
+
+/*
+ * Generic event overflow handling, sampling.
+ */
+
+static int __perf_event_overflow(struct perf_event *event,
+				 int throttle, struct perf_sample_data *data,
+				 struct pt_regs *regs)
+{
+	int events = atomic_read(&event->event_limit);
+	int ret = 0;
+
+	/*
+	 * Non-sampling counters might still use the PMI to fold short
+	 * hardware counters, ignore those.
+	 */
+	if (unlikely(!is_sampling_event(event)))
+		return 0;
+
+	ret = __perf_event_account_interrupt(event, throttle);
+
 	/*
 	 * XXX event_limit might not quite work as expected on inherited
 	 * events
@@ -9503,6 +9572,37 @@ static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
 	return 0;
 }
 
+/*
+ * Variation on perf_event_ctx_lock_nested(), except we take two context
+ * mutexes.
+ */
+static struct perf_event_context *
+__perf_event_ctx_lock_double(struct perf_event *group_leader,
+			     struct perf_event_context *ctx)
+{
+	struct perf_event_context *gctx;
+
+again:
+	rcu_read_lock();
+	gctx = READ_ONCE(group_leader->ctx);
+	if (!atomic_inc_not_zero(&gctx->refcount)) {
+		rcu_read_unlock();
+		goto again;
+	}
+	rcu_read_unlock();
+
+	mutex_lock_double(&gctx->mutex, &ctx->mutex);
+
+	if (group_leader->ctx != gctx) {
+		mutex_unlock(&ctx->mutex);
+		mutex_unlock(&gctx->mutex);
+		put_ctx(gctx);
+		goto again;
+	}
+
+	return gctx;
+}
+
 /**
  * sys_perf_event_open - open a performance event, associate it to a task/cpu
  *
@@ -9746,12 +9846,31 @@ SYSCALL_DEFINE5(perf_event_open,
 	}
 
 	if (move_group) {
-		gctx = group_leader->ctx;
-		mutex_lock_double(&gctx->mutex, &ctx->mutex);
+		gctx = __perf_event_ctx_lock_double(group_leader, ctx);
+
 		if (gctx->task == TASK_TOMBSTONE) {
 			err = -ESRCH;
 			goto err_locked;
 		}
+
+		/*
+		 * Check if we raced against another sys_perf_event_open() call
+		 * moving the software group underneath us.
+		 */
+		if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
+			/*
+			 * If someone moved the group out from under us, check
+			 * if this new event wound up on the same ctx, if so
+			 * its the regular !move_group case, otherwise fail.
+			 */
+			if (gctx != ctx) {
+				err = -EINVAL;
+				goto err_locked;
+			} else {
+				perf_event_ctx_unlock(group_leader, gctx);
+				move_group = 0;
+			}
+		}
 	} else {
 		mutex_lock(&ctx->mutex);
 	}
@@ -9853,7 +9972,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	perf_unpin_context(ctx);
 
 	if (move_group)
-		mutex_unlock(&gctx->mutex);
+		perf_event_ctx_unlock(group_leader, gctx);
 	mutex_unlock(&ctx->mutex);
 
 	if (task) {
@@ -9879,7 +9998,7 @@ SYSCALL_DEFINE5(perf_event_open,
 
 err_locked:
 	if (move_group)
-		mutex_unlock(&gctx->mutex);
+		perf_event_ctx_unlock(group_leader, gctx);
 	mutex_unlock(&ctx->mutex);
 /* err_file: */
 	fput(event_file);
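The move_group path that __perf_event_ctx_lock_double() protects is reached from user space when a hardware event is opened into a software-led group; a hedged sketch of such a caller (error handling trimmed, values hypothetical):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_open(struct perf_event_attr *attr, int group_fd)
{
	return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
}

int main(void)
{
	struct perf_event_attr attr;
	int leader, member;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	leader = perf_open(&attr, -1);		/* software group leader */

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	member = perf_open(&attr, leader);	/* forces the group move */

	return (leader < 0 || member < 0);
}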
diff --git a/kernel/futex.c b/kernel/futex.c
index 0842c8ca534b..cdf365036141 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -3323,4 +3323,4 @@ static int __init futex_init(void)
 
 	return 0;
 }
-__initcall(futex_init);
+core_initcall(futex_init);
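For context: __initcall() is an alias for device_initcall() (level 6), while core_initcall() runs at level 1, so futex_init() is now executed much earlier in the boot sequence. The macro usage itself is unchanged; a hypothetical example:

static int __init example_setup(void)
{
	return 0;	/* must be ready before device initcalls run */
}
core_initcall(example_setup);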
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 8c0a0ae43521..b59e6768c5e9 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1346,6 +1346,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
 
+static void __irq_domain_activate_irq(struct irq_data *irq_data)
+{
+	if (irq_data && irq_data->domain) {
+		struct irq_domain *domain = irq_data->domain;
+
+		if (irq_data->parent_data)
+			__irq_domain_activate_irq(irq_data->parent_data);
+		if (domain->ops->activate)
+			domain->ops->activate(domain, irq_data);
+	}
+}
+
+static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
+{
+	if (irq_data && irq_data->domain) {
+		struct irq_domain *domain = irq_data->domain;
+
+		if (domain->ops->deactivate)
+			domain->ops->deactivate(domain, irq_data);
+		if (irq_data->parent_data)
+			__irq_domain_deactivate_irq(irq_data->parent_data);
+	}
+}
+
 /**
  * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
  *			     interrupt
@@ -1356,13 +1380,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
  */
 void irq_domain_activate_irq(struct irq_data *irq_data)
 {
-	if (irq_data && irq_data->domain) {
-		struct irq_domain *domain = irq_data->domain;
-
-		if (irq_data->parent_data)
-			irq_domain_activate_irq(irq_data->parent_data);
-		if (domain->ops->activate)
-			domain->ops->activate(domain, irq_data);
+	if (!irqd_is_activated(irq_data)) {
+		__irq_domain_activate_irq(irq_data);
+		irqd_set_activated(irq_data);
 	}
 }
 
@@ -1376,13 +1396,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data)
 */
 void irq_domain_deactivate_irq(struct irq_data *irq_data)
 {
-	if (irq_data && irq_data->domain) {
-		struct irq_domain *domain = irq_data->domain;
-
-		if (domain->ops->deactivate)
-			domain->ops->deactivate(domain, irq_data);
-		if (irq_data->parent_data)
-			irq_domain_deactivate_irq(irq_data->parent_data);
+	if (irqd_is_activated(irq_data)) {
+		__irq_domain_deactivate_irq(irq_data);
+		irqd_clr_activated(irq_data);
 	}
 }
 
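The net effect of the irqd_is_activated() guard is that activation and deactivation become idempotent; a sketch of a hypothetical caller:

void example_cycle_irq(struct irq_data *d)
{
	irq_domain_activate_irq(d);	/* walks the hierarchy, sets the flag */
	irq_domain_activate_irq(d);	/* no-op: already activated */
	irq_domain_deactivate_irq(d);	/* tears down, clears the flag */
	irq_domain_deactivate_irq(d);	/* no-op: already deactivated */
}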
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 93ad6c1fb9b6..a9b8cf500591 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -182,6 +182,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
 
+void static_key_deferred_flush(struct static_key_deferred *key)
+{
+	STATIC_KEY_CHECK_USE();
+	flush_delayed_work(&key->work);
+}
+EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+
 void jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
 {
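A rate-limited static key defers its final static_key_slow_dec() through delayed work, so an owner whose memory or code may go away must flush that work first. A hedged sketch of the intended use (the key and functions are hypothetical):

static struct static_key_deferred example_key;

static int __init example_init(void)
{
	jump_label_rate_limit(&example_key, HZ);	/* batch slow decrements */
	return 0;
}

static void __exit example_exit(void)
{
	/* wait for any pending deferred decrement before unloading */
	static_key_deferred_flush(&example_key);
}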
diff --git a/kernel/memremap.c b/kernel/memremap.c
index b501e390bb34..9ecedc28b928 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -246,7 +246,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
 	/* pages are dead and unused, undo the arch mapping */
 	align_start = res->start & ~(SECTION_SIZE - 1);
 	align_size = ALIGN(resource_size(res), SECTION_SIZE);
+	mem_hotplug_begin();
 	arch_remove_memory(align_start, align_size);
+	mem_hotplug_done();
 	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 	pgmap_radix_release(res);
 	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
@@ -358,7 +360,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	if (error)
 		goto err_pfn_remap;
 
+	mem_hotplug_begin();
 	error = arch_add_memory(nid, align_start, align_size, true);
+	mem_hotplug_done();
 	if (error)
 		goto err_add_memory;
 
diff --git a/kernel/module.c b/kernel/module.c
index 5088784c0cf9..3d8f126208e3 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -389,16 +389,16 @@ extern const struct kernel_symbol __start___ksymtab_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_gpl[];
 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
-extern const unsigned long __start___kcrctab[];
-extern const unsigned long __start___kcrctab_gpl[];
-extern const unsigned long __start___kcrctab_gpl_future[];
+extern const s32 __start___kcrctab[];
+extern const s32 __start___kcrctab_gpl[];
+extern const s32 __start___kcrctab_gpl_future[];
 #ifdef CONFIG_UNUSED_SYMBOLS
 extern const struct kernel_symbol __start___ksymtab_unused[];
 extern const struct kernel_symbol __stop___ksymtab_unused[];
 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
-extern const unsigned long __start___kcrctab_unused[];
-extern const unsigned long __start___kcrctab_unused_gpl[];
+extern const s32 __start___kcrctab_unused[];
+extern const s32 __start___kcrctab_unused_gpl[];
 #endif
 
 #ifndef CONFIG_MODVERSIONS
@@ -497,7 +497,7 @@ struct find_symbol_arg {
 
 	/* Output */
 	struct module *owner;
-	const unsigned long *crc;
+	const s32 *crc;
 	const struct kernel_symbol *sym;
 };
 
@@ -563,7 +563,7 @@ static bool find_symbol_in_section(const struct symsearch *syms,
 * (optional) module which owns it. Needs preempt disabled or module_mutex. */
 const struct kernel_symbol *find_symbol(const char *name,
 					struct module **owner,
-					const unsigned long **crc,
+					const s32 **crc,
 					bool gplok,
 					bool warn)
 {
@@ -1145,7 +1145,7 @@ static size_t module_flags_taint(struct module *mod, char *buf)
 
 	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
 		if (taint_flags[i].module && test_bit(i, &mod->taints))
-			buf[l++] = taint_flags[i].true;
+			buf[l++] = taint_flags[i].c_true;
 	}
 
 	return l;
@@ -1249,23 +1249,17 @@ static int try_to_force_load(struct module *mod, const char *reason)
 }
 
 #ifdef CONFIG_MODVERSIONS
-/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
-static unsigned long maybe_relocated(unsigned long crc,
-				     const struct module *crc_owner)
+
+static u32 resolve_rel_crc(const s32 *crc)
 {
-#ifdef ARCH_RELOCATES_KCRCTAB
-	if (crc_owner == NULL)
-		return crc - (unsigned long)reloc_start;
-#endif
-	return crc;
+	return *(u32 *)((void *)crc + *crc);
 }
 
 static int check_version(Elf_Shdr *sechdrs,
 			 unsigned int versindex,
 			 const char *symname,
 			 struct module *mod,
-			 const unsigned long *crc,
-			 const struct module *crc_owner)
+			 const s32 *crc)
 {
 	unsigned int i, num_versions;
 	struct modversion_info *versions;
@@ -1283,13 +1277,19 @@ static int check_version(Elf_Shdr *sechdrs,
 		/ sizeof(struct modversion_info);
 
 	for (i = 0; i < num_versions; i++) {
+		u32 crcval;
+
 		if (strcmp(versions[i].name, symname) != 0)
 			continue;
 
-		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
+		if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
+			crcval = resolve_rel_crc(crc);
+		else
+			crcval = *crc;
+		if (versions[i].crc == crcval)
 			return 1;
-		pr_debug("Found checksum %lX vs module %lX\n",
-			 maybe_relocated(*crc, crc_owner), versions[i].crc);
+		pr_debug("Found checksum %X vs module %lX\n",
+			 crcval, versions[i].crc);
 		goto bad_version;
 	}
 
@@ -1307,7 +1307,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 					  unsigned int versindex,
 					  struct module *mod)
 {
-	const unsigned long *crc;
+	const s32 *crc;
 
 	/*
 	 * Since this should be found in kernel (which can't be removed), no
@@ -1321,8 +1321,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
 	}
 	preempt_enable();
 	return check_version(sechdrs, versindex,
-			     VMLINUX_SYMBOL_STR(module_layout), mod, crc,
-			     NULL);
+			     VMLINUX_SYMBOL_STR(module_layout), mod, crc);
 }
 
 /* First part is kernel version, which we ignore if module has crcs. */
@@ -1340,8 +1339,7 @@ static inline int check_version(Elf_Shdr *sechdrs,
 				unsigned int versindex,
 				const char *symname,
 				struct module *mod,
-				const unsigned long *crc,
-				const struct module *crc_owner)
+				const s32 *crc)
 {
 	return 1;
 }
@@ -1368,7 +1366,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
 {
 	struct module *owner;
 	const struct kernel_symbol *sym;
-	const unsigned long *crc;
+	const s32 *crc;
 	int err;
 
 	/*
@@ -1383,8 +1381,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
 	if (!sym)
 		goto unlock;
 
-	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
-			   owner)) {
+	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc)) {
 		sym = ERR_PTR(-EINVAL);
 		goto getname;
 	}
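The expression in resolve_rel_crc() decodes a place-relative reference: the s32 entry stores the offset from its own address to the real CRC, which keeps the table position-independent. A small stand-alone illustration of the same arithmetic (user-space C, hypothetical values):

#include <stdint.h>
#include <stdio.h>

static uint32_t real_crc = 0xdeadbeef;	/* the actual checksum */
static int32_t rel_crc;			/* what a __kcrctab slot would hold */

static uint32_t resolve(const int32_t *p)
{
	return *(const uint32_t *)((const char *)p + *p);
}

int main(void)
{
	rel_crc = (int32_t)((char *)&real_crc - (char *)&rel_crc);
	printf("%x\n", (unsigned)resolve(&rel_crc));	/* prints deadbeef */
	return 0;
}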
diff --git a/kernel/panic.c b/kernel/panic.c
index c51edaa04fce..08aa88dde7de 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -249,7 +249,7 @@ void panic(const char *fmt, ...)
 	 * Delay timeout seconds before rebooting the machine.
 	 * We can't use the "normal" timers since we just panicked.
 	 */
-	pr_emerg("Rebooting in %d seconds..", panic_timeout);
+	pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
 
 	for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
 		touch_nmi_watchdog();
@@ -355,7 +355,7 @@ const char *print_tainted(void)
 		for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
 			const struct taint_flag *t = &taint_flags[i];
 			*s++ = test_bit(i, &tainted_mask) ?
-					t->true : t->false;
+					t->c_true : t->c_false;
 		}
 		*s = 0;
 	} else
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index df9e8e9e0be7..eef2ce968636 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -151,8 +151,12 @@ out:
 
 static void delayed_free_pidns(struct rcu_head *p)
 {
-	kmem_cache_free(pid_ns_cachep,
-			container_of(p, struct pid_namespace, rcu));
+	struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);
+
+	dec_pid_namespaces(ns->ucounts);
+	put_user_ns(ns->user_ns);
+
+	kmem_cache_free(pid_ns_cachep, ns);
 }
 
 static void destroy_pid_namespace(struct pid_namespace *ns)
@@ -162,8 +166,6 @@ static void destroy_pid_namespace(struct pid_namespace *ns)
 	ns_free_inum(&ns->ns);
 	for (i = 0; i < PIDMAP_ENTRIES; i++)
 		kfree(ns->pidmap[i].page);
-	dec_pid_namespaces(ns->ucounts);
-	put_user_ns(ns->user_ns);
 	call_rcu(&ns->rcu, delayed_free_pidns);
 }
 
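The fix follows the standard rule for call_rcu() teardown: anything an RCU reader might still dereference has to be released in the callback, after the grace period, not in the synchronous destroy path. Generic shape of the pattern (all names hypothetical):

struct example_obj {
	struct rcu_head rcu;
	struct example_dep *dep;
};

static void example_free_rcu(struct rcu_head *p)
{
	struct example_obj *obj = container_of(p, struct example_obj, rcu);

	example_put_dep(obj->dep);	/* safe: no reader can reach obj now */
	kfree(obj);
}

static void example_destroy(struct example_obj *obj)
{
	call_rcu(&obj->rcu, example_free_rcu);
}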
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index f67ceb7768b8..15e6baef5c73 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -46,7 +46,7 @@ static const char * const mem_sleep_labels[] = {
 const char *mem_sleep_states[PM_SUSPEND_MAX];
 
 suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE;
-suspend_state_t mem_sleep_default = PM_SUSPEND_MAX;
+static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM;
 
 unsigned int pm_suspend_global_flags;
 EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
@@ -168,7 +168,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
 	}
 	if (valid_state(PM_SUSPEND_MEM)) {
 		mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
-		if (mem_sleep_default >= PM_SUSPEND_MEM)
+		if (mem_sleep_default == PM_SUSPEND_MEM)
 			mem_sleep_current = PM_SUSPEND_MEM;
 	}
 
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 8b2696420abb..4ba3d34938c0 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1516,7 +1516,7 @@ static void call_console_drivers(int level,
 {
 	struct console *con;
 
-	trace_console(text, len);
+	trace_console_rcuidle(text, len);
 
 	if (!console_drivers)
 		return;
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 80adef7d4c3d..0d6ff3e471be 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -136,6 +136,7 @@ int rcu_jiffies_till_stall_check(void);
 #define TPS(x)	tracepoint_string(x)
 
 void rcu_early_boot_tests(void);
+void rcu_test_sync_prims(void);
 
 /*
 * This function really isn't for public consumption, but RCU is special in
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 1898559e6b60..b23a4d076f3d 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -185,9 +185,6 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh? (Due to Josh Triplett.)
- *
- * But we want to make this a static inline later. The cond_resched()
- * currently makes this problematic.
 */
 void synchronize_sched(void)
 {
@@ -195,7 +192,6 @@ void synchronize_sched(void)
 			 lock_is_held(&rcu_lock_map) ||
 			 lock_is_held(&rcu_sched_lock_map),
 			 "Illegal synchronize_sched() in RCU read-side critical section");
-	cond_resched();
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index 196f0302e2f4..c64b827ecbca 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -60,12 +60,17 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
 /*
 * During boot, we forgive RCU lockdep issues. After this function is
- * invoked, we start taking RCU lockdep issues seriously.
+ * invoked, we start taking RCU lockdep issues seriously. Note that unlike
+ * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE
+ * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
+ * The reason for this is that Tiny RCU does not need kthreads, so does
+ * not have to care about the fact that the scheduler is half-initialized
+ * at a certain phase of the boot process.
 */
 void __init rcu_scheduler_starting(void)
 {
 	WARN_ON(nr_context_switches() > 0);
-	rcu_scheduler_active = 1;
+	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
 }
 
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 96c52e43f7ca..cb4e2056ccf3 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -127,13 +127,16 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
 int sysctl_panic_on_rcu_stall __read_mostly;
 
 /*
- * The rcu_scheduler_active variable transitions from zero to one just
- * before the first task is spawned. So when this variable is zero, RCU
- * can assume that there is but one task, allowing RCU to (for example)
+ * The rcu_scheduler_active variable is initialized to the value
+ * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
+ * first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
+ * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier(). When this variable
- * is one, RCU must actually do all the hard work required to detect real
- * grace periods. This variable is also used to suppress boot-time false
- * positives from lockdep-RCU error checking.
+ * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
+ * to detect real grace periods. This variable is also used to suppress
+ * boot-time false positives from lockdep-RCU error checking. Finally, it
+ * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
+ * is fully initialized, including all of its kthreads having been spawned.
 */
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
@@ -3980,18 +3983,22 @@ static int __init rcu_spawn_gp_kthread(void)
 early_initcall(rcu_spawn_gp_kthread);
 
 /*
- * This function is invoked towards the end of the scheduler's initialization
- * process. Before this is called, the idle task might contain
- * RCU read-side critical sections (during which time, this idle
- * task is booting the system). After this function is called, the
- * idle tasks are prohibited from containing RCU read-side critical
- * sections. This function also enables RCU lockdep checking.
+ * This function is invoked towards the end of the scheduler's
+ * initialization process. Before this is called, the idle task might
+ * contain synchronous grace-period primitives (during which time, this idle
+ * task is booting the system, and such primitives are no-ops). After this
+ * function is called, any synchronous grace-period primitives are run as
+ * expedited, with the requesting task driving the grace period forward.
+ * A later core_initcall() rcu_exp_runtime_mode() will switch to full
+ * runtime RCU functionality.
 */
 void rcu_scheduler_starting(void)
 {
 	WARN_ON(num_online_cpus() != 1);
 	WARN_ON(nr_context_switches() > 0);
-	rcu_scheduler_active = 1;
+	rcu_test_sync_prims();
+	rcu_scheduler_active = RCU_SCHEDULER_INIT;
+	rcu_test_sync_prims();
 }
 
 /*
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index d3053e99fdb6..e59e1849b89a 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -532,18 +532,28 @@ struct rcu_exp_work {
532}; 532};
533 533
534/* 534/*
535 * Common code to drive an expedited grace period forward, used by
536 * workqueues and mid-boot-time tasks.
537 */
538static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
539 smp_call_func_t func, unsigned long s)
540{
541 /* Initialize the rcu_node tree in preparation for the wait. */
542 sync_rcu_exp_select_cpus(rsp, func);
543
544 /* Wait and clean up, including waking everyone. */
545 rcu_exp_wait_wake(rsp, s);
546}
547
548/*
535 * Work-queue handler to drive an expedited grace period forward. 549 * Work-queue handler to drive an expedited grace period forward.
536 */ 550 */
537static void wait_rcu_exp_gp(struct work_struct *wp) 551static void wait_rcu_exp_gp(struct work_struct *wp)
538{ 552{
539 struct rcu_exp_work *rewp; 553 struct rcu_exp_work *rewp;
540 554
541 /* Initialize the rcu_node tree in preparation for the wait. */
542 rewp = container_of(wp, struct rcu_exp_work, rew_work); 555 rewp = container_of(wp, struct rcu_exp_work, rew_work);
543 sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func); 556 rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
544
545 /* Wait and clean up, including waking everyone. */
546 rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s);
547} 557}
548 558
549/* 559/*
@@ -569,12 +579,18 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
569 if (exp_funnel_lock(rsp, s)) 579 if (exp_funnel_lock(rsp, s))
570 return; /* Someone else did our work for us. */ 580 return; /* Someone else did our work for us. */
571 581
572 /* Marshall arguments and schedule the expedited grace period. */ 582 /* Ensure that load happens before action based on it. */
573 rew.rew_func = func; 583 if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
574 rew.rew_rsp = rsp; 584 /* Direct call during scheduler init and early_initcalls(). */
575 rew.rew_s = s; 585 rcu_exp_sel_wait_wake(rsp, func, s);
576 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp); 586 } else {
577 schedule_work(&rew.rew_work); 587 /* Marshal arguments & schedule the expedited grace period. */
588 rew.rew_func = func;
589 rew.rew_rsp = rsp;
590 rew.rew_s = s;
591 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
592 schedule_work(&rew.rew_work);
593 }
578 594
579 /* Wait for expedited grace period to complete. */ 595 /* Wait for expedited grace period to complete. */
580 rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id()); 596 rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
@@ -676,6 +692,8 @@ void synchronize_rcu_expedited(void)
676{ 692{
677 struct rcu_state *rsp = rcu_state_p; 693 struct rcu_state *rsp = rcu_state_p;
678 694
695 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
696 return;
679 _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler); 697 _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
680} 698}
681EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 699EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
@@ -693,3 +711,15 @@ void synchronize_rcu_expedited(void)
693EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 711EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
694 712
695#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ 713#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
714
715/*
716 * Switch to run-time mode once Tree RCU has fully initialized.
717 */
718static int __init rcu_exp_runtime_mode(void)
719{
720 rcu_test_sync_prims();
721 rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
722 rcu_test_sync_prims();
723 return 0;
724}
725core_initcall(rcu_exp_runtime_mode);
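
The new rcu_exp_sel_wait_wake() helper lets _synchronize_rcu_expedited() either drive the grace period inline (mid-boot, when the requesting task must do the work itself) or defer it to a workqueue. A hedged pthread analogue of that dispatch split, with invented names; pthread_create()/pthread_join() stand in for schedule_work() and the completion wait.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool mid_boot = true;	/* stands in for RCU_SCHEDULER_INIT */

static void do_grace_period(void)	/* rcu_exp_sel_wait_wake() analogue */
{
	puts("select cpus, wait, wake waiters");
}

static void *worker(void *arg)	/* wait_rcu_exp_gp() analogue */
{
	(void)arg;
	do_grace_period();
	return NULL;
}

static void synchronize_expedited(void)
{
	if (mid_boot) {
		do_grace_period();	/* direct call: requester drives GP */
	} else {
		pthread_t t;

		pthread_create(&t, NULL, worker, NULL);	/* ~ schedule_work() */
		pthread_join(t, NULL);	/* "wait for GP to complete" */
	}
}

int main(void)
{
	synchronize_expedited();	/* inline, mid-boot path */
	mid_boot = false;
	synchronize_expedited();	/* queued, runtime path */
	return 0;
}
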
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 85c5a883c6e3..56583e764ebf 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -670,7 +670,7 @@ void synchronize_rcu(void)
670 lock_is_held(&rcu_lock_map) || 670 lock_is_held(&rcu_lock_map) ||
671 lock_is_held(&rcu_sched_lock_map), 671 lock_is_held(&rcu_sched_lock_map),
672 "Illegal synchronize_rcu() in RCU read-side critical section"); 672 "Illegal synchronize_rcu() in RCU read-side critical section");
673 if (!rcu_scheduler_active) 673 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
674 return; 674 return;
675 if (rcu_gp_is_expedited()) 675 if (rcu_gp_is_expedited())
676 synchronize_rcu_expedited(); 676 synchronize_rcu_expedited();
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index f19271dce0a9..4f6db7e6a117 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -121,11 +121,14 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
121 * Should expedited grace-period primitives always fall back to their 121 * Should expedited grace-period primitives always fall back to their
122 * non-expedited counterparts? Intended for use within RCU. Note 122 * non-expedited counterparts? Intended for use within RCU. Note
123 * that if the user specifies both rcu_expedited and rcu_normal, then 123 * that if the user specifies both rcu_expedited and rcu_normal, then
124 * rcu_normal wins. 124 * rcu_normal wins. (Except during the period of boot from
125 * when the first task is spawned until the rcu_exp_runtime_mode()
126 * core_initcall() is invoked, at which point everything is expedited.)
125 */ 127 */
126bool rcu_gp_is_normal(void) 128bool rcu_gp_is_normal(void)
127{ 129{
128 return READ_ONCE(rcu_normal); 130 return READ_ONCE(rcu_normal) &&
131 rcu_scheduler_active != RCU_SCHEDULER_INIT;
129} 132}
130EXPORT_SYMBOL_GPL(rcu_gp_is_normal); 133EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
131 134
@@ -135,13 +138,14 @@ static atomic_t rcu_expedited_nesting =
135/* 138/*
136 * Should normal grace-period primitives be expedited? Intended for 139 * Should normal grace-period primitives be expedited? Intended for
137 * use within RCU. Note that this function takes the rcu_expedited 140 * use within RCU. Note that this function takes the rcu_expedited
138 * sysfs/boot variable into account as well as the rcu_expedite_gp() 141 * sysfs/boot variable and rcu_scheduler_active into account as well
139 * nesting. So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited() 142 * as the rcu_expedite_gp() nesting. So looping on rcu_unexpedite_gp()
140 * returns false is a -really- bad idea. 143 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
141 */ 144 */
142bool rcu_gp_is_expedited(void) 145bool rcu_gp_is_expedited(void)
143{ 146{
144 return rcu_expedited || atomic_read(&rcu_expedited_nesting); 147 return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
148 rcu_scheduler_active == RCU_SCHEDULER_INIT;
145} 149}
146EXPORT_SYMBOL_GPL(rcu_gp_is_expedited); 150EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
147 151
@@ -257,7 +261,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map);
257 261
258int notrace debug_lockdep_rcu_enabled(void) 262int notrace debug_lockdep_rcu_enabled(void)
259{ 263{
260 return rcu_scheduler_active && debug_locks && 264 return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
261 current->lockdep_recursion == 0; 265 current->lockdep_recursion == 0;
262} 266}
263EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled); 267EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
@@ -591,7 +595,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
591void synchronize_rcu_tasks(void) 595void synchronize_rcu_tasks(void)
592{ 596{
593 /* Complain if the scheduler has not started. */ 597 /* Complain if the scheduler has not started. */
594 RCU_LOCKDEP_WARN(!rcu_scheduler_active, 598 RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
595 "synchronize_rcu_tasks called too soon"); 599 "synchronize_rcu_tasks called too soon");
596 600
597 /* Wait for the grace period. */ 601 /* Wait for the grace period. */
@@ -813,6 +817,23 @@ static void rcu_spawn_tasks_kthread(void)
813 817
814#endif /* #ifdef CONFIG_TASKS_RCU */ 818#endif /* #ifdef CONFIG_TASKS_RCU */
815 819
820/*
821 * Test each non-SRCU synchronous grace-period wait API. This is
822 * useful just after a change in mode for these primitives, and
823 * during early boot.
824 */
825void rcu_test_sync_prims(void)
826{
827 if (!IS_ENABLED(CONFIG_PROVE_RCU))
828 return;
829 synchronize_rcu();
830 synchronize_rcu_bh();
831 synchronize_sched();
832 synchronize_rcu_expedited();
833 synchronize_rcu_bh_expedited();
834 synchronize_sched_expedited();
835}
836
816#ifdef CONFIG_PROVE_RCU 837#ifdef CONFIG_PROVE_RCU
817 838
818/* 839/*
@@ -865,6 +886,7 @@ void rcu_early_boot_tests(void)
865 early_boot_test_call_rcu_bh(); 886 early_boot_test_call_rcu_bh();
866 if (rcu_self_test_sched) 887 if (rcu_self_test_sched)
867 early_boot_test_call_rcu_sched(); 888 early_boot_test_call_rcu_sched();
889 rcu_test_sync_prims();
868} 890}
869 891
870static int rcu_verify_early_boot_tests(void) 892static int rcu_verify_early_boot_tests(void)
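
After the update.c hunks, rcu_normal is overridden exactly while rcu_scheduler_active == RCU_SCHEDULER_INIT: rcu_gp_is_normal() goes false and rcu_gp_is_expedited() goes true for that window regardless of the boot parameters. A toy truth table of the two predicates in plain C; the enum values and function shapes are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

enum { INACTIVE, INIT, RUNNING };	/* assumed stand-ins */

static bool gp_is_normal(bool rcu_normal, int state)
{
	return rcu_normal && state != INIT;
}

static bool gp_is_expedited(bool rcu_expedited, int nesting, int state)
{
	return rcu_expedited || nesting > 0 || state == INIT;
}

int main(void)
{
	/* Even with rcu_normal set, the mid-boot INIT window is expedited. */
	printf("normal@INIT=%d expedited@INIT=%d\n",
	       gp_is_normal(true, INIT), gp_is_expedited(false, 0, INIT));
	printf("normal@RUNNING=%d expedited@RUNNING=%d\n",
	       gp_is_normal(true, RUNNING), gp_is_expedited(false, 0, RUNNING));
	return 0;
}
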
diff --git a/kernel/signal.c b/kernel/signal.c
index ff046b73ff2d..3603d93a1968 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
346 * fresh group stop. Read comment in do_signal_stop() for details. 346 * fresh group stop. Read comment in do_signal_stop() for details.
347 */ 347 */
348 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { 348 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
349 sig->flags = SIGNAL_STOP_STOPPED; 349 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
350 return true; 350 return true;
351 } 351 }
352 return false; 352 return false;
@@ -843,7 +843,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
843 * will take ->siglock, notice SIGNAL_CLD_MASK, and 843 * will take ->siglock, notice SIGNAL_CLD_MASK, and
844 * notify its parent. See get_signal_to_deliver(). 844 * notify its parent. See get_signal_to_deliver().
845 */ 845 */
846 signal->flags = why | SIGNAL_STOP_CONTINUED; 846 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
847 signal->group_stop_count = 0; 847 signal->group_stop_count = 0;
848 signal->group_exit_code = 0; 848 signal->group_exit_code = 0;
849 } 849 }
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index b6e4c16377c7..9c15a9124e83 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -18,10 +18,8 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
18 if (WARN_ON(!trace->entries)) 18 if (WARN_ON(!trace->entries))
19 return; 19 return;
20 20
21 for (i = 0; i < trace->nr_entries; i++) { 21 for (i = 0; i < trace->nr_entries; i++)
22 printk("%*c", 1 + spaces, ' '); 22 printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
23 print_ip_sym(trace->entries[i]);
24 }
25} 23}
26EXPORT_SYMBOL_GPL(print_stack_trace); 24EXPORT_SYMBOL_GPL(print_stack_trace);
27 25
@@ -29,7 +27,6 @@ int snprint_stack_trace(char *buf, size_t size,
29 struct stack_trace *trace, int spaces) 27 struct stack_trace *trace, int spaces)
30{ 28{
31 int i; 29 int i;
32 unsigned long ip;
33 int generated; 30 int generated;
34 int total = 0; 31 int total = 0;
35 32
@@ -37,9 +34,8 @@ int snprint_stack_trace(char *buf, size_t size,
37 return 0; 34 return 0;
38 35
39 for (i = 0; i < trace->nr_entries; i++) { 36 for (i = 0; i < trace->nr_entries; i++) {
40 ip = trace->entries[i]; 37 generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
41 generated = snprintf(buf, size, "%*c[<%p>] %pS\n", 38 (void *)trace->entries[i]);
42 1 + spaces, ' ', (void *) ip, (void *) ip);
43 39
44 total += generated; 40 total += generated;
45 41
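
The stacktrace.c hunks collapse the old two-step "%*c" + print_ip_sym() into a single printk with %pS and drop the raw pointer from snprint_stack_trace(). Userspace has no %pS, but glibc's backtrace()/backtrace_symbols() approximate it; a sketch (build with -rdynamic so symbol names resolve):

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

static void print_stack_trace(int spaces)
{
	void *entries[16];
	int n = backtrace(entries, 16);
	char **syms = backtrace_symbols(entries, n);	/* rough %pS */

	if (!syms)
		return;
	for (int i = 0; i < n; i++)
		printf("%*c%s\n", 1 + spaces, ' ', syms[i]);
	free(syms);
}

int main(void)
{
	print_stack_trace(4);
	return 0;
}
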
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8dbaec0e4f7f..1aea594a54db 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2475,6 +2475,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
2475 break; 2475 break;
2476 if (neg) 2476 if (neg)
2477 continue; 2477 continue;
2478 val = convmul * val / convdiv;
2478 if ((min && val < *min) || (max && val > *max)) 2479 if ((min && val < *min) || (max && val > *max))
2479 continue; 2480 continue;
2480 *i = val; 2481 *i = val;
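
The sysctl.c one-liner applies the convmul/convdiv scaling before the min/max comparison rather than after, so the bounds are checked in the same units they are expressed in. The point in miniature, with invented units and bounds:

#include <stdio.h>

int main(void)
{
	unsigned long convmul = 1000, convdiv = 1;	/* e.g. s -> ms */
	unsigned long min = 500, max = 5000;		/* bounds in ms */
	unsigned long val = 2;				/* user wrote 2 s */

	val = convmul * val / convdiv;			/* convert first */
	if (val < min || val > max)
		puts("rejected");
	else
		printf("accepted: %lu ms\n", val);	/* 2000 ms */
	return 0;
}
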
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 3109204c87cc..17ac99b60ee5 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -347,17 +347,16 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
347 * 347 *
348 * Called when the system enters a state where affected tick devices 348 * Called when the system enters a state where affected tick devices
349 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone. 349 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
350 *
351 * Called with interrupts disabled, so clockevents_lock is not
352 * required here because the local clock event device cannot go away
353 * under us.
354 */ 350 */
355void tick_broadcast_control(enum tick_broadcast_mode mode) 351void tick_broadcast_control(enum tick_broadcast_mode mode)
356{ 352{
357 struct clock_event_device *bc, *dev; 353 struct clock_event_device *bc, *dev;
358 struct tick_device *td; 354 struct tick_device *td;
359 int cpu, bc_stopped; 355 int cpu, bc_stopped;
356 unsigned long flags;
360 357
358 /* Also protects the local clockevent device. */
359 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
361 td = this_cpu_ptr(&tick_cpu_device); 360 td = this_cpu_ptr(&tick_cpu_device);
362 dev = td->evtdev; 361 dev = td->evtdev;
363 362
@@ -365,12 +364,11 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
365 * Is the device not affected by the powerstate ? 364 * Is the device not affected by the powerstate ?
366 */ 365 */
367 if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP)) 366 if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
368 return; 367 goto out;
369 368
370 if (!tick_device_is_functional(dev)) 369 if (!tick_device_is_functional(dev))
371 return; 370 goto out;
372 371
373 raw_spin_lock(&tick_broadcast_lock);
374 cpu = smp_processor_id(); 372 cpu = smp_processor_id();
375 bc = tick_broadcast_device.evtdev; 373 bc = tick_broadcast_device.evtdev;
376 bc_stopped = cpumask_empty(tick_broadcast_mask); 374 bc_stopped = cpumask_empty(tick_broadcast_mask);
@@ -420,7 +418,8 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
420 tick_broadcast_setup_oneshot(bc); 418 tick_broadcast_setup_oneshot(bc);
421 } 419 }
422 } 420 }
423 raw_spin_unlock(&tick_broadcast_lock); 421out:
422 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
424} 423}
425EXPORT_SYMBOL_GPL(tick_broadcast_control); 424EXPORT_SYMBOL_GPL(tick_broadcast_control);
426 425
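
tick_broadcast_control() now takes tick_broadcast_lock with irqsave before touching the per-cpu tick device, and the early returns become goto out so every path drops the lock it now holds; previously the dev/td checks ran before the lock was taken at all. The single-exit shape, sketched with a pthread mutex (disabling interrupts has no userspace analogue):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void control(bool dev_ok, bool functional)
{
	pthread_mutex_lock(&lock);	/* taken before any check */

	if (!dev_ok)
		goto out;	/* was: bare return before the lock existed */
	if (!functional)
		goto out;

	/* ... reconfigure the broadcast device under the lock ... */
out:
	pthread_mutex_unlock(&lock);	/* single exit releases it */
}

int main(void)
{
	control(false, true);
	control(true, true);
	return 0;
}
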
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index ca9fb800336b..38bc4d2208e8 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -75,7 +75,7 @@ void tk_debug_account_sleep_time(struct timespec64 *t)
75 int bin = min(fls(t->tv_sec), NUM_BINS-1); 75 int bin = min(fls(t->tv_sec), NUM_BINS-1);
76 76
77 sleep_time_bin[bin]++; 77 sleep_time_bin[bin]++;
78 pr_info("Suspended for %lld.%03lu seconds\n", (s64)t->tv_sec, 78 printk_deferred(KERN_INFO "Suspended for %lld.%03lu seconds\n",
79 t->tv_nsec / NSEC_PER_MSEC); 79 (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);
80} 80}
81 81
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 775569ec50d0..af344a1bf0d0 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -266,7 +266,7 @@ out:
266static struct cpumask save_cpumask; 266static struct cpumask save_cpumask;
267static bool disable_migrate; 267static bool disable_migrate;
268 268
269static void move_to_next_cpu(void) 269static void move_to_next_cpu(bool initmask)
270{ 270{
271 static struct cpumask *current_mask; 271 static struct cpumask *current_mask;
272 int next_cpu; 272 int next_cpu;
@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
275 return; 275 return;
276 276
277 /* Just pick the first CPU on first iteration */ 277 /* Just pick the first CPU on first iteration */
278 if (!current_mask) { 278 if (initmask) {
279 current_mask = &save_cpumask; 279 current_mask = &save_cpumask;
280 get_online_cpus(); 280 get_online_cpus();
281 cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); 281 cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
330static int kthread_fn(void *data) 330static int kthread_fn(void *data)
331{ 331{
332 u64 interval; 332 u64 interval;
333 bool initmask = true;
333 334
334 while (!kthread_should_stop()) { 335 while (!kthread_should_stop()) {
335 336
336 move_to_next_cpu(); 337 move_to_next_cpu(initmask);
338 initmask = false;
337 339
338 local_irq_disable(); 340 local_irq_disable();
339 get_sample(); 341 get_sample();
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index a133ecd741e4..7ad9e53ad174 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1372,7 +1372,7 @@ kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
1372 return a1 + a2 + a3 + a4 + a5 + a6; 1372 return a1 + a2 + a3 + a4 + a5 + a6;
1373} 1373}
1374 1374
1375static struct __init trace_event_file * 1375static __init struct trace_event_file *
1376find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) 1376find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1377{ 1377{
1378 struct trace_event_file *file; 1378 struct trace_event_file *file;
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 9d20d5dd298a..95c6336fc2b3 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
128 struct hlist_head *hashent = ucounts_hashentry(ns, uid); 128 struct hlist_head *hashent = ucounts_hashentry(ns, uid);
129 struct ucounts *ucounts, *new; 129 struct ucounts *ucounts, *new;
130 130
131 spin_lock(&ucounts_lock); 131 spin_lock_irq(&ucounts_lock);
132 ucounts = find_ucounts(ns, uid, hashent); 132 ucounts = find_ucounts(ns, uid, hashent);
133 if (!ucounts) { 133 if (!ucounts) {
134 spin_unlock(&ucounts_lock); 134 spin_unlock_irq(&ucounts_lock);
135 135
136 new = kzalloc(sizeof(*new), GFP_KERNEL); 136 new = kzalloc(sizeof(*new), GFP_KERNEL);
137 if (!new) 137 if (!new)
@@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
141 new->uid = uid; 141 new->uid = uid;
142 atomic_set(&new->count, 0); 142 atomic_set(&new->count, 0);
143 143
144 spin_lock(&ucounts_lock); 144 spin_lock_irq(&ucounts_lock);
145 ucounts = find_ucounts(ns, uid, hashent); 145 ucounts = find_ucounts(ns, uid, hashent);
146 if (ucounts) { 146 if (ucounts) {
147 kfree(new); 147 kfree(new);
@@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
152 } 152 }
153 if (!atomic_add_unless(&ucounts->count, 1, INT_MAX)) 153 if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
154 ucounts = NULL; 154 ucounts = NULL;
155 spin_unlock(&ucounts_lock); 155 spin_unlock_irq(&ucounts_lock);
156 return ucounts; 156 return ucounts;
157} 157}
158 158
159static void put_ucounts(struct ucounts *ucounts) 159static void put_ucounts(struct ucounts *ucounts)
160{ 160{
161 unsigned long flags;
162
161 if (atomic_dec_and_test(&ucounts->count)) { 163 if (atomic_dec_and_test(&ucounts->count)) {
162 spin_lock(&ucounts_lock); 164 spin_lock_irqsave(&ucounts_lock, flags);
163 hlist_del_init(&ucounts->node); 165 hlist_del_init(&ucounts->node);
164 spin_unlock(&ucounts_lock); 166 spin_unlock_irqrestore(&ucounts_lock, flags);
165 167
166 kfree(ucounts); 168 kfree(ucounts);
167 } 169 }
@@ -225,11 +227,10 @@ static __init int user_namespace_sysctl_init(void)
225 * properly. 227 * properly.
226 */ 228 */
227 user_header = register_sysctl("user", empty); 229 user_header = register_sysctl("user", empty);
230 kmemleak_ignore(user_header);
228 BUG_ON(!user_header); 231 BUG_ON(!user_header);
229 BUG_ON(!setup_userns_sysctls(&init_user_ns)); 232 BUG_ON(!setup_userns_sysctls(&init_user_ns));
230#endif 233#endif
231 return 0; 234 return 0;
232} 235}
233subsys_initcall(user_namespace_sysctl_init); 236subsys_initcall(user_namespace_sysctl_init);
234
235
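
put_ucounts() switches to spin_lock_irqsave() because the final reference can drop in contexts that already have interrupts off. The teardown shape itself -- atomic decrement, and the thread that drops the last reference unlinks and frees under the lock -- in C11, with a pthread mutex standing in for the irq-safe spinlock:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct ucounts {
	atomic_int count;
	/* hlist node, uid, ... elided */
};

static pthread_mutex_t ucounts_lock = PTHREAD_MUTEX_INITIALIZER;

static void put_ucounts(struct ucounts *uc)
{
	/* old value 1 means this was the last reference */
	if (atomic_fetch_sub(&uc->count, 1) == 1) {
		pthread_mutex_lock(&ucounts_lock);	/* ~ spin_lock_irqsave */
		/* hlist_del_init(&uc->node); */
		pthread_mutex_unlock(&ucounts_lock);
		free(uc);
	}
}

int main(void)
{
	struct ucounts *uc = calloc(1, sizeof(*uc));

	atomic_init(&uc->count, 1);
	put_ucounts(uc);
	return 0;
}
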
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d4b0fa01cae3..63177be0159e 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
49#define for_each_watchdog_cpu(cpu) \ 49#define for_each_watchdog_cpu(cpu) \
50 for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask) 50 for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
51 51
52atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
53
52/* 54/*
53 * The 'watchdog_running' variable is set to 1 when the watchdog threads 55 * The 'watchdog_running' variable is set to 1 when the watchdog threads
54 * are registered/started and is set to 0 when the watchdog threads are 56 * are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
260 int duration; 262 int duration;
261 int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; 263 int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
262 264
265 if (atomic_read(&watchdog_park_in_progress) != 0)
266 return HRTIMER_NORESTART;
267
263 /* kick the hardlockup detector */ 268 /* kick the hardlockup detector */
264 watchdog_interrupt_count(); 269 watchdog_interrupt_count();
265 270
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
467{ 472{
468 int cpu, ret = 0; 473 int cpu, ret = 0;
469 474
475 atomic_set(&watchdog_park_in_progress, 1);
476
470 for_each_watchdog_cpu(cpu) { 477 for_each_watchdog_cpu(cpu) {
471 ret = kthread_park(per_cpu(softlockup_watchdog, cpu)); 478 ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
472 if (ret) 479 if (ret)
473 break; 480 break;
474 } 481 }
475 482
483 atomic_set(&watchdog_park_in_progress, 0);
484
476 return ret; 485 return ret;
477} 486}
478 487
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 84016c8aee6b..12b8dd640786 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
84 /* Ensure the watchdog never gets throttled */ 84 /* Ensure the watchdog never gets throttled */
85 event->hw.interrupts = 0; 85 event->hw.interrupts = 0;
86 86
87 if (atomic_read(&watchdog_park_in_progress) != 0)
88 return;
89
87 if (__this_cpu_read(watchdog_nmi_touch) == true) { 90 if (__this_cpu_read(watchdog_nmi_touch) == true) {
88 __this_cpu_write(watchdog_nmi_touch, false); 91 __this_cpu_write(watchdog_nmi_touch, false);
89 return; 92 return;
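
Both watchdog callbacks now test an atomic watchdog_park_in_progress flag and bail out while the kthreads are being parked. The guard pattern in userspace C11; the names and timings below are invented:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int park_in_progress;

static void *watchdog_tick(void *arg)	/* timer/NMI callback analogue */
{
	(void)arg;
	for (int i = 0; i < 5; i++) {
		if (atomic_load(&park_in_progress))
			puts("parking: skip this tick");
		else
			puts("kick watchdog");
		usleep(1000);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, watchdog_tick, NULL);
	atomic_store(&park_in_progress, 1);	/* ~ watchdog_park_threads() */
	usleep(2000);				/* ... park the kthreads ... */
	atomic_store(&park_in_progress, 0);
	pthread_join(t, NULL);
	return 0;
}
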
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b06848a104e6..eb9e9a7870fa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -164,7 +164,7 @@ config DEBUG_INFO_REDUCED
164 164
165config DEBUG_INFO_SPLIT 165config DEBUG_INFO_SPLIT
166 bool "Produce split debuginfo in .dwo files" 166 bool "Produce split debuginfo in .dwo files"
167 depends on DEBUG_INFO 167 depends on DEBUG_INFO && !FRV
168 help 168 help
169 Generate debug info into separate .dwo files. This significantly 169 Generate debug info into separate .dwo files. This significantly
170 reduces the build directory size for builds with DEBUG_INFO, 170 reduces the build directory size for builds with DEBUG_INFO,
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 86c8911b0e3a..a3e14ce92a56 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
144 144
145 return err; 145 return err;
146} 146}
147EXPORT_SYMBOL_GPL(ioremap_page_range);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 25f572303801..e68604ae3ced 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -730,43 +730,50 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
730} 730}
731EXPORT_SYMBOL(iov_iter_copy_from_user_atomic); 731EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
732 732
733static inline void pipe_truncate(struct iov_iter *i)
734{
735 struct pipe_inode_info *pipe = i->pipe;
736 if (pipe->nrbufs) {
737 size_t off = i->iov_offset;
738 int idx = i->idx;
739 int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
740 if (off) {
741 pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
742 idx = next_idx(idx, pipe);
743 nrbufs++;
744 }
745 while (pipe->nrbufs > nrbufs) {
746 pipe_buf_release(pipe, &pipe->bufs[idx]);
747 idx = next_idx(idx, pipe);
748 pipe->nrbufs--;
749 }
750 }
751}
752
733static void pipe_advance(struct iov_iter *i, size_t size) 753static void pipe_advance(struct iov_iter *i, size_t size)
734{ 754{
735 struct pipe_inode_info *pipe = i->pipe; 755 struct pipe_inode_info *pipe = i->pipe;
736 struct pipe_buffer *buf;
737 int idx = i->idx;
738 size_t off = i->iov_offset, orig_sz;
739
740 if (unlikely(i->count < size)) 756 if (unlikely(i->count < size))
741 size = i->count; 757 size = i->count;
742 orig_sz = size;
743
744 if (size) { 758 if (size) {
759 struct pipe_buffer *buf;
760 size_t off = i->iov_offset, left = size;
761 int idx = i->idx;
745 if (off) /* make it relative to the beginning of buffer */ 762 if (off) /* make it relative to the beginning of buffer */
746 size += off - pipe->bufs[idx].offset; 763 left += off - pipe->bufs[idx].offset;
747 while (1) { 764 while (1) {
748 buf = &pipe->bufs[idx]; 765 buf = &pipe->bufs[idx];
749 if (size <= buf->len) 766 if (left <= buf->len)
750 break; 767 break;
751 size -= buf->len; 768 left -= buf->len;
752 idx = next_idx(idx, pipe); 769 idx = next_idx(idx, pipe);
753 } 770 }
754 buf->len = size;
755 i->idx = idx; 771 i->idx = idx;
756 off = i->iov_offset = buf->offset + size; 772 i->iov_offset = buf->offset + left;
757 }
758 if (off)
759 idx = next_idx(idx, pipe);
760 if (pipe->nrbufs) {
761 int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
762 /* [curbuf,unused) is in use. Free [idx,unused) */
763 while (idx != unused) {
764 pipe_buf_release(pipe, &pipe->bufs[idx]);
765 idx = next_idx(idx, pipe);
766 pipe->nrbufs--;
767 }
768 } 773 }
769 i->count -= orig_sz; 774 i->count -= size;
775 /* ... and discard everything past that point */
776 pipe_truncate(i);
770} 777}
771 778
772void iov_iter_advance(struct iov_iter *i, size_t size) 779void iov_iter_advance(struct iov_iter *i, size_t size)
@@ -826,6 +833,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
826 size_t count) 833 size_t count)
827{ 834{
828 BUG_ON(direction != ITER_PIPE); 835 BUG_ON(direction != ITER_PIPE);
836 WARN_ON(pipe->nrbufs == pipe->buffers);
829 i->type = direction; 837 i->type = direction;
830 i->pipe = pipe; 838 i->pipe = pipe;
831 i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); 839 i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
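
pipe_advance() is split into position-finding plus a pipe_truncate() helper that releases every buffer past the new position, and the truncation now also runs for a zero-size advance. A loose toy model of that split over a small ring of buffers; everything here is invented, no kernel types:

#include <stdio.h>

#define NBUFS 4	/* power of two, like pipe->buffers */

struct buf { int offset, len; };

static struct buf bufs[NBUFS];
static int curbuf, nrbufs;	/* first used slot, number in use */

/* Release everything past (idx, off): the pipe_truncate() analogue. */
static void truncate_past(int idx, int off)
{
	int keep = (idx - curbuf) & (NBUFS - 1);

	if (off) {
		bufs[idx].len = off - bufs[idx].offset;
		keep++;
	}
	while (nrbufs > keep)
		nrbufs--;	/* stands in for pipe_buf_release() */
}

static void advance(int *idx, int *off, int size)
{
	int left = size + (*off ? *off - bufs[*idx].offset : 0);

	while (left > bufs[*idx].len) {	/* walk whole buffers */
		left -= bufs[*idx].len;
		*idx = (*idx + 1) & (NBUFS - 1);
	}
	*off = bufs[*idx].offset + left;
	truncate_past(*idx, *off);
}

int main(void)
{
	int idx = 0, off = 0;

	nrbufs = 3;
	for (int i = 0; i < 3; i++)
		bufs[i] = (struct buf){ 0, 100 };
	advance(&idx, &off, 150);	/* lands 50 bytes into buf 1 */
	printf("idx=%d off=%d nrbufs=%d\n", idx, off, nrbufs);
	return 0;
}
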
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6f382e07de77..84812a9fb16f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -640,6 +640,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
640 update_node(node, private); 640 update_node(node, private);
641 } 641 }
642 642
643 WARN_ON_ONCE(!list_empty(&node->private_list));
643 radix_tree_node_free(node); 644 radix_tree_node_free(node);
644 } 645 }
645} 646}
@@ -666,6 +667,7 @@ static void delete_node(struct radix_tree_root *root,
666 root->rnode = NULL; 667 root->rnode = NULL;
667 } 668 }
668 669
670 WARN_ON_ONCE(!list_empty(&node->private_list));
669 radix_tree_node_free(node); 671 radix_tree_node_free(node);
670 672
671 node = parent; 673 node = parent;
@@ -767,6 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
767 struct radix_tree_node *old = child; 769 struct radix_tree_node *old = child;
768 offset = child->offset + 1; 770 offset = child->offset + 1;
769 child = child->parent; 771 child = child->parent;
772 WARN_ON_ONCE(!list_empty(&old->private_list));
770 radix_tree_node_free(old); 773 radix_tree_node_free(old);
771 if (old == entry_to_node(node)) 774 if (old == entry_to_node(node))
772 return; 775 return;
@@ -1824,15 +1827,19 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
1824 * __radix_tree_delete_node - try to free node after clearing a slot 1827 * __radix_tree_delete_node - try to free node after clearing a slot
1825 * @root: radix tree root 1828 * @root: radix tree root
1826 * @node: node containing @index 1829 * @node: node containing @index
1830 * @update_node: callback for changing leaf nodes
1831 * @private: private data to pass to @update_node
1827 * 1832 *
1828 * After clearing the slot at @index in @node from radix tree 1833 * After clearing the slot at @index in @node from radix tree
1829 * rooted at @root, call this function to attempt freeing the 1834 * rooted at @root, call this function to attempt freeing the
1830 * node and shrinking the tree. 1835 * node and shrinking the tree.
1831 */ 1836 */
1832void __radix_tree_delete_node(struct radix_tree_root *root, 1837void __radix_tree_delete_node(struct radix_tree_root *root,
1833 struct radix_tree_node *node) 1838 struct radix_tree_node *node,
1839 radix_tree_update_node_t update_node,
1840 void *private)
1834{ 1841{
1835 delete_node(root, node, NULL, NULL); 1842 delete_node(root, node, update_node, private);
1836} 1843}
1837 1844
1838/** 1845/**
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index cb1b54ee8527..a8d74a733a38 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -53,7 +53,7 @@
53 */ 53 */
54#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) 54#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
55 55
56int swiotlb_force; 56enum swiotlb_force swiotlb_force;
57 57
58/* 58/*
59 * Used to do a quick range check in swiotlb_tbl_unmap_single and 59 * Used to do a quick range check in swiotlb_tbl_unmap_single and
@@ -83,6 +83,12 @@ static unsigned int *io_tlb_list;
83static unsigned int io_tlb_index; 83static unsigned int io_tlb_index;
84 84
85/* 85/*
86 * Max segment that we can provide which (if pages are contiguous) will
87 * not be bounced (unless SWIOTLB_FORCE is set).
88 */
89unsigned int max_segment;
90
91/*
86 * We need to save away the original address corresponding to a mapped entry 92 * We need to save away the original address corresponding to a mapped entry
87 * for the sync operations. 93 * for the sync operations.
88 */ 94 */
@@ -106,8 +112,12 @@ setup_io_tlb_npages(char *str)
106 } 112 }
107 if (*str == ',') 113 if (*str == ',')
108 ++str; 114 ++str;
109 if (!strcmp(str, "force")) 115 if (!strcmp(str, "force")) {
110 swiotlb_force = 1; 116 swiotlb_force = SWIOTLB_FORCE;
117 } else if (!strcmp(str, "noforce")) {
118 swiotlb_force = SWIOTLB_NO_FORCE;
119 io_tlb_nslabs = 1;
120 }
111 121
112 return 0; 122 return 0;
113} 123}
@@ -120,6 +130,20 @@ unsigned long swiotlb_nr_tbl(void)
120} 130}
121EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); 131EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
122 132
133unsigned int swiotlb_max_segment(void)
134{
135 return max_segment;
136}
137EXPORT_SYMBOL_GPL(swiotlb_max_segment);
138
139void swiotlb_set_max_segment(unsigned int val)
140{
141 if (swiotlb_force == SWIOTLB_FORCE)
142 max_segment = 1;
143 else
144 max_segment = rounddown(val, PAGE_SIZE);
145}
146
123/* default to 64MB */ 147/* default to 64MB */
124#define IO_TLB_DEFAULT_SIZE (64UL<<20) 148#define IO_TLB_DEFAULT_SIZE (64UL<<20)
125unsigned long swiotlb_size_or_default(void) 149unsigned long swiotlb_size_or_default(void)
@@ -201,6 +225,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
201 if (verbose) 225 if (verbose)
202 swiotlb_print_info(); 226 swiotlb_print_info();
203 227
228 swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
204 return 0; 229 return 0;
205} 230}
206 231
@@ -279,6 +304,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
279 rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs); 304 rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
280 if (rc) 305 if (rc)
281 free_pages((unsigned long)vstart, order); 306 free_pages((unsigned long)vstart, order);
307
282 return rc; 308 return rc;
283} 309}
284 310
@@ -333,6 +359,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
333 359
334 late_alloc = 1; 360 late_alloc = 1;
335 361
362 swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
363
336 return 0; 364 return 0;
337 365
338cleanup4: 366cleanup4:
@@ -347,6 +375,7 @@ cleanup2:
347 io_tlb_end = 0; 375 io_tlb_end = 0;
348 io_tlb_start = 0; 376 io_tlb_start = 0;
349 io_tlb_nslabs = 0; 377 io_tlb_nslabs = 0;
378 max_segment = 0;
350 return -ENOMEM; 379 return -ENOMEM;
351} 380}
352 381
@@ -375,6 +404,7 @@ void __init swiotlb_free(void)
375 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); 404 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
376 } 405 }
377 io_tlb_nslabs = 0; 406 io_tlb_nslabs = 0;
407 max_segment = 0;
378} 408}
379 409
380int is_swiotlb_buffer(phys_addr_t paddr) 410int is_swiotlb_buffer(phys_addr_t paddr)
@@ -453,11 +483,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
453 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); 483 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
454 484
455 /* 485 /*
456 * For mappings greater than a page, we limit the stride (and 486 * For mappings greater than or equal to a page, we limit the stride
457 * hence alignment) to a page size. 487 * (and hence alignment) to a page size.
458 */ 488 */
459 nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 489 nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
460 if (size > PAGE_SIZE) 490 if (size >= PAGE_SIZE)
461 stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); 491 stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
462 else 492 else
463 stride = 1; 493 stride = 1;
@@ -543,8 +573,15 @@ static phys_addr_t
543map_single(struct device *hwdev, phys_addr_t phys, size_t size, 573map_single(struct device *hwdev, phys_addr_t phys, size_t size,
544 enum dma_data_direction dir, unsigned long attrs) 574 enum dma_data_direction dir, unsigned long attrs)
545{ 575{
546 dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start); 576 dma_addr_t start_dma_addr;
547 577
578 if (swiotlb_force == SWIOTLB_NO_FORCE) {
579 dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
580 &phys);
581 return SWIOTLB_MAP_ERROR;
582 }
583
584 start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
548 return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, 585 return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
549 dir, attrs); 586 dir, attrs);
550} 587}
@@ -721,6 +758,9 @@ static void
721swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, 758swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
722 int do_panic) 759 int do_panic)
723{ 760{
761 if (swiotlb_force == SWIOTLB_NO_FORCE)
762 return;
763
724 /* 764 /*
725 * Ran out of IOMMU space for this operation. This is very bad. 765 * Ran out of IOMMU space for this operation. This is very bad.
726 * Unfortunately the drivers cannot handle this operation properly. 766 * Unfortunately the drivers cannot handle this operation properly.
@@ -763,7 +803,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
763 * we can safely return the device addr and not worry about bounce 803 * we can safely return the device addr and not worry about bounce
764 * buffering it. 804 * buffering it.
765 */ 805 */
766 if (dma_capable(dev, dev_addr, size) && !swiotlb_force) 806 if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
767 return dev_addr; 807 return dev_addr;
768 808
769 trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); 809 trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
@@ -904,7 +944,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
904 phys_addr_t paddr = sg_phys(sg); 944 phys_addr_t paddr = sg_phys(sg);
905 dma_addr_t dev_addr = phys_to_dma(hwdev, paddr); 945 dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
906 946
907 if (swiotlb_force || 947 if (swiotlb_force == SWIOTLB_FORCE ||
908 !dma_capable(hwdev, dev_addr, sg->length)) { 948 !dma_capable(hwdev, dev_addr, sg->length)) {
909 phys_addr_t map = map_single(hwdev, sg_phys(sg), 949 phys_addr_t map = map_single(hwdev, sg_phys(sg),
910 sg->length, dir, attrs); 950 sg->length, dir, attrs);
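
swiotlb_set_max_segment() advertises a one-byte segment when bouncing is forced, pushing every mapping through the bounce buffer, and otherwise rounds the table size down to a page multiple. The arithmetic, extracted into plain C; PAGE_SIZE and the enum values are assumed to match the patch:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define rounddown(x, y) (((x) / (y)) * (y))

enum swiotlb_force { SWIOTLB_NORMAL, SWIOTLB_FORCE, SWIOTLB_NO_FORCE };

static unsigned long set_max_segment(enum swiotlb_force force,
				     unsigned long bytes)
{
	if (force == SWIOTLB_FORCE)
		return 1;	/* every mapping bounces */
	return rounddown(bytes, PAGE_SIZE);
}

int main(void)
{
	printf("%lu\n", set_max_segment(SWIOTLB_NORMAL, (64UL << 20) + 123));
	printf("%lu\n", set_max_segment(SWIOTLB_FORCE, 64UL << 20));
	return 0;
}
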
diff --git a/mm/filemap.c b/mm/filemap.c
index 82f26cde830c..3f9afded581b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -138,7 +138,7 @@ static int page_cache_tree_insert(struct address_space *mapping,
138 dax_radix_locked_entry(0, RADIX_DAX_EMPTY)); 138 dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
139 /* Wakeup waiters for exceptional entry lock */ 139 /* Wakeup waiters for exceptional entry lock */
140 dax_wake_mapping_entry_waiter(mapping, page->index, p, 140 dax_wake_mapping_entry_waiter(mapping, page->index, p,
141 false); 141 true);
142 } 142 }
143 } 143 }
144 __radix_tree_replace(&mapping->page_tree, node, slot, page, 144 __radix_tree_replace(&mapping->page_tree, node, slot, page,
@@ -912,6 +912,29 @@ void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
912} 912}
913EXPORT_SYMBOL_GPL(add_page_wait_queue); 913EXPORT_SYMBOL_GPL(add_page_wait_queue);
914 914
915#ifndef clear_bit_unlock_is_negative_byte
916
917/*
918 * PG_waiters is the high bit in the same byte as PG_lock.
919 *
920 * On x86 (and on many other architectures), we can clear PG_lock and
921 * test the sign bit at the same time. But if the architecture does
922 * not support that special operation, we just do this all by hand
923 * instead.
924 *
925 * The read of PG_waiters has to be after (or concurrently with) PG_locked
926 * being cleared, but a memory barrier should be unnecessary since it is
927 * in the same byte as PG_locked.
928 */
929static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
930{
931 clear_bit_unlock(nr, mem);
932 /* smp_mb__after_atomic(); */
933 return test_bit(PG_waiters, mem);
934}
935
936#endif
937
915/** 938/**
916 * unlock_page - unlock a locked page 939 * unlock_page - unlock a locked page
917 * @page: the page 940 * @page: the page
@@ -921,16 +944,19 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
921 * mechanism between PageLocked pages and PageWriteback pages is shared. 944 * mechanism between PageLocked pages and PageWriteback pages is shared.
922 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. 945 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
923 * 946 *
924 * The mb is necessary to enforce ordering between the clear_bit and the read 947 * Note that this depends on PG_waiters being the sign bit in the byte
925 * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()). 948 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
949 * clear the PG_locked bit and test PG_waiters at the same time fairly
950 * portably (architectures that do LL/SC can test any bit, while x86 can
951 * test the sign bit).
926 */ 952 */
927void unlock_page(struct page *page) 953void unlock_page(struct page *page)
928{ 954{
955 BUILD_BUG_ON(PG_waiters != 7);
929 page = compound_head(page); 956 page = compound_head(page);
930 VM_BUG_ON_PAGE(!PageLocked(page), page); 957 VM_BUG_ON_PAGE(!PageLocked(page), page);
931 clear_bit_unlock(PG_locked, &page->flags); 958 if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
932 smp_mb__after_atomic(); 959 wake_up_page_bit(page, PG_locked);
933 wake_up_page(page, PG_locked);
934} 960}
935EXPORT_SYMBOL(unlock_page); 961EXPORT_SYMBOL(unlock_page);
936 962
@@ -1765,6 +1791,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
1765 1791
1766 cond_resched(); 1792 cond_resched();
1767find_page: 1793find_page:
1794 if (fatal_signal_pending(current)) {
1795 error = -EINTR;
1796 goto out;
1797 }
1798
1768 page = find_get_page(mapping, index); 1799 page = find_get_page(mapping, index);
1769 if (!page) { 1800 if (!page) {
1770 page_cache_sync_readahead(mapping, 1801 page_cache_sync_readahead(mapping,
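
The filemap.c fallback depends on PG_locked and PG_waiters sharing a byte, with PG_waiters in the sign bit (hence the BUILD_BUG_ON). A C11 model of the idea; a single fetch-and stands in here for both the x86 "clear bit and test sign" fast path and the generic clear-then-test fallback:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PG_locked  0
#define PG_waiters 7	/* sign bit of the same byte as PG_locked */

static bool clear_lock_is_negative_byte(atomic_uchar *flags)
{
	/* clear PG_locked with release semantics... */
	unsigned char old = atomic_fetch_and_explicit(flags,
			(unsigned char)~(1u << PG_locked),
			memory_order_release);
	/* ...and report whether PG_waiters was set in the same byte */
	return old & (1u << PG_waiters);
}

int main(void)
{
	atomic_uchar flags = (1u << PG_locked) | (1u << PG_waiters);

	if (clear_lock_is_negative_byte(&flags))
		puts("waiters present: wake_up_page_bit()");
	return 0;
}
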
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 10eedbf14421..5f3ad65c85de 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
783 783
784 assert_spin_locked(pmd_lockptr(mm, pmd)); 784 assert_spin_locked(pmd_lockptr(mm, pmd));
785 785
786 /*
787 * When we COW a devmap PMD entry, we split it into PTEs, so we should
788 * not be in this function with `flags & FOLL_COW` set.
789 */
790 WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
791
786 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 792 if (flags & FOLL_WRITE && !pmd_write(*pmd))
787 return NULL; 793 return NULL;
788 794
@@ -883,15 +889,17 @@ void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
883{ 889{
884 pmd_t entry; 890 pmd_t entry;
885 unsigned long haddr; 891 unsigned long haddr;
892 bool write = vmf->flags & FAULT_FLAG_WRITE;
886 893
887 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); 894 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
888 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) 895 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
889 goto unlock; 896 goto unlock;
890 897
891 entry = pmd_mkyoung(orig_pmd); 898 entry = pmd_mkyoung(orig_pmd);
899 if (write)
900 entry = pmd_mkdirty(entry);
892 haddr = vmf->address & HPAGE_PMD_MASK; 901 haddr = vmf->address & HPAGE_PMD_MASK;
893 if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, 902 if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
894 vmf->flags & FAULT_FLAG_WRITE))
895 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); 903 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
896 904
897unlock: 905unlock:
@@ -919,8 +927,7 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
919 } 927 }
920 928
921 for (i = 0; i < HPAGE_PMD_NR; i++) { 929 for (i = 0; i < HPAGE_PMD_NR; i++) {
922 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | 930 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
923 __GFP_OTHER_NODE, vma,
924 vmf->address, page_to_nid(page)); 931 vmf->address, page_to_nid(page));
925 if (unlikely(!pages[i] || 932 if (unlikely(!pages[i] ||
926 mem_cgroup_try_charge(pages[i], vma->vm_mm, 933 mem_cgroup_try_charge(pages[i], vma->vm_mm,
@@ -1127,6 +1134,16 @@ out_unlock:
1127 return ret; 1134 return ret;
1128} 1135}
1129 1136
1137/*
1138 * FOLL_FORCE can write to even unwritable pmd's, but only
1139 * after we've gone through a COW cycle and they are dirty.
1140 */
1141static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
1142{
1143 return pmd_write(pmd) ||
1144 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
1145}
1146
1130struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, 1147struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1131 unsigned long addr, 1148 unsigned long addr,
1132 pmd_t *pmd, 1149 pmd_t *pmd,
@@ -1137,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1137 1154
1138 assert_spin_locked(pmd_lockptr(mm, pmd)); 1155 assert_spin_locked(pmd_lockptr(mm, pmd));
1139 1156
1140 if (flags & FOLL_WRITE && !pmd_write(*pmd)) 1157 if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
1141 goto out; 1158 goto out;
1142 1159
1143 /* Avoid dumping huge zero page */ 1160 /* Avoid dumping huge zero page */
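
can_follow_write_pmd() ports the pte-level FOLL_FORCE rule to PMDs: forced writes through an unwritable entry are allowed only after a COW cycle has dirtied it and FOLL_COW is set. The predicate in isolation; the flag values below are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define FOLL_FORCE 0x1	/* illustrative values only */
#define FOLL_COW   0x2

struct pmd { bool write, dirty; };

static bool can_follow_write_pmd(struct pmd pmd, unsigned int flags)
{
	return pmd.write ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd.dirty);
}

int main(void)
{
	struct pmd ro_clean = { false, false };
	struct pmd ro_dirty = { false, true };

	printf("%d\n", can_follow_write_pmd(ro_clean, FOLL_FORCE | FOLL_COW));
	printf("%d\n", can_follow_write_pmd(ro_dirty, FOLL_FORCE | FOLL_COW));
	return 0;
}
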
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3edb759c5c7d..c7025c132670 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1773,23 +1773,32 @@ free:
1773} 1773}
1774 1774
1775/* 1775/*
1776 * When releasing a hugetlb pool reservation, any surplus pages that were 1776 * This routine has two main purposes:
1777 * allocated to satisfy the reservation must be explicitly freed if they were 1777 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1778 * never used. 1778 * in unused_resv_pages. This corresponds to the prior adjustments made
1779 * Called with hugetlb_lock held. 1779 * to the associated reservation map.
1780 * 2) Free any unused surplus pages that may have been allocated to satisfy
1781 * the reservation. As many as unused_resv_pages may be freed.
1782 *
1783 * Called with hugetlb_lock held. However, the lock could be dropped (and
1784 * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
1785 * we must make sure nobody else can claim pages we are in the process of
1786 * freeing. Do this by ensuring resv_huge_pages is always greater than the
1787 * number of huge pages we plan to free when dropping the lock.
1780 */ 1788 */
1781static void return_unused_surplus_pages(struct hstate *h, 1789static void return_unused_surplus_pages(struct hstate *h,
1782 unsigned long unused_resv_pages) 1790 unsigned long unused_resv_pages)
1783{ 1791{
1784 unsigned long nr_pages; 1792 unsigned long nr_pages;
1785 1793
1786 /* Uncommit the reservation */
1787 h->resv_huge_pages -= unused_resv_pages;
1788
1789 /* Cannot return gigantic pages currently */ 1794 /* Cannot return gigantic pages currently */
1790 if (hstate_is_gigantic(h)) 1795 if (hstate_is_gigantic(h))
1791 return; 1796 goto out;
1792 1797
1798 /*
1799 * Part (or even all) of the reservation could have been backed
1800 * by pre-allocated pages. Only free surplus pages.
1801 */
1793 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 1802 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1794 1803
1795 /* 1804 /*
@@ -1799,12 +1808,22 @@ static void return_unused_surplus_pages(struct hstate *h,
1799 * when the nodes with surplus pages have no free pages. 1808 * when the nodes with surplus pages have no free pages.
1800 * free_pool_huge_page() will balance the freed pages across the 1809 * free_pool_huge_page() will balance the freed pages across the
1801 * on-line nodes with memory and will handle the hstate accounting. 1810 * on-line nodes with memory and will handle the hstate accounting.
1811 *
1812 * Note that we decrement resv_huge_pages as we free the pages. If
1813 * we drop the lock, resv_huge_pages will still be sufficiently large
1814 * to cover subsequent pages we may free.
1802 */ 1815 */
1803 while (nr_pages--) { 1816 while (nr_pages--) {
1817 h->resv_huge_pages--;
1818 unused_resv_pages--;
1804 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) 1819 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1805 break; 1820 goto out;
1806 cond_resched_lock(&hugetlb_lock); 1821 cond_resched_lock(&hugetlb_lock);
1807 } 1822 }
1823
1824out:
1825 /* Fully uncommit the reservation */
1826 h->resv_huge_pages -= unused_resv_pages;
1808} 1827}
1809 1828
1810 1829
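
The reworked return_unused_surplus_pages() uncommits the reservation one page at a time instead of all at once up front, so resv_huge_pages still covers every not-yet-freed page whenever cond_resched_lock() drops hugetlb_lock. The invariant in a toy loop, numbers invented:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long resv = 10, unused = 10;	/* pages to uncommit */
	unsigned long surplus = 4;		/* only these can be freed */
	unsigned long nr = unused < surplus ? unused : surplus;

	while (nr--) {
		resv--;		/* one page's worth per iteration... */
		unused--;	/* ...so resv >= unused holds even if the
				 * lock were dropped right here */
		assert(resv >= unused);
	}
	resv -= unused;		/* fully uncommit the remainder */
	printf("resv=%lu\n", resv);
	return 0;
}
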
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index b82b3e215157..f479365530b6 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -13,6 +13,7 @@
13 * 13 *
14 */ 14 */
15 15
16#include <linux/ftrace.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/mm.h> 18#include <linux/mm.h>
18#include <linux/printk.h> 19#include <linux/printk.h>
@@ -300,6 +301,8 @@ void kasan_report(unsigned long addr, size_t size,
300 if (likely(!kasan_report_enabled())) 301 if (likely(!kasan_report_enabled()))
301 return; 302 return;
302 303
304 disable_trace_on_warning();
305
303 info.access_addr = (void *)addr; 306 info.access_addr = (void *)addr;
304 info.access_size = size; 307 info.access_size = size;
305 info.is_write = is_write; 308 info.is_write = is_write;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e32389a97030..77ae3239c3de 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -943,7 +943,7 @@ static void collapse_huge_page(struct mm_struct *mm,
943 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 943 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
944 944
945 /* Only allocate from the target node */ 945 /* Only allocate from the target node */
946 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE; 946 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
947 947
948 /* 948 /*
949 * Before allocating the hugepage, release the mmap_sem read lock. 949 * Before allocating the hugepage, release the mmap_sem read lock.
@@ -1242,7 +1242,6 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1242 struct vm_area_struct *vma; 1242 struct vm_area_struct *vma;
1243 unsigned long addr; 1243 unsigned long addr;
1244 pmd_t *pmd, _pmd; 1244 pmd_t *pmd, _pmd;
1245 bool deposited = false;
1246 1245
1247 i_mmap_lock_write(mapping); 1246 i_mmap_lock_write(mapping);
1248 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 1247 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1267,26 +1266,10 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1267 spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); 1266 spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1268 /* assume page table is clear */ 1267 /* assume page table is clear */
1269 _pmd = pmdp_collapse_flush(vma, addr, pmd); 1268 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1270 /*
1271 * now deposit the pgtable for arch that need it
1272 * otherwise free it.
1273 */
1274 if (arch_needs_pgtable_deposit()) {
1275 /*
1276 * The deposit should be visibile only after
1277 * collapse is seen by others.
1278 */
1279 smp_wmb();
1280 pgtable_trans_huge_deposit(vma->vm_mm, pmd,
1281 pmd_pgtable(_pmd));
1282 deposited = true;
1283 }
1284 spin_unlock(ptl); 1269 spin_unlock(ptl);
1285 up_write(&vma->vm_mm->mmap_sem); 1270 up_write(&vma->vm_mm->mmap_sem);
1286 if (!deposited) { 1271 atomic_long_dec(&vma->vm_mm->nr_ptes);
1287 atomic_long_dec(&vma->vm_mm->nr_ptes); 1272 pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1288 pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1289 }
1290 } 1273 }
1291 } 1274 }
1292 i_mmap_unlock_write(mapping); 1275 i_mmap_unlock_write(mapping);
@@ -1326,8 +1309,7 @@ static void collapse_shmem(struct mm_struct *mm,
1326 VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); 1309 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1327 1310
1328 /* Only allocate from the target node */ 1311 /* Only allocate from the target node */
1329 gfp = alloc_hugepage_khugepaged_gfpmask() | 1312 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1330 __GFP_OTHER_NODE | __GFP_THISNODE;
1331 1313
1332 new_page = khugepaged_alloc_page(hpage, gfp, node); 1314 new_page = khugepaged_alloc_page(hpage, gfp, node);
1333 if (!new_page) { 1315 if (!new_page) {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4048897e7b01..b822e158b319 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -625,8 +625,8 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
625unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, 625unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
626 int nid, unsigned int lru_mask) 626 int nid, unsigned int lru_mask)
627{ 627{
628 struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
628 unsigned long nr = 0; 629 unsigned long nr = 0;
629 struct mem_cgroup_per_node *mz;
630 enum lru_list lru; 630 enum lru_list lru;
631 631
632 VM_BUG_ON((unsigned)nid >= nr_node_ids); 632 VM_BUG_ON((unsigned)nid >= nr_node_ids);
@@ -634,8 +634,7 @@ unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
634 for_each_lru(lru) { 634 for_each_lru(lru) {
635 if (!(BIT(lru) & lru_mask)) 635 if (!(BIT(lru) & lru_mask))
636 continue; 636 continue;
637 mz = mem_cgroup_nodeinfo(memcg, nid); 637 nr += mem_cgroup_get_lru_size(lruvec, lru);
638 nr += mz->lru_size[lru];
639 } 638 }
640 return nr; 639 return nr;
641} 640}
@@ -1002,6 +1001,7 @@ out:
1002 * mem_cgroup_update_lru_size - account for adding or removing an lru page 1001 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1003 * @lruvec: mem_cgroup per zone lru vector 1002 * @lruvec: mem_cgroup per zone lru vector
1004 * @lru: index of lru list the page is sitting on 1003 * @lru: index of lru list the page is sitting on
1004 * @zid: zone id of the accounted pages
1005 * @nr_pages: positive when adding or negative when removing 1005 * @nr_pages: positive when adding or negative when removing
1006 * 1006 *
1007 * This function must be called under lru_lock, just before a page is added 1007 * This function must be called under lru_lock, just before a page is added
@@ -1009,27 +1009,25 @@ out:
1009 * so as to allow it to check that lru_size 0 is consistent with list_empty). 1009 * so as to allow it to check that lru_size 0 is consistent with list_empty).
1010 */ 1010 */
1011void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, 1011void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1012 int nr_pages) 1012 int zid, int nr_pages)
1013{ 1013{
1014 struct mem_cgroup_per_node *mz; 1014 struct mem_cgroup_per_node *mz;
1015 unsigned long *lru_size; 1015 unsigned long *lru_size;
1016 long size; 1016 long size;
1017 bool empty;
1018 1017
1019 if (mem_cgroup_disabled()) 1018 if (mem_cgroup_disabled())
1020 return; 1019 return;
1021 1020
1022 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); 1021 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1023 lru_size = mz->lru_size + lru; 1022 lru_size = &mz->lru_zone_size[zid][lru];
1024 empty = list_empty(lruvec->lists + lru);
1025 1023
1026 if (nr_pages < 0) 1024 if (nr_pages < 0)
1027 *lru_size += nr_pages; 1025 *lru_size += nr_pages;
1028 1026
1029 size = *lru_size; 1027 size = *lru_size;
1030 if (WARN_ONCE(size < 0 || empty != !size, 1028 if (WARN_ONCE(size < 0,
1031 "%s(%p, %d, %d): lru_size %ld but %sempty\n", 1029 "%s(%p, %d, %d): lru_size %ld\n",
1032 __func__, lruvec, lru, nr_pages, size, empty ? "" : "not ")) { 1030 __func__, lruvec, lru, nr_pages, size)) {
1033 VM_BUG_ON(1); 1031 VM_BUG_ON(1);
1034 *lru_size = 0; 1032 *lru_size = 0;
1035 } 1033 }
@@ -4355,9 +4353,9 @@ static int mem_cgroup_do_precharge(unsigned long count)
4355 return ret; 4353 return ret;
4356 } 4354 }
4357 4355
4358 /* Try charges one by one with reclaim */ 4356 /* Try charges one by one with reclaim, but do not retry */
4359 while (count--) { 4357 while (count--) {
4360 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1); 4358 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
4361 if (ret) 4359 if (ret)
4362 return ret; 4360 return ret;
4363 mc.precharge++; 4361 mc.precharge++;
diff --git a/mm/memory.c b/mm/memory.c
index 7d23b5050248..6bf2b471e30c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3008,13 +3008,6 @@ static int do_set_pmd(struct vm_fault *vmf, struct page *page)
3008 ret = 0; 3008 ret = 0;
3009 count_vm_event(THP_FILE_MAPPED); 3009 count_vm_event(THP_FILE_MAPPED);
3010out: 3010out:
3011 /*
3012 * If we are going to fallback to pte mapping, do a
3013 * withdraw with pmd lock held.
3014 */
3015 if (arch_needs_pgtable_deposit() && ret == VM_FAULT_FALLBACK)
3016 vmf->prealloc_pte = pgtable_trans_huge_withdraw(vma->vm_mm,
3017 vmf->pmd);
3018 spin_unlock(vmf->ptl); 3011 spin_unlock(vmf->ptl);
3019 return ret; 3012 return ret;
3020} 3013}
@@ -3055,20 +3048,18 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
3055 3048
3056 ret = do_set_pmd(vmf, page); 3049 ret = do_set_pmd(vmf, page);
3057 if (ret != VM_FAULT_FALLBACK) 3050 if (ret != VM_FAULT_FALLBACK)
3058 goto fault_handled; 3051 return ret;
3059 } 3052 }
3060 3053
3061 if (!vmf->pte) { 3054 if (!vmf->pte) {
3062 ret = pte_alloc_one_map(vmf); 3055 ret = pte_alloc_one_map(vmf);
3063 if (ret) 3056 if (ret)
3064 goto fault_handled; 3057 return ret;
3065 } 3058 }
3066 3059
3067 /* Re-check under ptl */ 3060 /* Re-check under ptl */
3068 if (unlikely(!pte_none(*vmf->pte))) { 3061 if (unlikely(!pte_none(*vmf->pte)))
3069 ret = VM_FAULT_NOPAGE; 3062 return VM_FAULT_NOPAGE;
3070 goto fault_handled;
3071 }
3072 3063
3073 flush_icache_page(vma, page); 3064 flush_icache_page(vma, page);
3074 entry = mk_pte(page, vma->vm_page_prot); 3065 entry = mk_pte(page, vma->vm_page_prot);
@@ -3088,15 +3079,8 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
3088 3079
3089 /* no need to invalidate: a not-present page won't be cached */ 3080 /* no need to invalidate: a not-present page won't be cached */
3090 update_mmu_cache(vma, vmf->address, vmf->pte); 3081 update_mmu_cache(vma, vmf->address, vmf->pte);
3091 ret = 0;
3092 3082
3093fault_handled: 3083 return 0;
3094 /* preallocated pagetable is unused: free it */
3095 if (vmf->prealloc_pte) {
3096 pte_free(vmf->vma->vm_mm, vmf->prealloc_pte);
3097 vmf->prealloc_pte = 0;
3098 }
3099 return ret;
3100} 3084}
3101 3085
3102 3086
@@ -3360,15 +3344,24 @@ static int do_shared_fault(struct vm_fault *vmf)
3360static int do_fault(struct vm_fault *vmf) 3344static int do_fault(struct vm_fault *vmf)
3361{ 3345{
3362 struct vm_area_struct *vma = vmf->vma; 3346 struct vm_area_struct *vma = vmf->vma;
3347 int ret;
3363 3348
3364 /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ 3349 /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
3365 if (!vma->vm_ops->fault) 3350 if (!vma->vm_ops->fault)
3366 return VM_FAULT_SIGBUS; 3351 ret = VM_FAULT_SIGBUS;
3367 if (!(vmf->flags & FAULT_FLAG_WRITE)) 3352 else if (!(vmf->flags & FAULT_FLAG_WRITE))
3368 return do_read_fault(vmf); 3353 ret = do_read_fault(vmf);
3369 if (!(vma->vm_flags & VM_SHARED)) 3354 else if (!(vma->vm_flags & VM_SHARED))
3370 return do_cow_fault(vmf); 3355 ret = do_cow_fault(vmf);
3371 return do_shared_fault(vmf); 3356 else
3357 ret = do_shared_fault(vmf);
3358
3359 /* preallocated pagetable is unused: free it */
3360 if (vmf->prealloc_pte) {
3361 pte_free(vma->vm_mm, vmf->prealloc_pte);
3362 vmf->prealloc_pte = 0;
3363 }
3364 return ret;
3372} 3365}
3373 3366
3374static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, 3367static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
@@ -3779,8 +3772,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
3779} 3772}
3780#endif /* __PAGETABLE_PMD_FOLDED */ 3773#endif /* __PAGETABLE_PMD_FOLDED */
3781 3774
3782static int __follow_pte(struct mm_struct *mm, unsigned long address, 3775static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
3783 pte_t **ptepp, spinlock_t **ptlp) 3776 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
3784{ 3777{
3785 pgd_t *pgd; 3778 pgd_t *pgd;
3786 pud_t *pud; 3779 pud_t *pud;
@@ -3797,11 +3790,20 @@ static int __follow_pte(struct mm_struct *mm, unsigned long address,
3797 3790
3798 pmd = pmd_offset(pud, address); 3791 pmd = pmd_offset(pud, address);
3799 VM_BUG_ON(pmd_trans_huge(*pmd)); 3792 VM_BUG_ON(pmd_trans_huge(*pmd));
3800 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
3801 goto out;
3802 3793
3803 /* We cannot handle huge page PFN maps. Luckily they don't exist. */ 3794 if (pmd_huge(*pmd)) {
3804 if (pmd_huge(*pmd)) 3795 if (!pmdpp)
3796 goto out;
3797
3798 *ptlp = pmd_lock(mm, pmd);
3799 if (pmd_huge(*pmd)) {
3800 *pmdpp = pmd;
3801 return 0;
3802 }
3803 spin_unlock(*ptlp);
3804 }
3805
3806 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
3805 goto out; 3807 goto out;
3806 3808
3807 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); 3809 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
@@ -3817,16 +3819,30 @@ out:
3817 return -EINVAL; 3819 return -EINVAL;
3818} 3820}
3819 3821
3820int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, 3822static inline int follow_pte(struct mm_struct *mm, unsigned long address,
3821 spinlock_t **ptlp) 3823 pte_t **ptepp, spinlock_t **ptlp)
3824{
3825 int res;
3826
3827 /* (void) is needed to make gcc happy */
3828 (void) __cond_lock(*ptlp,
3829 !(res = __follow_pte_pmd(mm, address, ptepp, NULL,
3830 ptlp)));
3831 return res;
3832}
3833
3834int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
3835 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
3822{ 3836{
3823 int res; 3837 int res;
3824 3838
3825 /* (void) is needed to make gcc happy */ 3839 /* (void) is needed to make gcc happy */
3826 (void) __cond_lock(*ptlp, 3840 (void) __cond_lock(*ptlp,
3827 !(res = __follow_pte(mm, address, ptepp, ptlp))); 3841 !(res = __follow_pte_pmd(mm, address, ptepp, pmdpp,
3842 ptlp)));
3828 return res; 3843 return res;
3829} 3844}
3845EXPORT_SYMBOL(follow_pte_pmd);
3830 3846
3831/** 3847/**
3832 * follow_pfn - look up PFN at a user virtual address 3848 * follow_pfn - look up PFN at a user virtual address
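The __follow_pte_pmd() rework above follows a common kernel idiom: one worker function grows an optional out-parameter (pmdpp, for huge PMDs), and thin wrappers pass NULL when their callers cannot handle that case, so follow_pte() keeps its old contract while follow_pte_pmd() exposes the new one. A minimal userspace sketch of the idiom, with made-up names:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Worker: fills *minorp only when the caller supplied somewhere to put it. */
    static int lookup(int key, int *majorp, int *minorp)
    {
        if (key < 0)
            return -EINVAL;
        *majorp = key / 10;
        if (minorp)
            *minorp = key % 10;     /* optional extra result */
        else if (key % 10)
            return -EINVAL;         /* caller cannot handle this case */
        return 0;
    }

    /* Thin wrapper preserving the old, narrower contract. */
    static int lookup_major(int key, int *majorp)
    {
        return lookup(key, majorp, NULL);
    }

    int main(void)
    {
        int major;

        if (lookup_major(40, &major))
            return 1;
        printf("major=%d\n", major);    /* prints major=4 */
        return 0;
    }

The wrapper costs nothing once inlined; only the worker needs to know about the extra case.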
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e43142c15631..b8c11e063ff0 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1033,36 +1033,39 @@ static void node_states_set_node(int node, struct memory_notify *arg)
1033 node_set_state(node, N_MEMORY); 1033 node_set_state(node, N_MEMORY);
1034} 1034}
1035 1035
1036int zone_can_shift(unsigned long pfn, unsigned long nr_pages, 1036bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
1037 enum zone_type target) 1037 enum zone_type target, int *zone_shift)
1038{ 1038{
1039 struct zone *zone = page_zone(pfn_to_page(pfn)); 1039 struct zone *zone = page_zone(pfn_to_page(pfn));
1040 enum zone_type idx = zone_idx(zone); 1040 enum zone_type idx = zone_idx(zone);
1041 int i; 1041 int i;
1042 1042
1043 *zone_shift = 0;
1044
1043 if (idx < target) { 1045 if (idx < target) {
1044 /* pages must be at end of current zone */ 1046 /* pages must be at end of current zone */
1045 if (pfn + nr_pages != zone_end_pfn(zone)) 1047 if (pfn + nr_pages != zone_end_pfn(zone))
1046 return 0; 1048 return false;
1047 1049
1048 /* no zones in use between current zone and target */ 1050 /* no zones in use between current zone and target */
1049 for (i = idx + 1; i < target; i++) 1051 for (i = idx + 1; i < target; i++)
1050 if (zone_is_initialized(zone - idx + i)) 1052 if (zone_is_initialized(zone - idx + i))
1051 return 0; 1053 return false;
1052 } 1054 }
1053 1055
1054 if (target < idx) { 1056 if (target < idx) {
1055 /* pages must be at beginning of current zone */ 1057 /* pages must be at beginning of current zone */
1056 if (pfn != zone->zone_start_pfn) 1058 if (pfn != zone->zone_start_pfn)
1057 return 0; 1059 return false;
1058 1060
1059 /* no zones in use between current zone and target */ 1061 /* no zones in use between current zone and target */
1060 for (i = target + 1; i < idx; i++) 1062 for (i = target + 1; i < idx; i++)
1061 if (zone_is_initialized(zone - idx + i)) 1063 if (zone_is_initialized(zone - idx + i))
1062 return 0; 1064 return false;
1063 } 1065 }
1064 1066
1065 return target - idx; 1067 *zone_shift = target - idx;
1068 return true;
1066} 1069}
1067 1070
1068/* Must be protected by mem_hotplug_begin() */ 1071/* Must be protected by mem_hotplug_begin() */
@@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
1089 !can_online_high_movable(zone)) 1092 !can_online_high_movable(zone))
1090 return -EINVAL; 1093 return -EINVAL;
1091 1094
1092 if (online_type == MMOP_ONLINE_KERNEL) 1095 if (online_type == MMOP_ONLINE_KERNEL) {
1093 zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL); 1096 if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
1094 else if (online_type == MMOP_ONLINE_MOVABLE) 1097 return -EINVAL;
1095 zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE); 1098 } else if (online_type == MMOP_ONLINE_MOVABLE) {
1099 if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
1100 return -EINVAL;
1101 }
1096 1102
1097 zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages); 1103 zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
1098 if (!zone) 1104 if (!zone)
@@ -1477,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
1477} 1483}
1478 1484
1479/* 1485/*
1480 * Confirm all pages in a range [start, end) is belongs to the same zone. 1486 * Confirm all pages in a range [start, end) belong to the same zone.
1487 * When true, return its valid [start, end).
1481 */ 1488 */
1482int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) 1489int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
1490 unsigned long *valid_start, unsigned long *valid_end)
1483{ 1491{
1484 unsigned long pfn, sec_end_pfn; 1492 unsigned long pfn, sec_end_pfn;
1493 unsigned long start, end;
1485 struct zone *zone = NULL; 1494 struct zone *zone = NULL;
1486 struct page *page; 1495 struct page *page;
1487 int i; 1496 int i;
1488 for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn); 1497 for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
1489 pfn < end_pfn; 1498 pfn < end_pfn;
1490 pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) { 1499 pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
1491 /* Make sure the memory section is present first */ 1500 /* Make sure the memory section is present first */
1492 if (!present_section_nr(pfn_to_section_nr(pfn))) 1501 if (!present_section_nr(pfn_to_section_nr(pfn)))
1493 continue; 1502 continue;
@@ -1503,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
1503 page = pfn_to_page(pfn + i); 1512 page = pfn_to_page(pfn + i);
1504 if (zone && page_zone(page) != zone) 1513 if (zone && page_zone(page) != zone)
1505 return 0; 1514 return 0;
1515 if (!zone)
1516 start = pfn + i;
1506 zone = page_zone(page); 1517 zone = page_zone(page);
1518 end = pfn + MAX_ORDER_NR_PAGES;
1507 } 1519 }
1508 } 1520 }
1509 return 1; 1521
1522 if (zone) {
1523 *valid_start = start;
1524 *valid_end = end;
1525 return 1;
1526 } else {
1527 return 0;
1528 }
1510} 1529}
1511 1530
1512/* 1531/*
@@ -1833,6 +1852,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
1833 long offlined_pages; 1852 long offlined_pages;
1834 int ret, drain, retry_max, node; 1853 int ret, drain, retry_max, node;
1835 unsigned long flags; 1854 unsigned long flags;
1855 unsigned long valid_start, valid_end;
1836 struct zone *zone; 1856 struct zone *zone;
1837 struct memory_notify arg; 1857 struct memory_notify arg;
1838 1858
@@ -1843,10 +1863,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
1843 return -EINVAL; 1863 return -EINVAL;
1844 /* This makes hotplug much easier...and readable. 1864 /* This makes hotplug much easier...and readable.
1845 we assume this for now. .*/ 1865 we assume this for now. .*/
1846 if (!test_pages_in_a_zone(start_pfn, end_pfn)) 1866 if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
1847 return -EINVAL; 1867 return -EINVAL;
1848 1868
1849 zone = page_zone(pfn_to_page(start_pfn)); 1869 zone = page_zone(pfn_to_page(valid_start));
1850 node = zone_to_nid(zone); 1870 node = zone_to_nid(zone);
1851 nr_pages = end_pfn - start_pfn; 1871 nr_pages = end_pfn - start_pfn;
1852 1872
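zone_can_shift() previously answered two questions with one int: whether a shift is possible at all, and by how much. That made a legitimate shift of 0 indistinguishable from failure. The new signature returns bool and reports the amount through *zone_shift, which is why online_pages() above can now return -EINVAL cleanly. A sketch of the convention (hypothetical example, not the kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Success travels in the return value, the result in an out-parameter,
     * so a computed shift of 0 is no longer ambiguous with "cannot shift". */
    static bool can_shift(int idx, int target, int *shift)
    {
        *shift = 0;
        if (idx < 0 || target < 0)
            return false;
        *shift = target - idx;
        return true;
    }

    int main(void)
    {
        int shift;

        if (!can_shift(2, 2, &shift))
            return 1;
        printf("shift=%d\n", shift);    /* a valid shift of 0 */
        return 0;
    }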
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2e346645eb80..1e7873e40c9a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2017,8 +2017,8 @@ retry_cpuset:
2017 2017
2018 nmask = policy_nodemask(gfp, pol); 2018 nmask = policy_nodemask(gfp, pol);
2019 zl = policy_zonelist(gfp, pol, node); 2019 zl = policy_zonelist(gfp, pol, node);
2020 mpol_cond_put(pol);
2021 page = __alloc_pages_nodemask(gfp, order, zl, nmask); 2020 page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2021 mpol_cond_put(pol);
2022out: 2022out:
2023 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2023 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2024 goto retry_cpuset; 2024 goto retry_cpuset;
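The mempolicy.c hunk is purely an ordering fix: mpol_cond_put() used to drop the policy reference before __alloc_pages_nodemask() ran, even though the nodemask and zonelist passed to the allocator were derived from that policy. The rule it restores, sketched with a toy refcount:

    #include <stdio.h>

    struct obj {
        int refs;
        int data;
    };

    static void get_obj(struct obj *o) { o->refs++; }

    static void put_obj(struct obj *o)
    {
        if (--o->refs == 0)
            printf("freed\n");          /* last reference gone */
    }

    static int use_obj(struct obj *o) { return o->data; }

    int main(void)
    {
        struct obj o = { .refs = 1, .data = 42 };
        int v;

        get_obj(&o);
        v = use_obj(&o);    /* finish every dereference first ... */
        put_obj(&o);        /* ... only then drop the reference */
        put_obj(&o);
        return v == 42 ? 0 : 1;
    }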
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2c6d5f64feca..f3e0c69a97b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1864,14 +1864,14 @@ int move_freepages(struct zone *zone,
1864#endif 1864#endif
1865 1865
1866 for (page = start_page; page <= end_page;) { 1866 for (page = start_page; page <= end_page;) {
1867 /* Make sure we are not inadvertently changing nodes */
1868 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1869
1870 if (!pfn_valid_within(page_to_pfn(page))) { 1867 if (!pfn_valid_within(page_to_pfn(page))) {
1871 page++; 1868 page++;
1872 continue; 1869 continue;
1873 } 1870 }
1874 1871
1872 /* Make sure we are not inadvertently changing nodes */
1873 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1874
1875 if (!PageBuddy(page)) { 1875 if (!PageBuddy(page)) {
1876 page++; 1876 page++;
1877 continue; 1877 continue;
@@ -2583,30 +2583,22 @@ int __isolate_free_page(struct page *page, unsigned int order)
2583 * Update NUMA hit/miss statistics 2583 * Update NUMA hit/miss statistics
2584 * 2584 *
2585 * Must be called with interrupts disabled. 2585 * Must be called with interrupts disabled.
2586 *
2587 * When __GFP_OTHER_NODE is set assume the node of the preferred
2588 * zone is the local node. This is useful for daemons who allocate
2589 * memory on behalf of other processes.
2590 */ 2586 */
2591static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 2587static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
2592 gfp_t flags)
2593{ 2588{
2594#ifdef CONFIG_NUMA 2589#ifdef CONFIG_NUMA
2595 int local_nid = numa_node_id();
2596 enum zone_stat_item local_stat = NUMA_LOCAL; 2590 enum zone_stat_item local_stat = NUMA_LOCAL;
2597 2591
2598 if (unlikely(flags & __GFP_OTHER_NODE)) { 2592 if (z->node != numa_node_id())
2599 local_stat = NUMA_OTHER; 2593 local_stat = NUMA_OTHER;
2600 local_nid = preferred_zone->node;
2601 }
2602 2594
2603 if (z->node == local_nid) { 2595 if (z->node == preferred_zone->node)
2604 __inc_zone_state(z, NUMA_HIT); 2596 __inc_zone_state(z, NUMA_HIT);
2605 __inc_zone_state(z, local_stat); 2597 else {
2606 } else {
2607 __inc_zone_state(z, NUMA_MISS); 2598 __inc_zone_state(z, NUMA_MISS);
2608 __inc_zone_state(preferred_zone, NUMA_FOREIGN); 2599 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
2609 } 2600 }
2601 __inc_zone_state(z, local_stat);
2610#endif 2602#endif
2611} 2603}
2612 2604
@@ -2674,7 +2666,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
2674 } 2666 }
2675 2667
2676 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2668 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2677 zone_statistics(preferred_zone, zone, gfp_flags); 2669 zone_statistics(preferred_zone, zone);
2678 local_irq_restore(flags); 2670 local_irq_restore(flags);
2679 2671
2680 VM_BUG_ON_PAGE(bad_range(zone, page), page); 2672 VM_BUG_ON_PAGE(bad_range(zone, page), page);
@@ -3531,12 +3523,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3531 struct page *page = NULL; 3523 struct page *page = NULL;
3532 unsigned int alloc_flags; 3524 unsigned int alloc_flags;
3533 unsigned long did_some_progress; 3525 unsigned long did_some_progress;
3534 enum compact_priority compact_priority = DEF_COMPACT_PRIORITY; 3526 enum compact_priority compact_priority;
3535 enum compact_result compact_result; 3527 enum compact_result compact_result;
3536 int compaction_retries = 0; 3528 int compaction_retries;
3537 int no_progress_loops = 0; 3529 int no_progress_loops;
3538 unsigned long alloc_start = jiffies; 3530 unsigned long alloc_start = jiffies;
3539 unsigned int stall_timeout = 10 * HZ; 3531 unsigned int stall_timeout = 10 * HZ;
3532 unsigned int cpuset_mems_cookie;
3540 3533
3541 /* 3534 /*
3542 * In the slowpath, we sanity check order to avoid ever trying to 3535 * In the slowpath, we sanity check order to avoid ever trying to
@@ -3557,6 +3550,23 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3557 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) 3550 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
3558 gfp_mask &= ~__GFP_ATOMIC; 3551 gfp_mask &= ~__GFP_ATOMIC;
3559 3552
3553retry_cpuset:
3554 compaction_retries = 0;
3555 no_progress_loops = 0;
3556 compact_priority = DEF_COMPACT_PRIORITY;
3557 cpuset_mems_cookie = read_mems_allowed_begin();
3558 /*
3559 * We need to recalculate the starting point for the zonelist iterator
3560 * because we might have used different nodemask in the fast path, or
3561 * there was a cpuset modification and we are retrying - otherwise we
3562 * could end up iterating over non-eligible zones endlessly.
3563 */
3564 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3565 ac->high_zoneidx, ac->nodemask);
3566 if (!ac->preferred_zoneref->zone)
3567 goto nopage;
3568
3569
3560 /* 3570 /*
3561 * The fast path uses conservative alloc_flags to succeed only until 3571 * The fast path uses conservative alloc_flags to succeed only until
3562 * kswapd needs to be woken up, and to avoid the cost of setting up 3572 * kswapd needs to be woken up, and to avoid the cost of setting up
@@ -3716,6 +3726,13 @@ retry:
3716 &compaction_retries)) 3726 &compaction_retries))
3717 goto retry; 3727 goto retry;
3718 3728
3729 /*
3730 * It's possible we raced with cpuset update so the OOM would be
3731 * premature (see below the nopage: label for full explanation).
3732 */
3733 if (read_mems_allowed_retry(cpuset_mems_cookie))
3734 goto retry_cpuset;
3735
3719 /* Reclaim has failed us, start killing things */ 3736 /* Reclaim has failed us, start killing things */
3720 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 3737 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
3721 if (page) 3738 if (page)
@@ -3728,6 +3745,16 @@ retry:
3728 } 3745 }
3729 3746
3730nopage: 3747nopage:
3748 /*
3749 * When updating a task's mems_allowed or mempolicy nodemask, it is
3750 * possible to race with parallel threads in such a way that our
3751 * allocation can fail while the mask is being updated. If we are about
3752 * to fail, check if the cpuset changed during allocation and if so,
3753 * retry.
3754 */
3755 if (read_mems_allowed_retry(cpuset_mems_cookie))
3756 goto retry_cpuset;
3757
3731 warn_alloc(gfp_mask, 3758 warn_alloc(gfp_mask,
3732 "page allocation failure: order:%u", order); 3759 "page allocation failure: order:%u", order);
3733got_pg: 3760got_pg:
@@ -3742,7 +3769,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3742 struct zonelist *zonelist, nodemask_t *nodemask) 3769 struct zonelist *zonelist, nodemask_t *nodemask)
3743{ 3770{
3744 struct page *page; 3771 struct page *page;
3745 unsigned int cpuset_mems_cookie;
3746 unsigned int alloc_flags = ALLOC_WMARK_LOW; 3772 unsigned int alloc_flags = ALLOC_WMARK_LOW;
3747 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */ 3773 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
3748 struct alloc_context ac = { 3774 struct alloc_context ac = {
@@ -3779,9 +3805,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
3779 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) 3805 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
3780 alloc_flags |= ALLOC_CMA; 3806 alloc_flags |= ALLOC_CMA;
3781 3807
3782retry_cpuset:
3783 cpuset_mems_cookie = read_mems_allowed_begin();
3784
3785 /* Dirty zone balancing only done in the fast path */ 3808 /* Dirty zone balancing only done in the fast path */
3786 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); 3809 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
3787 3810
@@ -3792,8 +3815,13 @@ retry_cpuset:
3792 */ 3815 */
3793 ac.preferred_zoneref = first_zones_zonelist(ac.zonelist, 3816 ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
3794 ac.high_zoneidx, ac.nodemask); 3817 ac.high_zoneidx, ac.nodemask);
3795 if (!ac.preferred_zoneref) { 3818 if (!ac.preferred_zoneref->zone) {
3796 page = NULL; 3819 page = NULL;
3820 /*
3821 * This might be due to race with cpuset_current_mems_allowed
3822 * update, so make sure we retry with original nodemask in the
3823 * slow path.
3824 */
3797 goto no_zone; 3825 goto no_zone;
3798 } 3826 }
3799 3827
@@ -3802,6 +3830,7 @@ retry_cpuset:
3802 if (likely(page)) 3830 if (likely(page))
3803 goto out; 3831 goto out;
3804 3832
3833no_zone:
3805 /* 3834 /*
3806 * Runtime PM, block IO and its error handling path can deadlock 3835 * Runtime PM, block IO and its error handling path can deadlock
3807 * because I/O on the device might not complete. 3836 * because I/O on the device might not complete.
@@ -3813,21 +3842,10 @@ retry_cpuset:
3813 * Restore the original nodemask if it was potentially replaced with 3842 * Restore the original nodemask if it was potentially replaced with
3814 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 3843 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
3815 */ 3844 */
3816 if (cpusets_enabled()) 3845 if (unlikely(ac.nodemask != nodemask))
3817 ac.nodemask = nodemask; 3846 ac.nodemask = nodemask;
3818 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
3819 3847
3820no_zone: 3848 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
3821 /*
3822 * When updating a task's mems_allowed, it is possible to race with
3823 * parallel threads in such a way that an allocation can fail while
3824 * the mask is being updated. If a page allocation is about to fail,
3825 * check if the cpuset changed during allocation and if so, retry.
3826 */
3827 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
3828 alloc_mask = gfp_mask;
3829 goto retry_cpuset;
3830 }
3831 3849
3832out: 3850out:
3833 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page && 3851 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
@@ -3904,8 +3922,8 @@ EXPORT_SYMBOL(free_pages);
3904 * drivers to provide a backing region of memory for use as either an 3922 * drivers to provide a backing region of memory for use as either an
3905 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 3923 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
3906 */ 3924 */
3907static struct page *__page_frag_refill(struct page_frag_cache *nc, 3925static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
3908 gfp_t gfp_mask) 3926 gfp_t gfp_mask)
3909{ 3927{
3910 struct page *page = NULL; 3928 struct page *page = NULL;
3911 gfp_t gfp = gfp_mask; 3929 gfp_t gfp = gfp_mask;
@@ -3925,22 +3943,23 @@ static struct page *__page_frag_refill(struct page_frag_cache *nc,
3925 return page; 3943 return page;
3926} 3944}
3927 3945
3928void __page_frag_drain(struct page *page, unsigned int order, 3946void __page_frag_cache_drain(struct page *page, unsigned int count)
3929 unsigned int count)
3930{ 3947{
3931 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 3948 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
3932 3949
3933 if (page_ref_sub_and_test(page, count)) { 3950 if (page_ref_sub_and_test(page, count)) {
3951 unsigned int order = compound_order(page);
3952
3934 if (order == 0) 3953 if (order == 0)
3935 free_hot_cold_page(page, false); 3954 free_hot_cold_page(page, false);
3936 else 3955 else
3937 __free_pages_ok(page, order); 3956 __free_pages_ok(page, order);
3938 } 3957 }
3939} 3958}
3940EXPORT_SYMBOL(__page_frag_drain); 3959EXPORT_SYMBOL(__page_frag_cache_drain);
3941 3960
3942void *__alloc_page_frag(struct page_frag_cache *nc, 3961void *page_frag_alloc(struct page_frag_cache *nc,
3943 unsigned int fragsz, gfp_t gfp_mask) 3962 unsigned int fragsz, gfp_t gfp_mask)
3944{ 3963{
3945 unsigned int size = PAGE_SIZE; 3964 unsigned int size = PAGE_SIZE;
3946 struct page *page; 3965 struct page *page;
@@ -3948,7 +3967,7 @@ void *__alloc_page_frag(struct page_frag_cache *nc,
3948 3967
3949 if (unlikely(!nc->va)) { 3968 if (unlikely(!nc->va)) {
3950refill: 3969refill:
3951 page = __page_frag_refill(nc, gfp_mask); 3970 page = __page_frag_cache_refill(nc, gfp_mask);
3952 if (!page) 3971 if (!page)
3953 return NULL; 3972 return NULL;
3954 3973
@@ -3991,19 +4010,19 @@ refill:
3991 4010
3992 return nc->va + offset; 4011 return nc->va + offset;
3993} 4012}
3994EXPORT_SYMBOL(__alloc_page_frag); 4013EXPORT_SYMBOL(page_frag_alloc);
3995 4014
3996/* 4015/*
3997 * Frees a page fragment allocated out of either a compound or order 0 page. 4016 * Frees a page fragment allocated out of either a compound or order 0 page.
3998 */ 4017 */
3999void __free_page_frag(void *addr) 4018void page_frag_free(void *addr)
4000{ 4019{
4001 struct page *page = virt_to_head_page(addr); 4020 struct page *page = virt_to_head_page(addr);
4002 4021
4003 if (unlikely(put_page_testzero(page))) 4022 if (unlikely(put_page_testzero(page)))
4004 __free_pages_ok(page, compound_order(page)); 4023 __free_pages_ok(page, compound_order(page));
4005} 4024}
4006EXPORT_SYMBOL(__free_page_frag); 4025EXPORT_SYMBOL(page_frag_free);
4007 4026
4008static void *make_alloc_exact(unsigned long addr, unsigned int order, 4027static void *make_alloc_exact(unsigned long addr, unsigned int order,
4009 size_t size) 4028 size_t size)
@@ -7255,6 +7274,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
7255 .zone = page_zone(pfn_to_page(start)), 7274 .zone = page_zone(pfn_to_page(start)),
7256 .mode = MIGRATE_SYNC, 7275 .mode = MIGRATE_SYNC,
7257 .ignore_skip_hint = true, 7276 .ignore_skip_hint = true,
7277 .gfp_mask = GFP_KERNEL,
7258 }; 7278 };
7259 INIT_LIST_HEAD(&cc.migratepages); 7279 INIT_LIST_HEAD(&cc.migratepages);
7260 7280
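The page_alloc.c changes above move the cpuset race handling into the slowpath: take a cookie from read_mems_allowed_begin(), reset the per-attempt state, and if read_mems_allowed_retry() reports that the allowed mask changed, jump back to retry_cpuset instead of failing or OOM-killing prematurely. The cookie/retry shape in miniature (names below are invented; the real primitives are seqcount-based):

    #include <stdio.h>

    static unsigned int seq;    /* bumped by writers when the mask changes */

    static unsigned int read_begin(void) { return seq; }
    static int read_retry(unsigned int cookie) { return seq != cookie; }

    static int try_alloc(void) { return -1; /* pretend the attempt failed */ }

    int main(void)
    {
        unsigned int cookie;
        int ret;

    retry:
        cookie = read_begin();  /* also the spot to reset per-attempt state */
        ret = try_alloc();
        if (ret < 0 && read_retry(cookie))
            goto retry;         /* mask changed under us: not a real failure */
        printf("ret=%d\n", ret);
        return 0;
    }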
diff --git a/mm/shmem.c b/mm/shmem.c
index bb53285a1d99..3a7587a0314d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -415,6 +415,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
415 struct shrink_control *sc, unsigned long nr_to_split) 415 struct shrink_control *sc, unsigned long nr_to_split)
416{ 416{
417 LIST_HEAD(list), *pos, *next; 417 LIST_HEAD(list), *pos, *next;
418 LIST_HEAD(to_remove);
418 struct inode *inode; 419 struct inode *inode;
419 struct shmem_inode_info *info; 420 struct shmem_inode_info *info;
420 struct page *page; 421 struct page *page;
@@ -441,9 +442,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
441 /* Check if there's anything to gain */ 442 /* Check if there's anything to gain */
442 if (round_up(inode->i_size, PAGE_SIZE) == 443 if (round_up(inode->i_size, PAGE_SIZE) ==
443 round_up(inode->i_size, HPAGE_PMD_SIZE)) { 444 round_up(inode->i_size, HPAGE_PMD_SIZE)) {
444 list_del_init(&info->shrinklist); 445 list_move(&info->shrinklist, &to_remove);
445 removed++; 446 removed++;
446 iput(inode);
447 goto next; 447 goto next;
448 } 448 }
449 449
@@ -454,6 +454,13 @@ next:
454 } 454 }
455 spin_unlock(&sbinfo->shrinklist_lock); 455 spin_unlock(&sbinfo->shrinklist_lock);
456 456
457 list_for_each_safe(pos, next, &to_remove) {
458 info = list_entry(pos, struct shmem_inode_info, shrinklist);
459 inode = &info->vfs_inode;
460 list_del_init(&info->shrinklist);
461 iput(inode);
462 }
463
457 list_for_each_safe(pos, next, &list) { 464 list_for_each_safe(pos, next, &list) {
458 int ret; 465 int ret;
459 466
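The shmem.c fix is the standard "no sleeping release under a spinlock" pattern: iput() can sleep, so entries are first moved to a private to_remove list while shrinklist_lock is held, and the iput() calls happen only after the lock is dropped. In miniature (locking reduced to comments):

    #include <stdio.h>

    #define N 3

    static int pending[N] = { 1, 1, 1 };

    static void release(int i) { printf("release %d (may sleep)\n", i); }

    int main(void)
    {
        int to_remove[N], n = 0, i;

        /* spin_lock(&list_lock); */
        for (i = 0; i < N; i++) {
            if (pending[i]) {
                pending[i] = 0;
                to_remove[n++] = i; /* the list_move() step */
            }
        }
        /* spin_unlock(&list_lock); */

        for (i = 0; i < n; i++)
            release(to_remove[i]);  /* safe: no spinlock held any more */
        return 0;
    }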
diff --git a/mm/slab.c b/mm/slab.c
index 29bc6c0dedd0..4f2ec6bb46eb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2457,7 +2457,6 @@ union freelist_init_state {
2457 unsigned int pos; 2457 unsigned int pos;
2458 unsigned int *list; 2458 unsigned int *list;
2459 unsigned int count; 2459 unsigned int count;
2460 unsigned int rand;
2461 }; 2460 };
2462 struct rnd_state rnd_state; 2461 struct rnd_state rnd_state;
2463}; 2462};
@@ -2483,8 +2482,7 @@ static bool freelist_state_initialize(union freelist_init_state *state,
2483 } else { 2482 } else {
2484 state->list = cachep->random_seq; 2483 state->list = cachep->random_seq;
2485 state->count = count; 2484 state->count = count;
2486 state->pos = 0; 2485 state->pos = rand % count;
2487 state->rand = rand;
2488 ret = true; 2486 ret = true;
2489 } 2487 }
2490 return ret; 2488 return ret;
@@ -2493,7 +2491,9 @@ static bool freelist_state_initialize(union freelist_init_state *state,
2493/* Get the next entry on the list and randomize it using a random shift */ 2491/* Get the next entry on the list and randomize it using a random shift */
2494static freelist_idx_t next_random_slot(union freelist_init_state *state) 2492static freelist_idx_t next_random_slot(union freelist_init_state *state)
2495{ 2493{
2496 return (state->list[state->pos++] + state->rand) % state->count; 2494 if (state->pos >= state->count)
2495 state->pos = 0;
2496 return state->list[state->pos++];
2497} 2497}
2498 2498
2499/* Swap two freelist entries */ 2499/* Swap two freelist entries */
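The slab.c change drops the per-lookup "+ rand" arithmetic, which could hand out duplicate slot indexes, and instead starts at a random position in the precomputed permutation and wraps at the end, so each precomputed entry is used exactly once per pass. A sketch of the new indexing (toy permutation, fixed "random" seed):

    #include <stdio.h>

    static unsigned int list[8] = { 3, 1, 7, 5, 0, 2, 6, 4 };   /* shuffled once */
    static unsigned int pos, count = 8;

    /* Walk the precomputed permutation from a random start, wrapping at the
     * end, so every entry is handed out exactly once per pass. */
    static unsigned int next_random_slot(void)
    {
        if (pos >= count)
            pos = 0;
        return list[pos++];
    }

    int main(void)
    {
        unsigned int i;

        pos = 42 % count;   /* "rand % count": random start, always in range */
        for (i = 0; i < count; i++)
            printf("%u ", next_random_slot());
        printf("\n");
        return 0;
    }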
diff --git a/mm/slub.c b/mm/slub.c
index 067598a00849..7ec0a965c6a3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
496 return 1; 496 return 1;
497} 497}
498 498
499static void print_section(char *text, u8 *addr, unsigned int length) 499static void print_section(char *level, char *text, u8 *addr,
500 unsigned int length)
500{ 501{
501 metadata_access_enable(); 502 metadata_access_enable();
502 print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr, 503 print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
503 length, 1); 504 length, 1);
504 metadata_access_disable(); 505 metadata_access_disable();
505} 506}
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
636 p, p - addr, get_freepointer(s, p)); 637 p, p - addr, get_freepointer(s, p));
637 638
638 if (s->flags & SLAB_RED_ZONE) 639 if (s->flags & SLAB_RED_ZONE)
639 print_section("Redzone ", p - s->red_left_pad, s->red_left_pad); 640 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
641 s->red_left_pad);
640 else if (p > addr + 16) 642 else if (p > addr + 16)
641 print_section("Bytes b4 ", p - 16, 16); 643 print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
642 644
643 print_section("Object ", p, min_t(unsigned long, s->object_size, 645 print_section(KERN_ERR, "Object ", p,
644 PAGE_SIZE)); 646 min_t(unsigned long, s->object_size, PAGE_SIZE));
645 if (s->flags & SLAB_RED_ZONE) 647 if (s->flags & SLAB_RED_ZONE)
646 print_section("Redzone ", p + s->object_size, 648 print_section(KERN_ERR, "Redzone ", p + s->object_size,
647 s->inuse - s->object_size); 649 s->inuse - s->object_size);
648 650
649 if (s->offset) 651 if (s->offset)
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
658 660
659 if (off != size_from_object(s)) 661 if (off != size_from_object(s))
660 /* Beginning of the filler is the free pointer */ 662 /* Beginning of the filler is the free pointer */
661 print_section("Padding ", p + off, size_from_object(s) - off); 663 print_section(KERN_ERR, "Padding ", p + off,
664 size_from_object(s) - off);
662 665
663 dump_stack(); 666 dump_stack();
664} 667}
@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
820 end--; 823 end--;
821 824
822 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1); 825 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
823 print_section("Padding ", end - remainder, remainder); 826 print_section(KERN_ERR, "Padding ", end - remainder, remainder);
824 827
825 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end); 828 restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
826 return 0; 829 return 0;
@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
973 page->freelist); 976 page->freelist);
974 977
975 if (!alloc) 978 if (!alloc)
976 print_section("Object ", (void *)object, 979 print_section(KERN_INFO, "Object ", (void *)object,
977 s->object_size); 980 s->object_size);
978 981
979 dump_stack(); 982 dump_stack();
@@ -1419,6 +1422,10 @@ static int init_cache_random_seq(struct kmem_cache *s)
1419 int err; 1422 int err;
1420 unsigned long i, count = oo_objects(s->oo); 1423 unsigned long i, count = oo_objects(s->oo);
1421 1424
1425 /* Bailout if already initialised */
1426 if (s->random_seq)
1427 return 0;
1428
1422 err = cache_random_seq_create(s, count, GFP_KERNEL); 1429 err = cache_random_seq_create(s, count, GFP_KERNEL);
1423 if (err) { 1430 if (err) {
1424 pr_err("SLUB: Unable to initialize free list for %s\n", 1431 pr_err("SLUB: Unable to initialize free list for %s\n",
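Two independent improvements in slub.c: print_section() now takes the log level from its caller instead of hardcoding KERN_ERR, and init_cache_random_seq() bails out early when s->random_seq is already set, making repeated initialisation harmless. The bail-out pattern, as a standalone sketch:

    #include <stdio.h>
    #include <stdlib.h>

    static int *random_seq;

    static int init_random_seq(int count)
    {
        /* Bail out if already initialised: the second call is a no-op. */
        if (random_seq)
            return 0;
        random_seq = calloc(count, sizeof(*random_seq));
        return random_seq ? 0 : -1;
    }

    int main(void)
    {
        if (init_random_seq(16) || init_random_seq(16))
            return 1;
        printf("initialised exactly once\n");
        free(random_seq);
        return 0;
    }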
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1c6e0321205d..4761701d1721 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -943,11 +943,25 @@ bool reuse_swap_page(struct page *page, int *total_mapcount)
943 count = page_trans_huge_mapcount(page, total_mapcount); 943 count = page_trans_huge_mapcount(page, total_mapcount);
944 if (count <= 1 && PageSwapCache(page)) { 944 if (count <= 1 && PageSwapCache(page)) {
945 count += page_swapcount(page); 945 count += page_swapcount(page);
946 if (count == 1 && !PageWriteback(page)) { 946 if (count != 1)
947 goto out;
948 if (!PageWriteback(page)) {
947 delete_from_swap_cache(page); 949 delete_from_swap_cache(page);
948 SetPageDirty(page); 950 SetPageDirty(page);
951 } else {
952 swp_entry_t entry;
953 struct swap_info_struct *p;
954
955 entry.val = page_private(page);
956 p = swap_info_get(entry);
957 if (p->flags & SWP_STABLE_WRITES) {
958 spin_unlock(&p->lock);
959 return false;
960 }
961 spin_unlock(&p->lock);
949 } 962 }
950 } 963 }
964out:
951 return count <= 1; 965 return count <= 1;
952} 966}
953 967
@@ -2448,6 +2462,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2448 error = -ENOMEM; 2462 error = -ENOMEM;
2449 goto bad_swap; 2463 goto bad_swap;
2450 } 2464 }
2465
2466 if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
2467 p->flags |= SWP_STABLE_WRITES;
2468
2451 if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) { 2469 if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
2452 int cpu; 2470 int cpu;
2453 2471
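The swapfile.c hunks cache a backing-device property at swapon time: when the bdi requires stable pages, SWP_STABLE_WRITES is set once, and reuse_swap_page() later refuses to reuse a page still under writeback to such a device, since the data would be modified mid-write. Caching a capability bit and testing it on the hot path, sketched:

    #include <stdbool.h>
    #include <stdio.h>

    #define SWP_STABLE_WRITES (1u << 0)

    struct swap_info {
        unsigned int flags;
    };

    static bool backend_needs_stable_pages(void)
    {
        return true;    /* would ask the bdi, once, at swapon */
    }

    static void swapon(struct swap_info *p)
    {
        if (backend_needs_stable_pages())
            p->flags |= SWP_STABLE_WRITES;  /* cache the property */
    }

    static bool may_reuse_under_writeback(const struct swap_info *p)
    {
        return !(p->flags & SWP_STABLE_WRITES); /* hot path: one bit test */
    }

    int main(void)
    {
        struct swap_info si = { 0 };

        swapon(&si);
        printf("reuse under writeback: %d\n", may_reuse_under_writeback(&si));
        return 0;
    }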
diff --git a/mm/truncate.c b/mm/truncate.c
index fd97f1dbce29..dd7b24e083c5 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -24,20 +24,12 @@
24#include <linux/rmap.h> 24#include <linux/rmap.h>
25#include "internal.h" 25#include "internal.h"
26 26
27static void clear_exceptional_entry(struct address_space *mapping, 27static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
28 pgoff_t index, void *entry) 28 void *entry)
29{ 29{
30 struct radix_tree_node *node; 30 struct radix_tree_node *node;
31 void **slot; 31 void **slot;
32 32
33 /* Handled by shmem itself */
34 if (shmem_mapping(mapping))
35 return;
36
37 if (dax_mapping(mapping)) {
38 dax_delete_mapping_entry(mapping, index);
39 return;
40 }
41 spin_lock_irq(&mapping->tree_lock); 33 spin_lock_irq(&mapping->tree_lock);
42 /* 34 /*
43 * Regular page slots are stabilized by the page lock even 35 * Regular page slots are stabilized by the page lock even
@@ -55,6 +47,56 @@ unlock:
55 spin_unlock_irq(&mapping->tree_lock); 47 spin_unlock_irq(&mapping->tree_lock);
56} 48}
57 49
50/*
51 * Unconditionally remove exceptional entry. Usually called from truncate path.
52 */
53static void truncate_exceptional_entry(struct address_space *mapping,
54 pgoff_t index, void *entry)
55{
56 /* Handled by shmem itself */
57 if (shmem_mapping(mapping))
58 return;
59
60 if (dax_mapping(mapping)) {
61 dax_delete_mapping_entry(mapping, index);
62 return;
63 }
64 clear_shadow_entry(mapping, index, entry);
65}
66
67/*
68 * Invalidate exceptional entry if easily possible. This handles exceptional
69 * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and
70 * clean entries.
71 */
72static int invalidate_exceptional_entry(struct address_space *mapping,
73 pgoff_t index, void *entry)
74{
75 /* Handled by shmem itself */
76 if (shmem_mapping(mapping))
77 return 1;
78 if (dax_mapping(mapping))
79 return dax_invalidate_mapping_entry(mapping, index);
80 clear_shadow_entry(mapping, index, entry);
81 return 1;
82}
83
84/*
85 * Invalidate exceptional entry if clean. This handles exceptional entries for
86 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
87 */
88static int invalidate_exceptional_entry2(struct address_space *mapping,
89 pgoff_t index, void *entry)
90{
91 /* Handled by shmem itself */
92 if (shmem_mapping(mapping))
93 return 1;
94 if (dax_mapping(mapping))
95 return dax_invalidate_mapping_entry_sync(mapping, index);
96 clear_shadow_entry(mapping, index, entry);
97 return 1;
98}
99
58/** 100/**
59 * do_invalidatepage - invalidate part or all of a page 101 * do_invalidatepage - invalidate part or all of a page
60 * @page: the page which is affected 102 * @page: the page which is affected
@@ -262,7 +304,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
262 break; 304 break;
263 305
264 if (radix_tree_exceptional_entry(page)) { 306 if (radix_tree_exceptional_entry(page)) {
265 clear_exceptional_entry(mapping, index, page); 307 truncate_exceptional_entry(mapping, index,
308 page);
266 continue; 309 continue;
267 } 310 }
268 311
@@ -351,7 +394,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
351 } 394 }
352 395
353 if (radix_tree_exceptional_entry(page)) { 396 if (radix_tree_exceptional_entry(page)) {
354 clear_exceptional_entry(mapping, index, page); 397 truncate_exceptional_entry(mapping, index,
398 page);
355 continue; 399 continue;
356 } 400 }
357 401
@@ -470,7 +514,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
470 break; 514 break;
471 515
472 if (radix_tree_exceptional_entry(page)) { 516 if (radix_tree_exceptional_entry(page)) {
473 clear_exceptional_entry(mapping, index, page); 517 invalidate_exceptional_entry(mapping, index,
518 page);
474 continue; 519 continue;
475 } 520 }
476 521
@@ -592,7 +637,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
592 break; 637 break;
593 638
594 if (radix_tree_exceptional_entry(page)) { 639 if (radix_tree_exceptional_entry(page)) {
595 clear_exceptional_entry(mapping, index, page); 640 if (!invalidate_exceptional_entry2(mapping,
641 index, page))
642 ret = -EBUSY;
596 continue; 643 continue;
597 } 644 }
598 645
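The truncate.c refactor splits the old catch-all clear_exceptional_entry() into purpose-built entry points (truncate, invalidate, invalidate2) that share clear_shadow_entry() as their common core; they differ only in how aggressively DAX entries may be evicted. The shape of the split, reduced to a sketch:

    #include <stdio.h>

    static void clear_shadow(int idx) { printf("clear shadow %d\n", idx); }

    /* Truncation: the entry is removed unconditionally. */
    static void truncate_entry(int idx, int is_dax)
    {
        if (is_dax) {
            printf("dax delete %d\n", idx);
            return;
        }
        clear_shadow(idx);
    }

    /* Invalidation: may refuse (return 0) when the entry is still dirty. */
    static int invalidate_entry(int idx, int is_dax, int dirty)
    {
        if (is_dax) {
            if (dirty)
                return 0;
            printf("dax evict %d\n", idx);
            return 1;
        }
        clear_shadow(idx);
        return 1;
    }

    int main(void)
    {
        truncate_entry(1, 1);
        return invalidate_entry(2, 1, 1);   /* dirty DAX entry: refused */
    }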
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6aa5b01d3e75..532a2a750952 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -242,6 +242,16 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
242 return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru); 242 return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
243} 243}
244 244
245unsigned long lruvec_zone_lru_size(struct lruvec *lruvec, enum lru_list lru,
246 int zone_idx)
247{
248 if (!mem_cgroup_disabled())
249 return mem_cgroup_get_zone_lru_size(lruvec, lru, zone_idx);
250
251 return zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zone_idx],
252 NR_ZONE_LRU_BASE + lru);
253}
254
245/* 255/*
246 * Add a shrinker callback to be called from the vm. 256 * Add a shrinker callback to be called from the vm.
247 */ 257 */
@@ -1382,8 +1392,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1382 * be complete before mem_cgroup_update_lru_size due to a santity check. 1392 * be complete before mem_cgroup_update_lru_size due to a santity check.
1383 */ 1393 */
1384static __always_inline void update_lru_sizes(struct lruvec *lruvec, 1394static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1385 enum lru_list lru, unsigned long *nr_zone_taken, 1395 enum lru_list lru, unsigned long *nr_zone_taken)
1386 unsigned long nr_taken)
1387{ 1396{
1388 int zid; 1397 int zid;
1389 1398
@@ -1392,11 +1401,11 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
1392 continue; 1401 continue;
1393 1402
1394 __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); 1403 __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1395 }
1396
1397#ifdef CONFIG_MEMCG 1404#ifdef CONFIG_MEMCG
1398 mem_cgroup_update_lru_size(lruvec, lru, -nr_taken); 1405 mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
1399#endif 1406#endif
1407 }
1408
1400} 1409}
1401 1410
1402/* 1411/*
@@ -1501,7 +1510,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1501 *nr_scanned = scan; 1510 *nr_scanned = scan;
1502 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan, 1511 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
1503 nr_taken, mode, is_file_lru(lru)); 1512 nr_taken, mode, is_file_lru(lru));
1504 update_lru_sizes(lruvec, lru, nr_zone_taken, nr_taken); 1513 update_lru_sizes(lruvec, lru, nr_zone_taken);
1505 return nr_taken; 1514 return nr_taken;
1506} 1515}
1507 1516
@@ -2047,10 +2056,8 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
2047 if (!managed_zone(zone)) 2056 if (!managed_zone(zone))
2048 continue; 2057 continue;
2049 2058
2050 inactive_zone = zone_page_state(zone, 2059 inactive_zone = lruvec_zone_lru_size(lruvec, file * LRU_FILE, zid);
2051 NR_ZONE_LRU_BASE + (file * LRU_FILE)); 2060 active_zone = lruvec_zone_lru_size(lruvec, (file * LRU_FILE) + LRU_ACTIVE, zid);
2052 active_zone = zone_page_state(zone,
2053 NR_ZONE_LRU_BASE + (file * LRU_FILE) + LRU_ACTIVE);
2054 2061
2055 inactive -= min(inactive, inactive_zone); 2062 inactive -= min(inactive, inactive_zone);
2056 active -= min(active, active_zone); 2063 active -= min(active, active_zone);
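lruvec_zone_lru_size() above is a runtime dispatch: with memcg active it reads the per-cgroup per-zone LRU counter, otherwise it falls back to the node's plain zone vmstat. One accessor over two backing stores, sketched with invented counters:

    #include <stdbool.h>
    #include <stdio.h>

    static bool memcg_disabled = true;
    static long zone_count[4] = { 10, 20, 30, 40 };
    static long memcg_count[4] = { 1, 2, 3, 4 };

    /* One accessor, two backing stores; callers never care which. */
    static long zone_lru_size(int zone_idx)
    {
        if (!memcg_disabled)
            return memcg_count[zone_idx];
        return zone_count[zone_idx];
    }

    int main(void)
    {
        printf("%ld\n", zone_lru_size(2));  /* 30 while memcg is disabled */
        return 0;
    }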
diff --git a/mm/workingset.c b/mm/workingset.c
index 241fa5d6b3b2..abb58ffa3c64 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -473,7 +473,8 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
473 if (WARN_ON_ONCE(node->exceptional)) 473 if (WARN_ON_ONCE(node->exceptional))
474 goto out_invalid; 474 goto out_invalid;
475 inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM); 475 inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
476 __radix_tree_delete_node(&mapping->page_tree, node); 476 __radix_tree_delete_node(&mapping->page_tree, node,
477 workingset_update_node, mapping);
477 478
478out_invalid: 479out_invalid:
479 spin_unlock(&mapping->tree_lock); 480 spin_unlock(&mapping->tree_lock);
diff --git a/mm/zswap.c b/mm/zswap.c
index 067a0d62f318..cabf09e0128b 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry;
78 78
79/* Enable/disable zswap (disabled by default) */ 79/* Enable/disable zswap (disabled by default) */
80static bool zswap_enabled; 80static bool zswap_enabled;
81module_param_named(enabled, zswap_enabled, bool, 0644); 81static int zswap_enabled_param_set(const char *,
82 const struct kernel_param *);
83static struct kernel_param_ops zswap_enabled_param_ops = {
84 .set = zswap_enabled_param_set,
85 .get = param_get_bool,
86};
87module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
82 88
83/* Crypto compressor to use */ 89/* Crypto compressor to use */
84#define ZSWAP_COMPRESSOR_DEFAULT "lzo" 90#define ZSWAP_COMPRESSOR_DEFAULT "lzo"
@@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0);
176/* used by param callback function */ 182/* used by param callback function */
177static bool zswap_init_started; 183static bool zswap_init_started;
178 184
185/* fatal error during init */
186static bool zswap_init_failed;
187
179/********************************* 188/*********************************
180* helpers and fwd declarations 189* helpers and fwd declarations
181**********************************/ 190**********************************/
@@ -624,6 +633,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
624 char *s = strstrip((char *)val); 633 char *s = strstrip((char *)val);
625 int ret; 634 int ret;
626 635
636 if (zswap_init_failed) {
637 pr_err("can't set param, initialization failed\n");
638 return -ENODEV;
639 }
640
627 /* no change required */ 641 /* no change required */
628 if (!strcmp(s, *(char **)kp->arg)) 642 if (!strcmp(s, *(char **)kp->arg))
629 return 0; 643 return 0;
@@ -703,6 +717,17 @@ static int zswap_zpool_param_set(const char *val,
703 return __zswap_param_set(val, kp, NULL, zswap_compressor); 717 return __zswap_param_set(val, kp, NULL, zswap_compressor);
704} 718}
705 719
720static int zswap_enabled_param_set(const char *val,
721 const struct kernel_param *kp)
722{
723 if (zswap_init_failed) {
724 pr_err("can't enable, initialization failed\n");
725 return -ENODEV;
726 }
727
728 return param_set_bool(val, kp);
729}
730
706/********************************* 731/*********************************
707* writeback code 732* writeback code
708**********************************/ 733**********************************/
@@ -1201,6 +1226,9 @@ hp_fail:
1201dstmem_fail: 1226dstmem_fail:
1202 zswap_entry_cache_destroy(); 1227 zswap_entry_cache_destroy();
1203cache_fail: 1228cache_fail:
1229 /* if built-in, we aren't unloaded on failure; don't allow use */
1230 zswap_init_failed = true;
1231 zswap_enabled = false;
1204 return -ENOMEM; 1232 return -ENOMEM;
1205} 1233}
1206/* must be late so crypto has time to come up */ 1234/* must be late so crypto has time to come up */
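zswap's "enabled" knob is promoted from a plain module_param() to module_param_cb() with a custom setter, so that once zswap_init_failed is set the parameter permanently refuses writes; built-in code is not unloaded on init failure, so the flag is the only fence against using a half-initialised state. The guarded setter in miniature (the param plumbing is elided):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool init_failed;
    static bool enabled;

    /* Refuse the write once init has failed, instead of letting a user
     * flip the switch on a module that never finished setting up. */
    static int enabled_set(bool val)
    {
        if (init_failed) {
            fprintf(stderr, "can't enable, initialization failed\n");
            return -ENODEV;
        }
        enabled = val;
        return 0;
    }

    int main(void)
    {
        init_failed = true;     /* pretend init blew up */
        return enabled_set(true) == -ENODEV ? 0 : 1;
    }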
diff --git a/net/Kconfig b/net/Kconfig
index a1005007224c..a29bb4b41c50 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -258,10 +258,6 @@ config XPS
258config HWBM 258config HWBM
259 bool 259 bool
260 260
261config SOCK_CGROUP_DATA
262 bool
263 default n
264
265config CGROUP_NET_PRIO 261config CGROUP_NET_PRIO
266 bool "Network priority cgroup" 262 bool "Network priority cgroup"
267 depends on CGROUPS 263 depends on CGROUPS
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 019557d0a11d..09cfe87f0a44 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1059,7 +1059,9 @@ static void __exit lane_module_cleanup(void)
1059{ 1059{
1060 int i; 1060 int i;
1061 1061
1062#ifdef CONFIG_PROC_FS
1062 remove_proc_entry("lec", atm_proc_root); 1063 remove_proc_entry("lec", atm_proc_root);
1064#endif
1063 1065
1064 deregister_atm_ioctl(&lane_ioctl_ops); 1066 deregister_atm_ioctl(&lane_ioctl_ops);
1065 1067
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 4855d18a8511..038b109b2be7 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,7 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
264{ 264{
265 ax25_clear_queues(ax25); 265 ax25_clear_queues(ax25);
266 266
267 if (!sock_flag(ax25->sk, SOCK_DESTROY)) 267 if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
268 ax25_stop_heartbeat(ax25); 268 ax25_stop_heartbeat(ax25);
269 ax25_stop_t1timer(ax25); 269 ax25_stop_t1timer(ax25);
270 ax25_stop_t2timer(ax25); 270 ax25_stop_t2timer(ax25);
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 9c561e683f4b..0854ebd8613e 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -474,7 +474,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
474 primary_if = batadv_primary_if_get_selected(bat_priv); 474 primary_if = batadv_primary_if_get_selected(bat_priv);
475 if (!primary_if) { 475 if (!primary_if) {
476 ret = -EINVAL; 476 ret = -EINVAL;
477 goto put_primary_if; 477 goto free_skb;
478 } 478 }
479 479
480 /* Create one header to be copied to all fragments */ 480 /* Create one header to be copied to all fragments */
@@ -502,7 +502,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
502 skb_fragment = batadv_frag_create(skb, &frag_header, mtu); 502 skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
503 if (!skb_fragment) { 503 if (!skb_fragment) {
504 ret = -ENOMEM; 504 ret = -ENOMEM;
505 goto free_skb; 505 goto put_primary_if;
506 } 506 }
507 507
508 batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX); 508 batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
@@ -511,7 +511,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
511 ret = batadv_send_unicast_skb(skb_fragment, neigh_node); 511 ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
512 if (ret != NET_XMIT_SUCCESS) { 512 if (ret != NET_XMIT_SUCCESS) {
513 ret = NET_XMIT_DROP; 513 ret = NET_XMIT_DROP;
514 goto free_skb; 514 goto put_primary_if;
515 } 515 }
516 516
517 frag_header.no++; 517 frag_header.no++;
@@ -519,7 +519,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
519 /* The initial check in this function should cover this case */ 519 /* The initial check in this function should cover this case */
520 if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) { 520 if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
521 ret = -EINVAL; 521 ret = -EINVAL;
522 goto free_skb; 522 goto put_primary_if;
523 } 523 }
524 } 524 }
525 525
@@ -527,7 +527,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
527 if (batadv_skb_head_push(skb, header_size) < 0 || 527 if (batadv_skb_head_push(skb, header_size) < 0 ||
528 pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) { 528 pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
529 ret = -ENOMEM; 529 ret = -ENOMEM;
530 goto free_skb; 530 goto put_primary_if;
531 } 531 }
532 532
533 memcpy(skb->data, &frag_header, header_size); 533 memcpy(skb->data, &frag_header, header_size);
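The batman-adv fix reorders goto targets so that every failure path releases exactly what has been acquired at that point, with labels stacked in reverse acquisition order (put_primary_if before free_skb). The canonical unwind shape, sketched with mallocs standing in for the references:

    #include <stdio.h>
    #include <stdlib.h>

    static int do_work(void) { return -1; /* pretend a later step fails */ }

    static int send_fragments(void)
    {
        char *skb = malloc(64);     /* first resource */
        char *primary_if;           /* second resource (a reference, really) */
        int ret = -1;

        if (!skb)
            goto out;

        primary_if = malloc(16);
        if (!primary_if)
            goto free_skb;          /* only the skb to undo so far */

        if (do_work() < 0)
            goto put_primary_if;    /* undo both, in reverse order */
        ret = 0;

    put_primary_if:
        free(primary_if);
    free_skb:
        free(skb);
    out:
        return ret;
    }

    int main(void)
    {
        printf("ret=%d\n", send_fragments());
        return 0;
    }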
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 8ca6a929bf12..95087e6e8258 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -399,7 +399,7 @@ bridged_dnat:
399 br_nf_hook_thresh(NF_BR_PRE_ROUTING, 399 br_nf_hook_thresh(NF_BR_PRE_ROUTING,
400 net, sk, skb, skb->dev, 400 net, sk, skb, skb->dev,
401 NULL, 401 NULL,
402 br_nf_pre_routing_finish); 402 br_nf_pre_routing_finish_bridge);
403 return 0; 403 return 0;
404 } 404 }
405 ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr); 405 ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 71c7453268c1..7109b389ea58 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -781,20 +781,6 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
781 return 0; 781 return 0;
782} 782}
783 783
784static int br_dev_newlink(struct net *src_net, struct net_device *dev,
785 struct nlattr *tb[], struct nlattr *data[])
786{
787 struct net_bridge *br = netdev_priv(dev);
788
789 if (tb[IFLA_ADDRESS]) {
790 spin_lock_bh(&br->lock);
791 br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
792 spin_unlock_bh(&br->lock);
793 }
794
795 return register_netdevice(dev);
796}
797
798static int br_port_slave_changelink(struct net_device *brdev, 784static int br_port_slave_changelink(struct net_device *brdev,
799 struct net_device *dev, 785 struct net_device *dev,
800 struct nlattr *tb[], 786 struct nlattr *tb[],
@@ -1115,6 +1101,25 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
1115 return 0; 1101 return 0;
1116} 1102}
1117 1103
1104static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1105 struct nlattr *tb[], struct nlattr *data[])
1106{
1107 struct net_bridge *br = netdev_priv(dev);
1108 int err;
1109
1110 if (tb[IFLA_ADDRESS]) {
1111 spin_lock_bh(&br->lock);
1112 br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
1113 spin_unlock_bh(&br->lock);
1114 }
1115
1116 err = br_changelink(dev, tb, data);
1117 if (err)
1118 return err;
1119
1120 return register_netdevice(dev);
1121}
1122
1118static size_t br_get_size(const struct net_device *brdev) 1123static size_t br_get_size(const struct net_device *brdev)
1119{ 1124{
1120 return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */ 1125 return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 1108079d934f..5488e4a6ccd0 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
445 * @func: callback function on filter match 445 * @func: callback function on filter match
446 * @data: returned parameter for callback function 446 * @data: returned parameter for callback function
447 * @ident: string for calling module identification 447 * @ident: string for calling module identification
448 * @sk: socket pointer (might be NULL)
448 * 449 *
449 * Description: 450 * Description:
450 * Invokes the callback function with the received sk_buff and the given 451 * Invokes the callback function with the received sk_buff and the given
@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
468 */ 469 */
469int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask, 470int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
470 void (*func)(struct sk_buff *, void *), void *data, 471 void (*func)(struct sk_buff *, void *), void *data,
471 char *ident) 472 char *ident, struct sock *sk)
472{ 473{
473 struct receiver *r; 474 struct receiver *r;
474 struct hlist_head *rl; 475 struct hlist_head *rl;
@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
496 r->func = func; 497 r->func = func;
497 r->data = data; 498 r->data = data;
498 r->ident = ident; 499 r->ident = ident;
500 r->sk = sk;
499 501
500 hlist_add_head_rcu(&r->list, rl); 502 hlist_add_head_rcu(&r->list, rl);
501 d->entries++; 503 d->entries++;
@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
520static void can_rx_delete_receiver(struct rcu_head *rp) 522static void can_rx_delete_receiver(struct rcu_head *rp)
521{ 523{
522 struct receiver *r = container_of(rp, struct receiver, rcu); 524 struct receiver *r = container_of(rp, struct receiver, rcu);
525 struct sock *sk = r->sk;
523 526
524 kmem_cache_free(rcv_cache, r); 527 kmem_cache_free(rcv_cache, r);
528 if (sk)
529 sock_put(sk);
525} 530}
526 531
527/** 532/**
@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
596 spin_unlock(&can_rcvlists_lock); 601 spin_unlock(&can_rcvlists_lock);
597 602
598 /* schedule the receiver item for deletion */ 603 /* schedule the receiver item for deletion */
599 if (r) 604 if (r) {
605 if (r->sk)
606 sock_hold(r->sk);
600 call_rcu(&r->rcu, can_rx_delete_receiver); 607 call_rcu(&r->rcu, can_rx_delete_receiver);
608 }
601} 609}
602EXPORT_SYMBOL(can_rx_unregister); 610EXPORT_SYMBOL(can_rx_unregister);
603 611
diff --git a/net/can/af_can.h b/net/can/af_can.h
index fca0fe9fc45a..b86f5129e838 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -50,13 +50,14 @@
50 50
51struct receiver { 51struct receiver {
52 struct hlist_node list; 52 struct hlist_node list;
53 struct rcu_head rcu;
54 canid_t can_id; 53 canid_t can_id;
55 canid_t mask; 54 canid_t mask;
56 unsigned long matches; 55 unsigned long matches;
57 void (*func)(struct sk_buff *, void *); 56 void (*func)(struct sk_buff *, void *);
58 void *data; 57 void *data;
59 char *ident; 58 char *ident;
59 struct sock *sk;
60 struct rcu_head rcu;
60}; 61};
61 62
62#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS) 63#define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 21ac75390e3d..95d13b233c65 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
734 734
735static void bcm_remove_op(struct bcm_op *op) 735static void bcm_remove_op(struct bcm_op *op)
736{ 736{
737 hrtimer_cancel(&op->timer); 737 if (op->tsklet.func) {
738 hrtimer_cancel(&op->thrtimer); 738 while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
739 739 test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
740 if (op->tsklet.func) 740 hrtimer_active(&op->timer)) {
741 tasklet_kill(&op->tsklet); 741 hrtimer_cancel(&op->timer);
742 tasklet_kill(&op->tsklet);
743 }
744 }
742 745
743 if (op->thrtsklet.func) 746 if (op->thrtsklet.func) {
744 tasklet_kill(&op->thrtsklet); 747 while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
748 test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
749 hrtimer_active(&op->thrtimer)) {
750 hrtimer_cancel(&op->thrtimer);
751 tasklet_kill(&op->thrtsklet);
752 }
753 }
745 754
746 if ((op->frames) && (op->frames != &op->sframe)) 755 if ((op->frames) && (op->frames != &op->sframe))
747 kfree(op->frames); 756 kfree(op->frames);
@@ -1216,7 +1225,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1216 err = can_rx_register(dev, op->can_id, 1225 err = can_rx_register(dev, op->can_id,
1217 REGMASK(op->can_id), 1226 REGMASK(op->can_id),
1218 bcm_rx_handler, op, 1227 bcm_rx_handler, op,
1219 "bcm"); 1228 "bcm", sk);
1220 1229
1221 op->rx_reg_dev = dev; 1230 op->rx_reg_dev = dev;
1222 dev_put(dev); 1231 dev_put(dev);
@@ -1225,7 +1234,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1225 } else 1234 } else
1226 err = can_rx_register(NULL, op->can_id, 1235 err = can_rx_register(NULL, op->can_id,
1227 REGMASK(op->can_id), 1236 REGMASK(op->can_id),
1228 bcm_rx_handler, op, "bcm"); 1237 bcm_rx_handler, op, "bcm", sk);
1229 if (err) { 1238 if (err) {
1230 /* this bcm rx op is broken -> remove it */ 1239 /* this bcm rx op is broken -> remove it */
1231 list_del(&op->list); 1240 list_del(&op->list);
diff --git a/net/can/gw.c b/net/can/gw.c
index a54ab0c82104..7056a1a2bb70 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
442{ 442{
443 return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id, 443 return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
444 gwj->ccgw.filter.can_mask, can_can_gw_rcv, 444 gwj->ccgw.filter.can_mask, can_can_gw_rcv,
445 gwj, "gw"); 445 gwj, "gw", NULL);
446} 446}
447 447
448static inline void cgw_unregister_filter(struct cgw_job *gwj) 448static inline void cgw_unregister_filter(struct cgw_job *gwj)
diff --git a/net/can/raw.c b/net/can/raw.c
index b075f028d7e2..6dc546a06673 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
190 for (i = 0; i < count; i++) { 190 for (i = 0; i < count; i++) {
191 err = can_rx_register(dev, filter[i].can_id, 191 err = can_rx_register(dev, filter[i].can_id,
192 filter[i].can_mask, 192 filter[i].can_mask,
193 raw_rcv, sk, "raw"); 193 raw_rcv, sk, "raw", sk);
194 if (err) { 194 if (err) {
195 /* clean up successfully registered filters */ 195 /* clean up successfully registered filters */
196 while (--i >= 0) 196 while (--i >= 0)
@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
211 211
212 if (err_mask) 212 if (err_mask)
213 err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG, 213 err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
214 raw_rcv, sk, "raw"); 214 raw_rcv, sk, "raw", sk);
215 215
216 return err; 216 return err;
217} 217}
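Across af_can.c, bcm.c, gw.c and raw.c, can_rx_register() gains a struct sock pointer that is stored in the receiver. can_rx_unregister() then takes sock_hold() before scheduling the RCU deletion, and the RCU callback drops the reference with sock_put() after freeing the receiver, so the socket cannot go away while a receiver still points at it. Holding a reference across a deferred free, sketched (the RCU deferral is collapsed to a direct call):

    #include <stdio.h>
    #include <stdlib.h>

    struct sock {
        int refs;
    };

    static void sock_hold(struct sock *sk) { sk->refs++; }

    static void sock_put(struct sock *sk)
    {
        if (--sk->refs == 0)
            printf("sock freed\n");
    }

    struct receiver {
        struct sock *sk;
    };

    /* Deferred destructor: by the time this runs, the unregister path is
     * long gone, so the sock must have been pinned before the deferral. */
    static void rx_delete(struct receiver *r)
    {
        struct sock *sk = r->sk;

        free(r);
        if (sk)
            sock_put(sk);   /* drop the reference taken at unregister time */
    }

    int main(void)
    {
        struct sock sk = { .refs = 1 };
        struct receiver *r = malloc(sizeof(*r));

        if (!r)
            return 1;
        r->sk = &sk;
        sock_hold(r->sk);   /* pin before deferring (call_rcu in the kernel) */
        rx_delete(r);       /* stands in for the end of the grace period */
        sock_put(&sk);      /* the owner's original reference */
        return 0;
    }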
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 3949ce70be07..292e33bd916e 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -214,7 +214,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
214 SKCIPHER_REQUEST_ON_STACK(req, key->tfm); 214 SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
215 struct sg_table sgt; 215 struct sg_table sgt;
216 struct scatterlist prealloc_sg; 216 struct scatterlist prealloc_sg;
217 char iv[AES_BLOCK_SIZE]; 217 char iv[AES_BLOCK_SIZE] __aligned(8);
218 int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1)); 218 int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
219 int crypt_len = encrypt ? in_len + pad_byte : in_len; 219 int crypt_len = encrypt ? in_len + pad_byte : in_len;
220 int ret; 220 int ret;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 662bea587165..ea633342ab0d 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -332,7 +332,9 @@ void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
 EXPORT_SYMBOL(__skb_free_datagram_locked);
 
 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
-			unsigned int flags)
+			unsigned int flags,
+			void (*destructor)(struct sock *sk,
+					   struct sk_buff *skb))
 {
 	int err = 0;
 
@@ -342,6 +344,8 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
 	if (skb == skb_peek(&sk->sk_receive_queue)) {
 		__skb_unlink(skb, &sk->sk_receive_queue);
 		atomic_dec(&skb->users);
+		if (destructor)
+			destructor(sk, skb);
 		err = 0;
 	}
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -375,7 +379,7 @@ EXPORT_SYMBOL(__sk_queue_drop_skb);
 
 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 {
-	int err = __sk_queue_drop_skb(sk, skb, flags);
+	int err = __sk_queue_drop_skb(sk, skb, flags, NULL);
 
 	kfree_skb(skb);
 	sk_mem_reclaim_partial(sk);
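The two hunks above thread an optional destructor through __sk_queue_drop_skb() so UDP can do its per-buffer accounting while the queue lock is held; callers with no such need pass NULL. A minimal userspace sketch of the same optional-callback shape (illustrative types and names, not the kernel API):

    #include <stdio.h>

    struct sock;                    /* opaque stand-in for the kernel's */
    struct buf { int id; };

    /* Optional per-buffer destructor, invoked under the (elided) queue lock. */
    static int drop_buf(struct sock *sk, struct buf *b,
                        void (*destructor)(struct sock *, struct buf *))
    {
            /* ... unlink b from the receive queue ... */
            if (destructor)
                    destructor(sk, b);
            return 0;
    }

    static void account_drop(struct sock *sk, struct buf *b)
    {
            (void)sk;
            printf("dropped buffer %d\n", b->id);
    }

    int main(void)
    {
            struct buf b = { .id = 7 };

            drop_buf(NULL, &b, account_drop);  /* udp-style caller */
            drop_buf(NULL, &b, NULL);          /* skb_kill_datagram-style caller */
            return 0;
    }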
diff --git a/net/core/dev.c b/net/core/dev.c
index 8db5a0b4b520..29101c98399f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1695,24 +1695,19 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
 
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call static_key_slow_dec() from irq context
- * If net_disable_timestamp() is called from irq context, defer the
- * static_key_slow_dec() calls.
- */
 static atomic_t netstamp_needed_deferred;
-#endif
-
-void net_enable_timestamp(void)
+static void netstamp_clear(struct work_struct *work)
 {
-#ifdef HAVE_JUMP_LABEL
 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
 
-	if (deferred) {
-		while (--deferred)
-			static_key_slow_dec(&netstamp_needed);
-		return;
-	}
+	while (deferred--)
+		static_key_slow_dec(&netstamp_needed);
+}
+static DECLARE_WORK(netstamp_work, netstamp_clear);
 #endif
+
+void net_enable_timestamp(void)
+{
 	static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
@@ -1720,12 +1715,12 @@ EXPORT_SYMBOL(net_enable_timestamp);
 void net_disable_timestamp(void)
 {
 #ifdef HAVE_JUMP_LABEL
-	if (in_interrupt()) {
-		atomic_inc(&netstamp_needed_deferred);
-		return;
-	}
-#endif
+	/* net_disable_timestamp() can be called from non process context */
+	atomic_inc(&netstamp_needed_deferred);
+	schedule_work(&netstamp_work);
+#else
 	static_key_slow_dec(&netstamp_needed);
+#endif
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
@@ -2795,9 +2790,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 	if (skb->ip_summed != CHECKSUM_NONE &&
 	    !can_checksum_protocol(features, type)) {
 		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
-	} else if (illegal_highdma(skb->dev, skb)) {
-		features &= ~NETIF_F_SG;
 	}
+	if (illegal_highdma(skb->dev, skb))
+		features &= ~NETIF_F_SG;
 
 	return features;
 }
@@ -4441,7 +4436,9 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
 	    pinfo->nr_frags &&
 	    !PageHighMem(skb_frag_page(frag0))) {
 		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
-		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
+		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+						    skb_frag_size(frag0),
+						    skb->end - skb->tail);
 	}
 }
 
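The netstamp hunks replace the in_interrupt() special case with a counter that a work item drains, and in passing fix the drain loop: the old "while (--deferred)" form skipped one decrement. A userspace sketch of the corrected drain pattern (illustrative, not kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int deferred;

    /* Drain every pending decrement. The kernel fix switched from
     * "while (--deferred)", which misses one, to the post-decrement
     * form used here. */
    static void drain(void)
    {
            int pending = atomic_exchange(&deferred, 0);

            while (pending--)
                    printf("static_key_slow_dec()\n"); /* stand-in for the real call */
    }

    int main(void)
    {
            atomic_fetch_add(&deferred, 3); /* three deferred disable requests */
            drain();                        /* prints exactly three times */
            return 0;
    }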
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 8e0c0635ee97..fb55327dcfea 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -75,6 +75,7 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 	struct nlattr *nla;
 	struct sk_buff *skb;
 	unsigned long flags;
+	void *msg_header;
 
 	al = sizeof(struct net_dm_alert_msg);
 	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
@@ -82,21 +83,41 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 
 	skb = genlmsg_new(al, GFP_KERNEL);
 
-	if (skb) {
-		genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
-			    0, NET_DM_CMD_ALERT);
-		nla = nla_reserve(skb, NLA_UNSPEC,
-				  sizeof(struct net_dm_alert_msg));
-		msg = nla_data(nla);
-		memset(msg, 0, al);
-	} else {
-		mod_timer(&data->send_timer, jiffies + HZ / 10);
+	if (!skb)
+		goto err;
+
+	msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+				 0, NET_DM_CMD_ALERT);
+	if (!msg_header) {
+		nlmsg_free(skb);
+		skb = NULL;
+		goto err;
+	}
+	nla = nla_reserve(skb, NLA_UNSPEC,
+			  sizeof(struct net_dm_alert_msg));
+	if (!nla) {
+		nlmsg_free(skb);
+		skb = NULL;
+		goto err;
 	}
+	msg = nla_data(nla);
+	memset(msg, 0, al);
+	goto out;
 
+err:
+	mod_timer(&data->send_timer, jiffies + HZ / 10);
+out:
 	spin_lock_irqsave(&data->lock, flags);
 	swap(data->skb, skb);
 	spin_unlock_irqrestore(&data->lock, flags);
 
+	if (skb) {
+		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+		struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);
+
+		genlmsg_end(skb, genlmsg_data(gnlh));
+	}
+
 	return skb;
 }
 
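The reset_per_cpu_data() rewrite checks genlmsg_put() and nla_reserve() individually and funnels every failure through an err label that arms the retry timer. The same goto-unwind shape in plain C, with calloc/strdup standing in for the netlink allocations (illustrative only):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct record { char *name; };

    static struct record *build_record(const char *name)
    {
            struct record *r = calloc(1, sizeof(*r));

            if (!r)
                    goto err;

            r->name = strdup(name);
            if (!r->name) {
                    free(r);        /* undo the first step before bailing */
                    r = NULL;
                    goto err;
            }
            goto out;

    err:
            fprintf(stderr, "build failed, arm retry timer\n");
    out:
            return r;               /* NULL on failure, like the swapped-in skb */
    }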
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index e23766c7e3ba..d92de0a1f0a4 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1405,9 +1405,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 	if (regs.len > reglen)
 		regs.len = reglen;
 
-	regbuf = vzalloc(reglen);
-	if (reglen && !regbuf)
-		return -ENOMEM;
+	regbuf = NULL;
+	if (reglen) {
+		regbuf = vzalloc(reglen);
+		if (!regbuf)
+			return -ENOMEM;
+	}
 
 	ops->get_regs(dev, &regs, regbuf);
 
@@ -1712,7 +1715,7 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
 static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
 						   void __user *useraddr)
 {
-	struct ethtool_channels channels, max;
+	struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
 	u32 max_rx_in_use = 0;
 
 	if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
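ethtool_get_regs() now only calls vzalloc() when the length is non-zero, since a zero-length request must not be mistaken for an allocation failure. The guard, sketched with calloc standing in for vzalloc (illustrative):

    #include <stdlib.h>

    static int get_regs(size_t reglen, void **regbuf)
    {
            *regbuf = NULL;
            if (reglen) {
                    *regbuf = calloc(1, reglen);
                    if (!*regbuf)
                            return -1;      /* -ENOMEM in the kernel */
            }
            /* ... ops->get_regs(dev, &regs, *regbuf) runs either way ... */
            return 0;
    }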
diff --git a/net/core/filter.c b/net/core/filter.c
index e6c412b94dec..1969b3f118c1 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2972,12 +2972,6 @@ void bpf_warn_invalid_xdp_action(u32 act)
 }
 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
 
-void bpf_warn_invalid_xdp_buffer(void)
-{
-	WARN_ONCE(1, "Illegal XDP buffer encountered, expect throughput degradation\n");
-}
-EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_buffer);
-
 static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 					int src_reg, int ctx_off,
 					struct bpf_insn *insn_buf,
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d6447dc10371..1b7673aac59d 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -67,8 +67,8 @@ EXPORT_SYMBOL(skb_flow_dissector_init);
  * The function will try to retrieve a be32 entity at
  * offset poff
  */
-__be16 skb_flow_get_be16(const struct sk_buff *skb, int poff, void *data,
-			 int hlen)
+static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
+				void *data, int hlen)
 {
 	__be16 *u, _u;
 
@@ -468,8 +468,9 @@ ip_proto_again:
 		if (hdr->flags & GRE_ACK)
 			offset += sizeof(((struct pptp_gre_header *)0)->ack);
 
-		ppp_hdr = skb_header_pointer(skb, nhoff + offset,
-					     sizeof(_ppp_hdr), _ppp_hdr);
+		ppp_hdr = __skb_header_pointer(skb, nhoff + offset,
+					       sizeof(_ppp_hdr),
+					       data, hlen, _ppp_hdr);
 		if (!ppp_hdr)
 			goto out_bad;
 
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 71bb3e2eca08..b3eef90b2df9 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -386,6 +386,7 @@ static const struct lwtunnel_encap_ops bpf_encap_ops = {
 	.fill_encap = bpf_fill_encap_info,
 	.get_encap_size = bpf_encap_nlsize,
 	.cmp_encap = bpf_encap_cmp,
+	.owner = THIS_MODULE,
 };
 
 static int __init bpf_lwt_init(void)
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index a5d4e866ce88..c23465005f2f 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -26,6 +26,7 @@
 #include <net/lwtunnel.h>
 #include <net/rtnetlink.h>
 #include <net/ip6_fib.h>
+#include <net/nexthop.h>
 
 #ifdef CONFIG_MODULES
 
@@ -114,25 +115,77 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
 	ret = -EOPNOTSUPP;
 	rcu_read_lock();
 	ops = rcu_dereference(lwtun_encaps[encap_type]);
+	if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
+		ret = ops->build_state(dev, encap, family, cfg, lws);
+		if (ret)
+			module_put(ops->owner);
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_build_state);
+
+int lwtunnel_valid_encap_type(u16 encap_type)
+{
+	const struct lwtunnel_encap_ops *ops;
+	int ret = -EINVAL;
+
+	if (encap_type == LWTUNNEL_ENCAP_NONE ||
+	    encap_type > LWTUNNEL_ENCAP_MAX)
+		return ret;
+
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[encap_type]);
+	rcu_read_unlock();
 #ifdef CONFIG_MODULES
 	if (!ops) {
 		const char *encap_type_str = lwtunnel_encap_str(encap_type);
 
 		if (encap_type_str) {
-			rcu_read_unlock();
+			__rtnl_unlock();
 			request_module("rtnl-lwt-%s", encap_type_str);
+			rtnl_lock();
+
 			rcu_read_lock();
 			ops = rcu_dereference(lwtun_encaps[encap_type]);
+			rcu_read_unlock();
 		}
 	}
 #endif
-	if (likely(ops && ops->build_state))
-		ret = ops->build_state(dev, encap, family, cfg, lws);
-	rcu_read_unlock();
-
-	return ret;
-}
-EXPORT_SYMBOL(lwtunnel_build_state);
+	return ops ? 0 : -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(lwtunnel_valid_encap_type);
+
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
+{
+	struct rtnexthop *rtnh = (struct rtnexthop *)attr;
+	struct nlattr *nla_entype;
+	struct nlattr *attrs;
+	struct nlattr *nla;
+	u16 encap_type;
+	int attrlen;
+
+	while (rtnh_ok(rtnh, remaining)) {
+		attrlen = rtnh_attrlen(rtnh);
+		if (attrlen > 0) {
+			attrs = rtnh_attrs(rtnh);
+			nla = nla_find(attrs, attrlen, RTA_ENCAP);
+			nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+
+			if (nla_entype) {
+				encap_type = nla_get_u16(nla_entype);
+
+				if (lwtunnel_valid_encap_type(encap_type) != 0)
+					return -EOPNOTSUPP;
+			}
+		}
+		rtnh = rtnh_next(rtnh, &remaining);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);
 
 void lwtstate_free(struct lwtunnel_state *lws)
 {
@@ -144,6 +197,7 @@ void lwtstate_free(struct lwtunnel_state *lws)
 	} else {
 		kfree(lws);
 	}
+	module_put(ops->owner);
 }
 EXPORT_SYMBOL(lwtstate_free);
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 7bb12e07ffef..e7c12caa20c8 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2923,7 +2923,8 @@ static void neigh_proc_update(struct ctl_table *ctl, int write)
 		return;
 
 	set_bit(index, p->data_state);
-	call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
+	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
+		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
 	if (!dev) /* NULL dev means this is default value */
 		neigh_copy_dflt_parms(net, p, index);
 }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 18b5aae99bec..75e3ea7bda08 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3898,6 +3898,9 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh)
 	u32 filter_mask;
 	int err;
 
+	if (nlmsg_len(nlh) < sizeof(*ifsm))
+		return -EINVAL;
+
 	ifsm = nlmsg_data(nlh);
 	if (ifsm->ifindex > 0)
 		dev = __dev_get_by_index(net, ifsm->ifindex);
@@ -3947,6 +3950,9 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
 	cb->seq = net->dev_base_seq;
 
+	if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
+		return -EINVAL;
+
 	ifsm = nlmsg_data(cb->nlh);
 	filter_mask = ifsm->filter_mask;
 	if (!filter_mask)
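Both rtnl_stats_get() and rtnl_stats_dump() now verify the netlink payload is at least sizeof(struct if_stats_msg) before dereferencing it. The same length-before-access guard over a raw buffer (illustrative):

    #include <string.h>

    struct if_stats_msg_sketch { unsigned ifindex; unsigned filter_mask; };

    static int parse_stats_req(const void *payload, size_t payload_len,
                               struct if_stats_msg_sketch *out)
    {
            if (payload_len < sizeof(*out))
                    return -1;      /* -EINVAL: header too short, never read it */
            memcpy(out, payload, sizeof(*out));
            return 0;
    }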
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5a03730fbc1a..734c71468b01 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -369,7 +369,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 
 	local_irq_save(flags);
 	nc = this_cpu_ptr(&netdev_alloc_cache);
-	data = __alloc_page_frag(nc, fragsz, gfp_mask);
+	data = page_frag_alloc(nc, fragsz, gfp_mask);
 	local_irq_restore(flags);
 	return data;
 }
@@ -391,7 +391,7 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-	return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -441,7 +441,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 	local_irq_save(flags);
 
 	nc = this_cpu_ptr(&netdev_alloc_cache);
-	data = __alloc_page_frag(nc, len, gfp_mask);
+	data = page_frag_alloc(nc, len, gfp_mask);
 	pfmemalloc = nc->pfmemalloc;
 
 	local_irq_restore(flags);
@@ -505,7 +505,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
-	data = __alloc_page_frag(&nc->page, len, gfp_mask);
+	data = page_frag_alloc(&nc->page, len, gfp_mask);
 	if (unlikely(!data))
 		return NULL;
 
diff --git a/net/core/sock.c b/net/core/sock.c
index f560e0826009..4eca27dc5c94 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -222,7 +222,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
   "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" ,
-  "sk_lock-AF_MAX"
+  "sk_lock-AF_QIPCRTR", "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
@@ -239,7 +239,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
   "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" ,
-  "slock-AF_MAX"
+  "slock-AF_QIPCRTR", "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
@@ -256,7 +256,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
   "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" ,
-  "clock-AF_MAX"
+  "clock-AF_QIPCRTR", "clock-AF_MAX"
 };
 
 /*
diff --git a/net/dccp/input.c b/net/dccp/input.c
index ba347184bda9..8fedc2d49770 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -606,7 +606,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 			if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
 								    skb) < 0)
 				return 1;
-			goto discard;
+			consume_skb(skb);
+			return 0;
 		}
 		if (dh->dccph_type == DCCP_PKT_RESET)
 			goto discard;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index adfc790f7193..c4e879c02186 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -227,7 +227,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
 		opt = ireq->ipv6_opt;
 		if (!opt)
 			opt = rcu_dereference(np->opt);
-		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+		err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
 		rcu_read_unlock();
 		err = net_xmit_eval(err);
 	}
@@ -281,7 +281,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(skb, dst);
-		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
+		ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
 		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 		DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
 		return;
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 5fff951a0a49..0f99297b2fb3 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -273,6 +273,7 @@ static int dsa_user_port_apply(struct device_node *port, u32 index,
 	if (err) {
 		dev_warn(ds->dev, "Failed to create slave %d: %d\n",
 			 index, err);
+		ds->ports[index].netdev = NULL;
 		return err;
 	}
 
@@ -394,9 +395,11 @@ static int dsa_dst_apply(struct dsa_switch_tree *dst)
 			return err;
 	}
 
-	err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
-	if (err)
-		return err;
+	if (dst->ds[0]) {
+		err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
+		if (err)
+			return err;
+	}
 
 	/* If we use a tagging format that doesn't have an ethertype
 	 * field, make sure that all packets from this point on get
@@ -433,7 +436,8 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
 		dsa_ds_unapply(dst, ds);
 	}
 
-	dsa_cpu_port_ethtool_restore(dst->ds[0]);
+	if (dst->ds[0])
+		dsa_cpu_port_ethtool_restore(dst->ds[0]);
 
 	pr_info("DSA: tree %d unapplied\n", dst->tree);
 	dst->applied = false;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 68c9eea00518..7d4596110851 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1105,10 +1105,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
 	/* Use already configured phy mode */
 	if (p->phy_interface == PHY_INTERFACE_MODE_NA)
 		p->phy_interface = p->phy->interface;
-	phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
-			   p->phy_interface);
-
-	return 0;
+	return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+				  p->phy_interface);
 }
 
 static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
@@ -1203,6 +1201,8 @@ int dsa_slave_suspend(struct net_device *slave_dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
 
+	netif_device_detach(slave_dev);
+
 	if (p->phy) {
 		phy_stop(p->phy);
 		p->old_pause = -1;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 8c5a479681ca..516c87e75de7 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -356,6 +356,7 @@ void ether_setup(struct net_device *dev)
 	dev->header_ops = &eth_header_ops;
 	dev->type = ARPHRD_ETHER;
 	dev->hard_header_len = ETH_HLEN;
+	dev->min_header_len = ETH_HLEN;
 	dev->mtu = ETH_DATA_LEN;
 	dev->min_mtu = ETH_MIN_MTU;
 	dev->max_mtu = ETH_DATA_LEN;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 89a8cac4726a..51b27ae09fbd 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1263,7 +1263,7 @@ void __init arp_init(void)
 /*
  *	ax25 -> ASCII conversion
  */
-static char *ax2asc2(ax25_address *a, char *buf)
+static void ax2asc2(ax25_address *a, char *buf)
 {
 	char c, *s;
 	int n;
@@ -1285,10 +1285,10 @@ static char *ax2asc2(ax25_address *a, char *buf)
 	*s++ = n + '0';
 	*s++ = '\0';
 
-	if (*buf == '\0' || *buf == '-')
-		return "*";
-
-	return buf;
+	if (*buf == '\0' || *buf == '-') {
+		buf[0] = '*';
+		buf[1] = '\0';
+	}
 }
 #endif /* CONFIG_AX25 */
 
@@ -1322,7 +1322,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
 	}
 #endif
 	sprintf(tbuf, "%pI4", n->primary_key);
-	seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n",
+	seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s * %s\n",
 		   tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name);
 	read_unlock(&n->lock);
 }
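The old ax2asc2() sometimes returned the string literal "*" instead of the caller's buffer, so callers that kept printing the buffer missed that case; the fix writes the result into the buffer unconditionally, and the seq_printf format gains a field width so the column stays aligned. Sketch of the fixed shape (illustrative):

    #include <stdio.h>

    /* Always write the result into buf; never return a detached literal. */
    static void format_addr(const char *raw, char *buf, size_t len)
    {
            snprintf(buf, len, "%s", raw);
            if (buf[0] == '\0' || buf[0] == '-') {
                    buf[0] = '*';
                    buf[1] = '\0';
            }
    }

    int main(void)
    {
            char hbuffer[32];

            format_addr("-", hbuffer, sizeof(hbuffer));
            printf("%-17s\n", hbuffer);     /* prints "*", padded like the fix */
            return 0;
    }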
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 72d6f056d863..ae206163c273 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1587,6 +1587,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
 			goto validate_return_locked;
 		}
 
+		if (opt_iter + 1 == opt_len) {
+			err_offset = opt_iter;
+			goto validate_return_locked;
+		}
 		tag_len = tag[1];
 		if (tag_len > (opt_len - opt_iter)) {
 			err_offset = opt_iter + 1;
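cipso_v4_validate() could read tag[1] one byte past the option when only the tag type byte remained; the new check rejects that case before touching the length byte. A generic TLV walk with the same guards (illustrative):

    #include <stddef.h>

    static int validate_tlv(const unsigned char *opt, size_t opt_len)
    {
            size_t iter = 0;

            while (iter < opt_len) {
                    /* the fix: a lone type byte with no length byte is invalid */
                    if (iter + 1 == opt_len)
                            return -1;

                    size_t tag_len = opt[iter + 1];

                    /* length must cover type+len and stay inside the option */
                    if (tag_len < 2 || tag_len > opt_len - iter)
                            return -1;
                    iter += tag_len;
            }
            return 0;
    }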
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 3ff8938893ec..7db2ad2e82d3 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -46,6 +46,7 @@
 #include <net/rtnetlink.h>
 #include <net/xfrm.h>
 #include <net/l3mdev.h>
+#include <net/lwtunnel.h>
 #include <trace/events/fib.h>
 
 #ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -85,7 +86,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
 	if (tb)
 		return tb;
 
-	if (id == RT_TABLE_LOCAL)
+	if (id == RT_TABLE_LOCAL && !net->ipv4.fib_has_custom_rules)
 		alias = fib_new_table(net, RT_TABLE_MAIN);
 
 	tb = fib_trie_table(id, alias);
@@ -677,6 +678,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
 			cfg->fc_mx_len = nla_len(attr);
 			break;
 		case RTA_MULTIPATH:
+			err = lwtunnel_valid_encap_type_attr(nla_data(attr),
+							     nla_len(attr));
+			if (err < 0)
+				goto errout;
 			cfg->fc_mp = nla_data(attr);
 			cfg->fc_mp_len = nla_len(attr);
 			break;
@@ -691,6 +696,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
 			break;
 		case RTA_ENCAP_TYPE:
 			cfg->fc_encap_type = nla_get_u16(attr);
+			err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
+			if (err < 0)
+				goto errout;
 			break;
 		}
 	}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 7a5b4c7d9a87..9a375b908d01 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1279,8 +1279,9 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
 		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
 			goto nla_put_failure;
 #endif
-		if (fi->fib_nh->nh_lwtstate)
-			lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
+		if (fi->fib_nh->nh_lwtstate &&
+		    lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
+			goto nla_put_failure;
 	}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	if (fi->fib_nhs > 1) {
@@ -1316,8 +1317,10 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
 			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
 				goto nla_put_failure;
 #endif
-			if (nh->nh_lwtstate)
-				lwtunnel_fill_encap(skb, nh->nh_lwtstate);
+			if (nh->nh_lwtstate &&
+			    lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
+				goto nla_put_failure;
+
 			/* length of rtnetlink header + attributes */
 			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
 		} endfor_nexthops(fi);
@@ -1618,8 +1621,13 @@ void fib_select_multipath(struct fib_result *res, int hash)
 void fib_select_path(struct net *net, struct fib_result *res,
 		     struct flowi4 *fl4, int mp_hash)
 {
+	bool oif_check;
+
+	oif_check = (fl4->flowi4_oif == 0 ||
+		     fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF);
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-	if (res->fi->fib_nhs > 1 && fl4->flowi4_oif == 0) {
+	if (res->fi->fib_nhs > 1 && oif_check) {
 		if (mp_hash < 0)
 			mp_hash = get_hash_from_flowi4(fl4) >> 1;
 
@@ -1629,7 +1637,7 @@ void fib_select_path(struct net *net, struct fib_result *res,
 #endif
 	if (!res->prefixlen &&
 	    res->table->tb_num_default > 1 &&
-	    res->type == RTN_UNICAST && !fl4->flowi4_oif)
+	    res->type == RTN_UNICAST && oif_check)
 		fib_select_default(fl4, res);
 
 	if (!fl4->saddr)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 68d622133f53..44fd86de2823 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -219,9 +219,14 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
 static void igmp_gq_start_timer(struct in_device *in_dev)
 {
 	int tv = prandom_u32() % in_dev->mr_maxdelay;
+	unsigned long exp = jiffies + tv + 2;
+
+	if (in_dev->mr_gq_running &&
+	    time_after_eq(exp, (in_dev->mr_gq_timer).expires))
+		return;
 
 	in_dev->mr_gq_running = 1;
-	if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
+	if (!mod_timer(&in_dev->mr_gq_timer, exp))
 		in_dev_hold(in_dev);
 }
 
@@ -1167,6 +1172,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
 			psf->sf_crcount = im->crcount;
 		}
 		in_dev_put(pmc->interface);
+		kfree(pmc);
 	}
 	spin_unlock_bh(&im->lock);
 }
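igmp_gq_start_timer() now refuses to rearm the general-query timer to a later (or equal) expiry while it is already running, so a burst of queries cannot keep postponing the pending report. The comparison, sketched in plain C (the kernel uses time_after_eq(), which is safe across jiffies wraparound, unlike the bare >= here):

    /* Return nonzero only when rearming would not delay the pending report. */
    static int should_rearm(int running, unsigned long cur_expires,
                            unsigned long new_exp)
    {
            if (running && new_exp >= cur_expires)
                    return 0;       /* keep the sooner deadline */
            return 1;
    }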
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index fac275c48108..b67719f45953 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1629,6 +1629,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 	sk->sk_protocol = ip_hdr(skb)->protocol;
 	sk->sk_bound_dev_if = arg->bound_dev_if;
 	sk->sk_sndbuf = sysctl_wmem_default;
+	sk->sk_mark = fl4.flowi4_mark;
 	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
 			     len, 0, &ipc, &rt, MSG_DONTWAIT);
 	if (unlikely(err)) {
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 57e1405e8282..900011709e3b 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1225,14 +1225,27 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
 		 * which has interface index (iif) as the first member of the
 		 * underlying inet{6}_skb_parm struct. This code then overlays
 		 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
-		 * element so the iif is picked up from the prior IPCB
+		 * element so the iif is picked up from the prior IPCB. If iif
+		 * is the loopback interface, then return the sending interface
+		 * (e.g., process binds socket to eth0 for Tx which is
+		 * redirected to loopback in the rtable/dst).
 		 */
+		if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
+			pktinfo->ipi_ifindex = inet_iif(skb);
+
 		pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
 	} else {
 		pktinfo->ipi_ifindex = 0;
 		pktinfo->ipi_spec_dst.s_addr = 0;
 	}
-	skb_dst_drop(skb);
+	/* We need to keep the dst for __ip_options_echo()
+	 * We could restrict the test to opt.ts_needtime || opt.srr,
+	 * but the following is good enough as IP options are not often used.
+	 */
+	if (unlikely(IPCB(skb)->opt.optlen))
+		skb_dst_force(skb);
+	else
+		skb_dst_drop(skb);
 }
 
 int ip_setsockopt(struct sock *sk, int level,
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index fed3d29f9eb3..0fd1976ab63b 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -313,6 +313,7 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
 	.fill_encap = ip_tun_fill_encap_info,
 	.get_encap_size = ip_tun_encap_nlsize,
 	.cmp_encap = ip_tun_cmp_encap,
+	.owner = THIS_MODULE,
 };
 
 static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
@@ -403,6 +404,7 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
 	.fill_encap = ip6_tun_fill_encap_info,
 	.get_encap_size = ip6_tun_encap_nlsize,
 	.cmp_encap = ip_tun_cmp_encap,
+	.owner = THIS_MODULE,
 };
 
 void __init ip_tunnel_core_init(void)
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 21db00d0362b..0a783cd73faf 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -144,6 +144,11 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
 	rcu_read_lock_bh();
 	c = __clusterip_config_find(net, clusterip);
 	if (c) {
+#ifdef CONFIG_PROC_FS
+		if (!c->pde)
+			c = NULL;
+		else
+#endif
 		if (unlikely(!atomic_inc_not_zero(&c->refcount)))
 			c = NULL;
 		else if (entry)
@@ -166,14 +171,15 @@ clusterip_config_init_nodelist(struct clusterip_config *c,
 
 static struct clusterip_config *
 clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
 		      struct net_device *dev)
 {
+	struct net *net = dev_net(dev);
 	struct clusterip_config *c;
-	struct clusterip_net *cn = net_generic(dev_net(dev), clusterip_net_id);
+	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
 
 	c = kzalloc(sizeof(*c), GFP_ATOMIC);
 	if (!c)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	c->dev = dev;
 	c->clusterip = ip;
@@ -185,6 +191,17 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
 	atomic_set(&c->refcount, 1);
 	atomic_set(&c->entries, 1);
 
+	spin_lock_bh(&cn->lock);
+	if (__clusterip_config_find(net, ip)) {
+		spin_unlock_bh(&cn->lock);
+		kfree(c);
+
+		return ERR_PTR(-EBUSY);
+	}
+
+	list_add_rcu(&c->list, &cn->configs);
+	spin_unlock_bh(&cn->lock);
+
 #ifdef CONFIG_PROC_FS
 	{
 		char buffer[16];
@@ -195,16 +212,16 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
 				  cn->procdir,
 				  &clusterip_proc_fops, c);
 		if (!c->pde) {
+			spin_lock_bh(&cn->lock);
+			list_del_rcu(&c->list);
+			spin_unlock_bh(&cn->lock);
 			kfree(c);
-			return NULL;
+
+			return ERR_PTR(-ENOMEM);
 		}
 	}
 #endif
 
-	spin_lock_bh(&cn->lock);
-	list_add_rcu(&c->list, &cn->configs);
-	spin_unlock_bh(&cn->lock);
-
 	return c;
 }
 
@@ -410,9 +427,9 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
 
 		config = clusterip_config_init(cipinfo,
 					       e->ip.dst.s_addr, dev);
-		if (!config) {
+		if (IS_ERR(config)) {
 			dev_put(dev);
-			return -ENOMEM;
+			return PTR_ERR(config);
 		}
 		dev_mc_add(config->dev, config->clustermac);
 	}
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index f273098e48fd..37fb9552e858 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -63,10 +63,10 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4,
 	return dev_match || flags & XT_RPFILTER_LOOSE;
 }
 
-static bool rpfilter_is_local(const struct sk_buff *skb)
+static bool
+rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
 {
-	const struct rtable *rt = skb_rtable(skb);
-	return rt && (rt->rt_flags & RTCF_LOCAL);
+	return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
 }
 
 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -79,7 +79,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	info = par->matchinfo;
 	invert = info->flags & XT_RPFILTER_INVERT;
 
-	if (rpfilter_is_local(skb))
+	if (rpfilter_is_loopback(skb, xt_in(par)))
 		return true ^ invert;
 
 	iph = ip_hdr(skb);
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index fd8220213afc..146d86105183 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -126,6 +126,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
 	/* ip_route_me_harder expects skb->dst to be set */
 	skb_dst_set_noref(nskb, skb_dst(oldskb));
 
+	nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
+
 	skb_reserve(nskb, LL_MAX_HEADER);
 	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
 				   ip4_dst_hoplimit(skb_dst(nskb)));
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index 965b1a161369..2981291910dd 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -26,13 +26,6 @@ static __be32 get_saddr(__be32 addr)
 	return addr;
 }
 
-static bool fib4_is_local(const struct sk_buff *skb)
-{
-	const struct rtable *rt = skb_rtable(skb);
-
-	return rt && (rt->rt_flags & RTCF_LOCAL);
-}
-
 #define DSCP_BITS 0xfc
 
 void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
@@ -95,8 +88,10 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
 	else
 		oif = NULL;
 
-	if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib4_is_local(pkt->skb)) {
-		nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX);
+	if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+	    nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+		nft_fib_store_result(dest, priv->result, pkt,
+				     nft_in(pkt)->ifindex);
 		return;
 	}
 
@@ -131,7 +126,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
 	switch (res.type) {
 	case RTN_UNICAST:
 		break;
-	case RTN_LOCAL: /* should not appear here, see fib4_is_local() above */
+	case RTN_LOCAL: /* Should not see RTN_LOCAL here */
 		return;
 	default:
 		break;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 86cca610f4c2..68d77b1f1495 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -642,6 +642,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
 {
 	struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
 
+	if (!skb)
+		return 0;
 	pfh->wcheck = csum_partial((char *)&pfh->icmph,
 				   sizeof(struct icmphdr), pfh->wcheck);
 	pfh->icmph.checksum = csum_fold(pfh->wcheck);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a82a11747b3f..709ffe67d1de 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1914,7 +1914,8 @@ local_input:
 		}
 	}
 
-	rth = rt_dst_alloc(net->loopback_dev, flags | RTCF_LOCAL, res.type,
+	rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
+			   flags | RTCF_LOCAL, res.type,
 			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
 	if (!rth)
 		goto e_nobufs;
@@ -2471,7 +2472,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
 	r->rtm_dst_len = 32;
 	r->rtm_src_len = 0;
 	r->rtm_tos = fl4->flowi4_tos;
-	r->rtm_table = table_id;
+	r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
 	if (nla_put_u32(skb, RTA_TABLE, table_id))
 		goto nla_put_failure;
 	r->rtm_type = rt->rt_type;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 80bc36b25de2..b2fa498b15d1 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -433,13 +433,6 @@ static struct ctl_table ipv4_table[] = {
 		.extra2 = &tcp_adv_win_scale_max,
 	},
 	{
-		.procname = "tcp_tw_reuse",
-		.data = &sysctl_tcp_tw_reuse,
-		.maxlen = sizeof(int),
-		.mode = 0644,
-		.proc_handler = proc_dointvec
-	},
-	{
 		.procname = "tcp_frto",
 		.data = &sysctl_tcp_frto,
 		.maxlen = sizeof(int),
@@ -958,7 +951,14 @@ static struct ctl_table ipv4_net_table[] = {
 		.data = &init_net.ipv4.sysctl_tcp_notsent_lowat,
 		.maxlen = sizeof(unsigned int),
 		.mode = 0644,
-		.proc_handler = proc_dointvec,
+		.proc_handler = proc_douintvec,
+	},
+	{
+		.procname = "tcp_tw_reuse",
+		.data = &init_net.ipv4.sysctl_tcp_tw_reuse,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = proc_dointvec
 	},
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	{
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4a044964da66..0efb4c7f6704 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -770,6 +770,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 			ret = -EAGAIN;
 			break;
 		}
+		/* if __tcp_splice_read() got nothing while we have
+		 * an skb in receive queue, we do not want to loop.
+		 * This might happen with URG data.
+		 */
+		if (!skb_queue_empty(&sk->sk_receive_queue))
+			break;
 		sk_wait_data(sk, &timeo, NULL);
 		if (signal_pending(current)) {
 			ret = sock_intr_errno(timeo);
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 4e777a3243f9..dd2560c83a85 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -113,7 +113,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
 		struct tcp_fastopen_cookie tmp;
 
 		if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
-			struct in6_addr *buf = (struct in6_addr *) tmp.val;
+			struct in6_addr *buf = &tmp.addr;
 			int i;
 
 			for (i = 0; i < 4; i++)
@@ -205,6 +205,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
 	 * scaled. So correct it appropriately.
 	 */
 	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+	tp->max_window = tp->snd_wnd;
 
 	/* Activate the retrans timer so that SYNACK can be retransmitted.
 	 * The request socket is not added to the ehash
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6c790754ae3e..41dcbd568cbe 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5078,7 +5078,7 @@ static void tcp_check_space(struct sock *sk)
 	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
 		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
 		/* pairs with tcp_poll() */
-		smp_mb__after_atomic();
+		smp_mb();
 		if (sk->sk_socket &&
 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
 			tcp_new_space(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 30d81f533ada..fe9da4fb96bf 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -84,7 +84,6 @@
 #include <crypto/hash.h>
 #include <linux/scatterlist.h>
 
-int sysctl_tcp_tw_reuse __read_mostly;
 int sysctl_tcp_low_latency __read_mostly;
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -120,7 +119,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 	   and use initial timestamp retrieved from peer table.
 	 */
 	if (tcptw->tw_ts_recent_stamp &&
-	    (!twp || (sysctl_tcp_tw_reuse &&
+	    (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
 		if (tp->write_seq == 0)
@@ -2456,6 +2455,7 @@ static int __net_init tcp_sk_init(struct net *net)
 	net->ipv4.sysctl_tcp_orphan_retries = 0;
 	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
 	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
+	net->ipv4.sysctl_tcp_tw_reuse = 0;
 
 	return 0;
 fail:
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index d46f4d5b1c62..ba8f02d0f283 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -606,7 +606,6 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
 
 void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
 {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1d5331a1b1dc..8ce50dc3ab8c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2518,9 +2518,11 @@ u32 __tcp_select_window(struct sock *sk)
 	int full_space = min_t(int, tp->window_clamp, allowed_space);
 	int window;
 
-	if (mss > full_space)
+	if (unlikely(mss > full_space)) {
 		mss = full_space;
-
+		if (mss <= 0)
+			return 0;
+	}
 	if (free_space < (full_space >> 1)) {
 		icsk->icsk_ack.quick = 0;
 
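__tcp_select_window() now refuses to advertise a window when not even one MSS fits: mss is clamped to full_space and, if nothing positive remains, the window is zero. Sketch of the control flow (illustrative):

    /* Returns the clamped mss to continue with, or 0 for a zero window. */
    static int select_window_mss(int mss, int full_space)
    {
            if (mss > full_space) {
                    mss = full_space;
                    if (mss <= 0)
                            return 0;   /* no room: advertise a zero window */
            }
            /* ... normal window calculation continues with the clamped mss ... */
            return mss;
    }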
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index f6c50af24a64..3d063eb37848 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -117,7 +117,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 	     (fwmark > 0 && skb->mark == fwmark)) &&
 	    (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
 
-		spin_lock(&tcp_probe.lock);
+		spin_lock_bh(&tcp_probe.lock);
 		/* If log fills, just silently drop */
 		if (tcp_probe_avail() > 1) {
 			struct tcp_log *p = tcp_probe.log + tcp_probe.head;
@@ -157,7 +157,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
 		}
 		tcp_probe.lastcwnd = tp->snd_cwnd;
-		spin_unlock(&tcp_probe.lock);
+		spin_unlock_bh(&tcp_probe.lock);
 
 		wake_up(&tcp_probe.wait);
 	}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1307a7c2e544..8aab7d78d25b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1501,7 +1501,7 @@ try_again:
 	return err;
 
 csum_copy_err:
-	if (!__sk_queue_drop_skb(sk, skb, flags)) {
+	if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
 		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 	}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c1e124bc8e1e..a7bcc0ab5e99 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3386,9 +3386,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 	}
 
 	if (idev) {
-		if (idev->if_flags & IF_READY)
-			/* device is already configured. */
+		if (idev->if_flags & IF_READY) {
+			/* device is already configured -
+			 * but resend MLD reports, we might
+			 * have roamed and need to update
+			 * multicast snooping switches
+			 */
+			ipv6_mc_up(idev);
 			break;
+		}
 		idev->if_flags |= IF_READY;
 	}
 
@@ -4009,6 +4015,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
 
 	if (bump_id)
 		rt_genid_bump_ipv6(dev_net(dev));
+
+	/* Make sure that a new temporary address will be created
+	 * before this temporary address becomes deprecated.
+	 */
+	if (ifp->flags & IFA_F_TEMPORARY)
+		addrconf_verify_rtnl();
 }
 
 static void addrconf_dad_run(struct inet6_dev *idev)
@@ -5540,8 +5552,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
 	struct net_device *dev;
 	struct inet6_dev *idev;
 
-	rcu_read_lock();
-	for_each_netdev_rcu(net, dev) {
+	for_each_netdev(net, dev) {
 		idev = __in6_dev_get(dev);
 		if (idev) {
 			int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -5550,7 +5561,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
 			dev_disable_change(idev);
 		}
 	}
-	rcu_read_unlock();
 }
 
 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a3eaafd87100..eec27f87efac 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -167,18 +167,22 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
 	if (np->sndflow)
 		fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 
-	addr_type = ipv6_addr_type(&usin->sin6_addr);
-
-	if (addr_type == IPV6_ADDR_ANY) {
+	if (ipv6_addr_any(&usin->sin6_addr)) {
 		/*
 		 *	connect to self
 		 */
-		usin->sin6_addr.s6_addr[15] = 0x01;
+		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+					       &usin->sin6_addr);
+		else
+			usin->sin6_addr = in6addr_loopback;
 	}
 
+	addr_type = ipv6_addr_type(&usin->sin6_addr);
+
 	daddr = &usin->sin6_addr;
 
-	if (addr_type == IPV6_ADDR_MAPPED) {
+	if (addr_type & IPV6_ADDR_MAPPED) {
 		struct sockaddr_in sin;
 
 		if (__ipv6_only_sock(sk)) {
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index e4198502fd98..275cac628a95 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -327,7 +327,6 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
 	struct ipv6_sr_hdr *hdr;
 	struct inet6_dev *idev;
 	struct in6_addr *addr;
-	bool cleanup = false;
 	int accept_seg6;
 
 	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
@@ -351,11 +350,7 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
 #endif
 
 looped_back:
-	if (hdr->segments_left > 0) {
-		if (hdr->nexthdr != NEXTHDR_IPV6 && hdr->segments_left == 1 &&
-		    sr_has_cleanup(hdr))
-			cleanup = true;
-	} else {
+	if (hdr->segments_left == 0) {
 		if (hdr->nexthdr == NEXTHDR_IPV6) {
 			int offset = (hdr->hdrlen + 1) << 3;
 
@@ -418,21 +413,6 @@ looped_back:
 
 	ipv6_hdr(skb)->daddr = *addr;
 
-	if (cleanup) {
-		int srhlen = (hdr->hdrlen + 1) << 3;
-		int nh = hdr->nexthdr;
-
-		skb_pull_rcsum(skb, sizeof(struct ipv6hdr) + srhlen);
-		memmove(skb_network_header(skb) + srhlen,
-			skb_network_header(skb),
-			(unsigned char *)hdr - skb_network_header(skb));
-		skb->network_header += srhlen;
-		ipv6_hdr(skb)->nexthdr = nh;
-		ipv6_hdr(skb)->payload_len = htons(skb->len -
-						   sizeof(struct ipv6hdr));
-		skb_push_rcsum(skb, sizeof(struct ipv6hdr));
-	}
-
 	skb_dst_drop(skb);
 
 	ip6_route_input(skb);
@@ -453,13 +433,8 @@ looped_back:
 		}
 		ipv6_hdr(skb)->hop_limit--;
 
-		/* be sure that srh is still present before reinjecting */
-		if (!cleanup) {
-			skb_pull(skb, sizeof(struct ipv6hdr));
-			goto looped_back;
-		}
-		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
-		IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+		skb_pull(skb, sizeof(struct ipv6hdr));
+		goto looped_back;
 	}
 
 	dst_input(skb);
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index a7bc54ab46e2..13b5e85fe0d5 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -238,6 +238,7 @@ static const struct lwtunnel_encap_ops ila_encap_ops = {
238 .fill_encap = ila_fill_encap_info, 238 .fill_encap = ila_fill_encap_info,
239 .get_encap_size = ila_encap_nlsize, 239 .get_encap_size = ila_encap_nlsize,
240 .cmp_encap = ila_encap_cmp, 240 .cmp_encap = ila_encap_cmp,
241 .owner = THIS_MODULE,
241}; 242};
242 243
243int ila_lwt_init(void) 244int ila_lwt_init(void)
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 7396e75e161b..75c308239243 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -176,7 +176,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
176 /* Restore final destination back after routing done */ 176 /* Restore final destination back after routing done */
177 fl6.daddr = sk->sk_v6_daddr; 177 fl6.daddr = sk->sk_v6_daddr;
178 178
179 res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), 179 res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
180 np->tclass); 180 np->tclass);
181 rcu_read_unlock(); 181 rcu_read_unlock();
182 return res; 182 return res;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 75b6108234dd..630b73be5999 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -367,35 +367,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
367 367
368 368
369static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 369static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
370 u8 type, u8 code, int offset, __be32 info) 370 u8 type, u8 code, int offset, __be32 info)
371{ 371{
372 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data; 372 const struct gre_base_hdr *greh;
373 __be16 *p = (__be16 *)(skb->data + offset); 373 const struct ipv6hdr *ipv6h;
374 int grehlen = offset + 4; 374 int grehlen = sizeof(*greh);
375 struct ip6_tnl *t; 375 struct ip6_tnl *t;
376 int key_off = 0;
376 __be16 flags; 377 __be16 flags;
378 __be32 key;
377 379
378 flags = p[0]; 380 if (!pskb_may_pull(skb, offset + grehlen))
379 if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { 381 return;
380 if (flags&(GRE_VERSION|GRE_ROUTING)) 382 greh = (const struct gre_base_hdr *)(skb->data + offset);
381 return; 383 flags = greh->flags;
382 if (flags&GRE_KEY) { 384 if (flags & (GRE_VERSION | GRE_ROUTING))
383 grehlen += 4; 385 return;
384 if (flags&GRE_CSUM) 386 if (flags & GRE_CSUM)
385 grehlen += 4; 387 grehlen += 4;
386 } 388 if (flags & GRE_KEY) {
389 key_off = grehlen + offset;
390 grehlen += 4;
387 } 391 }
388 392
389 /* If only 8 bytes returned, keyed message will be dropped here */ 393 if (!pskb_may_pull(skb, offset + grehlen))
390 if (!pskb_may_pull(skb, grehlen))
391 return; 394 return;
392 ipv6h = (const struct ipv6hdr *)skb->data; 395 ipv6h = (const struct ipv6hdr *)skb->data;
393 p = (__be16 *)(skb->data + offset); 396 greh = (const struct gre_base_hdr *)(skb->data + offset);
397 key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
394 398
395 t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr, 399 t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
396 flags & GRE_KEY ? 400 key, greh->protocol);
397 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
398 p[1]);
399 if (!t) 401 if (!t)
400 return; 402 return;
401 403
@@ -582,6 +584,9 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
582 return -1; 584 return -1;
583 585
584 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 586 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
587 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
588 ipv6h = ipv6_hdr(skb);
589
585 if (offset > 0) { 590 if (offset > 0) {
586 struct ipv6_tlv_tnl_enc_lim *tel; 591 struct ipv6_tlv_tnl_enc_lim *tel;
587 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; 592 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
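The rewritten ip6gre_err() parses the GRE header in two passes: pull enough bytes for the fixed header, read the flags, compute where the optional checksum and key fields place the key, then pull again and re-derive every pointer, since pskb_may_pull() can move skb->data. A self-contained sketch of the same two-phase parse over a plain buffer; the flag constants mirror the RFC 2784/2890 bit positions, and the sequence field can be ignored here because it follows the key and cannot shift its offset:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <string.h>

    #define GRE_F_CSUM 0x8000
    #define GRE_F_KEY  0x2000

    /* Returns 0 and fills *key (0 if keyless), -1 if truncated.
     * The key is left in network byte order, as in the kernel lookup. */
    static int parse_gre_key(const uint8_t *pkt, size_t len, size_t offset,
                             uint32_t *key)
    {
        uint16_t flags;
        size_t grehlen = 4;          /* fixed header: flags + protocol */
        size_t key_off = 0;

        if (len < offset + grehlen)
            return -1;
        memcpy(&flags, pkt + offset, sizeof(flags));
        flags = ntohs(flags);

        if (flags & GRE_F_CSUM)
            grehlen += 4;            /* checksum + reserved1 */
        if (flags & GRE_F_KEY) {
            key_off = offset + grehlen;
            grehlen += 4;
        }
        if (len < offset + grehlen)  /* re-check with options included */
            return -1;

        *key = 0;
        if (key_off)
            memcpy(key, pkt + key_off, sizeof(*key));
        return 0;
    }

The trailing ip6gre_xmit_ipv6() hunk applies the same discipline: ipv6h is reloaded after ip6_tnl_parse_tlv_enc_lim(), which may also have reallocated skb->head.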
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 89c59e656f44..fc7b4017ba24 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -191,6 +191,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
191 ops = rcu_dereference(inet6_offloads[proto]); 191 ops = rcu_dereference(inet6_offloads[proto]);
192 if (!ops || !ops->callbacks.gro_receive) { 192 if (!ops || !ops->callbacks.gro_receive) {
193 __pskb_pull(skb, skb_gro_offset(skb)); 193 __pskb_pull(skb, skb_gro_offset(skb));
194 skb_gro_frag0_invalidate(skb);
194 proto = ipv6_gso_pull_exthdrs(skb, proto); 195 proto = ipv6_gso_pull_exthdrs(skb, proto);
195 skb_gro_pull(skb, -skb_transport_offset(skb)); 196 skb_gro_pull(skb, -skb_transport_offset(skb));
196 skb_reset_transport_header(skb); 197 skb_reset_transport_header(skb);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 70d0de404197..7cebee58e55b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -172,7 +172,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
172 * which are using proper atomic operations or spinlocks. 172 * which are using proper atomic operations or spinlocks.
173 */ 173 */
174int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, 174int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
175 struct ipv6_txoptions *opt, int tclass) 175 __u32 mark, struct ipv6_txoptions *opt, int tclass)
176{ 176{
177 struct net *net = sock_net(sk); 177 struct net *net = sock_net(sk);
178 const struct ipv6_pinfo *np = inet6_sk(sk); 178 const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -240,7 +240,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
240 240
241 skb->protocol = htons(ETH_P_IPV6); 241 skb->protocol = htons(ETH_P_IPV6);
242 skb->priority = sk->sk_priority; 242 skb->priority = sk->sk_priority;
243 skb->mark = sk->sk_mark; 243 skb->mark = mark;
244 244
245 mtu = dst_mtu(dst); 245 mtu = dst_mtu(dst);
246 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { 246 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
@@ -1021,6 +1021,11 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
1021 } 1021 }
1022 } 1022 }
1023#endif 1023#endif
1024 if (ipv6_addr_v4mapped(&fl6->saddr) &&
1025 !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
1026 err = -EAFNOSUPPORT;
1027 goto out_err_release;
1028 }
1024 1029
1025 return 0; 1030 return 0;
1026 1031
@@ -1344,7 +1349,7 @@ emsgsize:
1344 */ 1349 */
1345 if (transhdrlen && sk->sk_protocol == IPPROTO_UDP && 1350 if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
1346 headersize == sizeof(struct ipv6hdr) && 1351 headersize == sizeof(struct ipv6hdr) &&
1347 length < mtu - headersize && 1352 length <= mtu - headersize &&
1348 !(flags & MSG_MORE) && 1353 !(flags & MSG_MORE) &&
1349 rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) 1354 rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
1350 csummode = CHECKSUM_PARTIAL; 1355 csummode = CHECKSUM_PARTIAL;
@@ -1373,7 +1378,7 @@ emsgsize:
1373 */ 1378 */
1374 1379
1375 cork->length += length; 1380 cork->length += length;
1376 if (((length > mtu) || 1381 if ((((length + fragheaderlen) > mtu) ||
1377 (skb && skb_is_gso(skb))) && 1382 (skb && skb_is_gso(skb))) &&
1378 (sk->sk_protocol == IPPROTO_UDP) && 1383 (sk->sk_protocol == IPPROTO_UDP) &&
1379 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && 1384 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
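Three things happen in ip6_output.c: ip6_xmit() now takes the skb mark as an explicit parameter (so replies can carry a mark that differs from sk->sk_mark), checksum-offload eligibility uses <= so a payload exactly filling the MTU still qualifies, and ip6_dst_lookup_tail() rejects a v4-mapped source paired with a non-v4-mapped destination instead of building a nonsensical flow. A sketch of that last consistency check:

    #include <errno.h>
    #include <netinet/in.h>

    /* A v4-mapped source can only reach a v4-mapped (or still
     * unspecified) destination; anything else is a family mismatch. */
    static int check_family_consistency(const struct in6_addr *saddr,
                                        const struct in6_addr *daddr)
    {
        if (IN6_IS_ADDR_V4MAPPED(saddr) &&
            !(IN6_IS_ADDR_V4MAPPED(daddr) || IN6_IS_ADDR_UNSPECIFIED(daddr)))
            return -EAFNOSUPPORT;
        return 0;
    }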
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 36d292180942..75fac933c209 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -400,18 +400,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
400 400
401__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) 401__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
402{ 402{
403 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw; 403 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
404 __u8 nexthdr = ipv6h->nexthdr; 404 unsigned int nhoff = raw - skb->data;
405 __u16 off = sizeof(*ipv6h); 405 unsigned int off = nhoff + sizeof(*ipv6h);
406 u8 next, nexthdr = ipv6h->nexthdr;
406 407
407 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { 408 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
408 __u16 optlen = 0;
409 struct ipv6_opt_hdr *hdr; 409 struct ipv6_opt_hdr *hdr;
410 if (raw + off + sizeof(*hdr) > skb->data && 410 u16 optlen;
411 !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr))) 411
412 if (!pskb_may_pull(skb, off + sizeof(*hdr)))
412 break; 413 break;
413 414
414 hdr = (struct ipv6_opt_hdr *) (raw + off); 415 hdr = (struct ipv6_opt_hdr *)(skb->data + off);
415 if (nexthdr == NEXTHDR_FRAGMENT) { 416 if (nexthdr == NEXTHDR_FRAGMENT) {
416 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr; 417 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
417 if (frag_hdr->frag_off) 418 if (frag_hdr->frag_off)
@@ -422,20 +423,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
422 } else { 423 } else {
423 optlen = ipv6_optlen(hdr); 424 optlen = ipv6_optlen(hdr);
424 } 425 }
426 /* cache hdr->nexthdr, since pskb_may_pull() might
427 * invalidate hdr
428 */
429 next = hdr->nexthdr;
425 if (nexthdr == NEXTHDR_DEST) { 430 if (nexthdr == NEXTHDR_DEST) {
426 __u16 i = off + 2; 431 u16 i = 2;
432
433 /* Remember : hdr is no longer valid at this point. */
434 if (!pskb_may_pull(skb, off + optlen))
435 break;
436
427 while (1) { 437 while (1) {
428 struct ipv6_tlv_tnl_enc_lim *tel; 438 struct ipv6_tlv_tnl_enc_lim *tel;
429 439
430 /* No more room for encapsulation limit */ 440 /* No more room for encapsulation limit */
431 if (i + sizeof (*tel) > off + optlen) 441 if (i + sizeof(*tel) > optlen)
432 break; 442 break;
433 443
434 tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i]; 444 tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
435 /* return index of option if found and valid */ 445 /* return index of option if found and valid */
436 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT && 446 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
437 tel->length == 1) 447 tel->length == 1)
438 return i; 448 return i + off - nhoff;
439 /* else jump to next option */ 449 /* else jump to next option */
440 if (tel->type) 450 if (tel->type)
441 i += tel->length + 2; 451 i += tel->length + 2;
@@ -443,7 +453,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
443 i++; 453 i++;
444 } 454 }
445 } 455 }
446 nexthdr = hdr->nexthdr; 456 nexthdr = next;
447 off += optlen; 457 off += optlen;
448 } 458 }
449 return 0; 459 return 0;
@@ -1108,7 +1118,7 @@ route_lookup:
1108 t->parms.name); 1118 t->parms.name);
1109 goto tx_err_dst_release; 1119 goto tx_err_dst_release;
1110 } 1120 }
1111 mtu = dst_mtu(dst) - psh_hlen; 1121 mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
1112 if (encap_limit >= 0) { 1122 if (encap_limit >= 0) {
1113 max_headroom += 8; 1123 max_headroom += 8;
1114 mtu -= 8; 1124 mtu -= 8;
@@ -1117,7 +1127,7 @@ route_lookup:
1117 mtu = IPV6_MIN_MTU; 1127 mtu = IPV6_MIN_MTU;
1118 if (skb_dst(skb) && !t->parms.collect_md) 1128 if (skb_dst(skb) && !t->parms.collect_md)
1119 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); 1129 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1120 if (skb->len > mtu && !skb_is_gso(skb)) { 1130 if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
1121 *pmtu = mtu; 1131 *pmtu = mtu;
1122 err = -EMSGSIZE; 1132 err = -EMSGSIZE;
1123 goto tx_err_dst_release; 1133 goto tx_err_dst_release;
@@ -1303,6 +1313,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1303 fl6.flowlabel = key->label; 1313 fl6.flowlabel = key->label;
1304 } else { 1314 } else {
1305 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 1315 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1316 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
1317 ipv6h = ipv6_hdr(skb);
1306 if (offset > 0) { 1318 if (offset > 0) {
1307 struct ipv6_tlv_tnl_enc_lim *tel; 1319 struct ipv6_tlv_tnl_enc_lim *tel;
1308 1320
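The ip6_tnl_parse_tlv_enc_lim() rewrite is the general cure for the bug class above: every position is kept as an offset from skb->data, hdr->nexthdr is cached before the next pskb_may_pull() can invalidate hdr, and the returned index is normalized back to the caller's raw base. A runnable sketch of the pattern, with pull() deliberately moving the buffer the way skb relinearization can:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct pbuf {
        uint8_t *base;    /* may move whenever pull() grows the linear part */
        size_t   total;   /* bytes available in the packet overall */
        size_t   linear;  /* bytes directly readable from base */
    };

    /* Stand-in for pskb_may_pull(): guarantee 'need' readable bytes.
     * Copying to a fresh block forces the base address to change,
     * which is exactly what stale cached pointers trip over. */
    static int pull(struct pbuf *b, size_t need)
    {
        if (need > b->total)
            return -1;
        if (need > b->linear) {
            uint8_t *n = malloc(b->total);

            if (!n)
                return -1;
            memcpy(n, b->base, b->total);
            free(b->base);
            b->base = n;
            b->linear = need;
        }
        return 0;
    }

    /* TLV walk in the style of the rewritten parser: carry offsets,
     * re-derive pointers from base after every pull(), and read the
     * type/length pair only once the bytes are guaranteed. */
    static int find_tlv(struct pbuf *b, uint8_t want)
    {
        size_t off = 0;

        while (off + 2 <= b->total) {
            uint8_t type, len;

            if (pull(b, off + 2))
                return -1;
            type = b->base[off];        /* fresh read, never cached */
            len  = b->base[off + 1];
            if (type == want)
                return (int)off;
            off += 2 + (size_t)len;
        }
        return -1;
    }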
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index f4b4a4a5f4ba..d82042c8d8fd 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev)
189 struct vti6_net *ip6n = net_generic(net, vti6_net_id); 189 struct vti6_net *ip6n = net_generic(net, vti6_net_id);
190 int err; 190 int err;
191 191
192 dev->rtnl_link_ops = &vti6_link_ops;
192 err = register_netdevice(dev); 193 err = register_netdevice(dev);
193 if (err < 0) 194 if (err < 0)
194 goto out; 195 goto out;
195 196
196 strcpy(t->parms.name, dev->name); 197 strcpy(t->parms.name, dev->name);
197 dev->rtnl_link_ops = &vti6_link_ops;
198 198
199 dev_hold(dev); 199 dev_hold(dev);
200 vti6_tnl_link(ip6n, t); 200 vti6_tnl_link(ip6n, t);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 14a3903f1c82..1bdc703cb966 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -81,7 +81,7 @@ static void mld_gq_timer_expire(unsigned long data);
81static void mld_ifc_timer_expire(unsigned long data); 81static void mld_ifc_timer_expire(unsigned long data);
82static void mld_ifc_event(struct inet6_dev *idev); 82static void mld_ifc_event(struct inet6_dev *idev);
83static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc); 83static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
84static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr); 84static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
85static void mld_clear_delrec(struct inet6_dev *idev); 85static void mld_clear_delrec(struct inet6_dev *idev);
86static bool mld_in_v1_mode(const struct inet6_dev *idev); 86static bool mld_in_v1_mode(const struct inet6_dev *idev);
87static int sf_setstate(struct ifmcaddr6 *pmc); 87static int sf_setstate(struct ifmcaddr6 *pmc);
@@ -692,9 +692,9 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
692 dev_mc_del(dev, buf); 692 dev_mc_del(dev, buf);
693 } 693 }
694 694
695 if (mc->mca_flags & MAF_NOREPORT)
696 goto done;
697 spin_unlock_bh(&mc->mca_lock); 695 spin_unlock_bh(&mc->mca_lock);
696 if (mc->mca_flags & MAF_NOREPORT)
697 return;
698 698
699 if (!mc->idev->dead) 699 if (!mc->idev->dead)
700 igmp6_leave_group(mc); 700 igmp6_leave_group(mc);
@@ -702,8 +702,6 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
702 spin_lock_bh(&mc->mca_lock); 702 spin_lock_bh(&mc->mca_lock);
703 if (del_timer(&mc->mca_timer)) 703 if (del_timer(&mc->mca_timer))
704 atomic_dec(&mc->mca_refcnt); 704 atomic_dec(&mc->mca_refcnt);
705done:
706 ip6_mc_clear_src(mc);
707 spin_unlock_bh(&mc->mca_lock); 705 spin_unlock_bh(&mc->mca_lock);
708} 706}
709 707
@@ -748,10 +746,11 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
748 spin_unlock_bh(&idev->mc_lock); 746 spin_unlock_bh(&idev->mc_lock);
749} 747}
750 748
751static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca) 749static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
752{ 750{
753 struct ifmcaddr6 *pmc, *pmc_prev; 751 struct ifmcaddr6 *pmc, *pmc_prev;
754 struct ip6_sf_list *psf, *psf_next; 752 struct ip6_sf_list *psf;
753 struct in6_addr *pmca = &im->mca_addr;
755 754
756 spin_lock_bh(&idev->mc_lock); 755 spin_lock_bh(&idev->mc_lock);
757 pmc_prev = NULL; 756 pmc_prev = NULL;
@@ -768,14 +767,21 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
768 } 767 }
769 spin_unlock_bh(&idev->mc_lock); 768 spin_unlock_bh(&idev->mc_lock);
770 769
770 spin_lock_bh(&im->mca_lock);
771 if (pmc) { 771 if (pmc) {
772 for (psf = pmc->mca_tomb; psf; psf = psf_next) { 772 im->idev = pmc->idev;
773 psf_next = psf->sf_next; 773 im->mca_crcount = idev->mc_qrv;
774 kfree(psf); 774 im->mca_sfmode = pmc->mca_sfmode;
775 if (pmc->mca_sfmode == MCAST_INCLUDE) {
776 im->mca_tomb = pmc->mca_tomb;
777 im->mca_sources = pmc->mca_sources;
778 for (psf = im->mca_sources; psf; psf = psf->sf_next)
779 psf->sf_crcount = im->mca_crcount;
775 } 780 }
776 in6_dev_put(pmc->idev); 781 in6_dev_put(pmc->idev);
777 kfree(pmc); 782 kfree(pmc);
778 } 783 }
784 spin_unlock_bh(&im->mca_lock);
779} 785}
780 786
781static void mld_clear_delrec(struct inet6_dev *idev) 787static void mld_clear_delrec(struct inet6_dev *idev)
@@ -904,7 +910,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
904 mca_get(mc); 910 mca_get(mc);
905 write_unlock_bh(&idev->lock); 911 write_unlock_bh(&idev->lock);
906 912
907 mld_del_delrec(idev, &mc->mca_addr); 913 mld_del_delrec(idev, mc);
908 igmp6_group_added(mc); 914 igmp6_group_added(mc);
909 ma_put(mc); 915 ma_put(mc);
910 return 0; 916 return 0;
@@ -927,6 +933,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
927 write_unlock_bh(&idev->lock); 933 write_unlock_bh(&idev->lock);
928 934
929 igmp6_group_dropped(ma); 935 igmp6_group_dropped(ma);
936 ip6_mc_clear_src(ma);
930 937
931 ma_put(ma); 938 ma_put(ma);
932 return 0; 939 return 0;
@@ -2501,15 +2508,17 @@ void ipv6_mc_down(struct inet6_dev *idev)
2501 /* Withdraw multicast list */ 2508 /* Withdraw multicast list */
2502 2509
2503 read_lock_bh(&idev->lock); 2510 read_lock_bh(&idev->lock);
2504 mld_ifc_stop_timer(idev);
2505 mld_gq_stop_timer(idev);
2506 mld_dad_stop_timer(idev);
2507 2511
2508 for (i = idev->mc_list; i; i = i->next) 2512 for (i = idev->mc_list; i; i = i->next)
2509 igmp6_group_dropped(i); 2513 igmp6_group_dropped(i);
2510 read_unlock_bh(&idev->lock);
2511 2514
2512 mld_clear_delrec(idev); 2515 /* Should stop timer after group drop. or we will
2516 * start timer again in mld_ifc_event()
2517 */
2518 mld_ifc_stop_timer(idev);
2519 mld_gq_stop_timer(idev);
2520 mld_dad_stop_timer(idev);
2521 read_unlock_bh(&idev->lock);
2513} 2522}
2514 2523
2515static void ipv6_mc_reset(struct inet6_dev *idev) 2524static void ipv6_mc_reset(struct inet6_dev *idev)
@@ -2531,8 +2540,10 @@ void ipv6_mc_up(struct inet6_dev *idev)
2531 2540
2532 read_lock_bh(&idev->lock); 2541 read_lock_bh(&idev->lock);
2533 ipv6_mc_reset(idev); 2542 ipv6_mc_reset(idev);
2534 for (i = idev->mc_list; i; i = i->next) 2543 for (i = idev->mc_list; i; i = i->next) {
2544 mld_del_delrec(idev, i);
2535 igmp6_group_added(i); 2545 igmp6_group_added(i);
2546 }
2536 read_unlock_bh(&idev->lock); 2547 read_unlock_bh(&idev->lock);
2537} 2548}
2538 2549
@@ -2565,6 +2576,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2565 2576
2566 /* Deactivate timers */ 2577 /* Deactivate timers */
2567 ipv6_mc_down(idev); 2578 ipv6_mc_down(idev);
2579 mld_clear_delrec(idev);
2568 2580
2569 /* Delete all-nodes address. */ 2581 /* Delete all-nodes address. */
2570 /* We cannot call ipv6_dev_mc_dec() directly, our caller in 2582 /* We cannot call ipv6_dev_mc_dec() directly, our caller in
@@ -2579,11 +2591,9 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2579 write_lock_bh(&idev->lock); 2591 write_lock_bh(&idev->lock);
2580 while ((i = idev->mc_list) != NULL) { 2592 while ((i = idev->mc_list) != NULL) {
2581 idev->mc_list = i->next; 2593 idev->mc_list = i->next;
2582 write_unlock_bh(&idev->lock);
2583 2594
2584 igmp6_group_dropped(i); 2595 write_unlock_bh(&idev->lock);
2585 ma_put(i); 2596 ma_put(i);
2586
2587 write_lock_bh(&idev->lock); 2597 write_lock_bh(&idev->lock);
2588 } 2598 }
2589 write_unlock_bh(&idev->lock); 2599 write_unlock_bh(&idev->lock);
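The ipv6_mc_down() reordering matters because igmp6_group_dropped() can trigger mld_ifc_event() and re-arm the very timers being shut down; stopping the timers after the drops (and moving mld_clear_delrec() to ipv6_mc_destroy_dev()) closes that window. A toy demonstration of why stop-before-drop leaves a timer armed:

    #include <stdbool.h>
    #include <stdio.h>

    static bool timer_armed;

    static void stop_timer(void) { timer_armed = false; }

    static void drop_group(void)
    {
        /* dropping a group schedules a final state-change report */
        timer_armed = true;
    }

    int main(void)
    {
        /* old order: the drop re-arms the timer after shutdown */
        stop_timer();
        drop_group();
        printf("stop-first : armed=%d\n", timer_armed);

        /* fixed order, as in ipv6_mc_down() above */
        drop_group();
        stop_timer();
        printf("drop-first : armed=%d\n", timer_armed);
        return 0;
    }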
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index d5263dc364a9..b12e61b7b16c 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -72,10 +72,10 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
72 return ret; 72 return ret;
73} 73}
74 74
75static bool rpfilter_is_local(const struct sk_buff *skb) 75static bool
76rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
76{ 77{
77 const struct rt6_info *rt = (const void *) skb_dst(skb); 78 return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
78 return rt && (rt->rt6i_flags & RTF_LOCAL);
79} 79}
80 80
81static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) 81static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -85,7 +85,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
85 struct ipv6hdr *iph; 85 struct ipv6hdr *iph;
86 bool invert = info->flags & XT_RPFILTER_INVERT; 86 bool invert = info->flags & XT_RPFILTER_INVERT;
87 87
88 if (rpfilter_is_local(skb)) 88 if (rpfilter_is_loopback(skb, xt_in(par)))
89 return true ^ invert; 89 return true ^ invert;
90 90
91 iph = ipv6_hdr(skb); 91 iph = ipv6_hdr(skb);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 10090400c72f..eedee5d108d9 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
157 fl6.fl6_sport = otcph->dest; 157 fl6.fl6_sport = otcph->dest;
158 fl6.fl6_dport = otcph->source; 158 fl6.fl6_dport = otcph->source;
159 fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev); 159 fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
160 fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
160 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); 161 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
161 dst = ip6_route_output(net, NULL, &fl6); 162 dst = ip6_route_output(net, NULL, &fl6);
162 if (dst->error) { 163 if (dst->error) {
@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
180 181
181 skb_dst_set(nskb, dst); 182 skb_dst_set(nskb, dst);
182 183
184 nskb->mark = fl6.flowi6_mark;
185
183 skb_reserve(nskb, hh_len + dst->header_len); 186 skb_reserve(nskb, hh_len + dst->header_len);
184 ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, 187 ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
185 ip6_dst_hoplimit(dst)); 188 ip6_dst_hoplimit(dst));
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index c947aad8bcc6..765facf03d45 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -18,13 +18,6 @@
18#include <net/ip6_fib.h> 18#include <net/ip6_fib.h>
19#include <net/ip6_route.h> 19#include <net/ip6_route.h>
20 20
21static bool fib6_is_local(const struct sk_buff *skb)
22{
23 const struct rt6_info *rt = (const void *)skb_dst(skb);
24
25 return rt && (rt->rt6i_flags & RTF_LOCAL);
26}
27
28static int get_ifindex(const struct net_device *dev) 21static int get_ifindex(const struct net_device *dev)
29{ 22{
30 return dev ? dev->ifindex : 0; 23 return dev ? dev->ifindex : 0;
@@ -164,8 +157,10 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
164 157
165 lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif); 158 lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif);
166 159
167 if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib6_is_local(pkt->skb)) { 160 if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
168 nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX); 161 nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
162 nft_fib_store_result(dest, priv->result, pkt,
163 nft_in(pkt)->ifindex);
169 return; 164 return;
170 } 165 }
171 166
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8417c41d8ec8..7ea85370c11c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1464,7 +1464,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
1464 struct fib6_node *fn; 1464 struct fib6_node *fn;
1465 1465
1466 /* Get the "current" route for this destination and 1466 /* Get the "current" route for this destination and
1467 * check if the redirect has come from approriate router. 1467 * check if the redirect has come from appropriate router.
1468 * 1468 *
1469 * RFC 4861 specifies that redirects should only be 1469 * RFC 4861 specifies that redirects should only be
1470 * accepted if they come from the nexthop to the target. 1470 * accepted if they come from the nexthop to the target.
@@ -2768,7 +2768,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2768 old MTU is the lowest MTU in the path, update the route PMTU 2768 old MTU is the lowest MTU in the path, update the route PMTU
2769 to reflect the increase. In this case if the other nodes' MTU 2769 to reflect the increase. In this case if the other nodes' MTU
2770 also have the lowest MTU, TOO BIG MESSAGE will be lead to 2770 also have the lowest MTU, TOO BIG MESSAGE will be lead to
2771 PMTU discouvery. 2771 PMTU discovery.
2772 */ 2772 */
2773 if (rt->dst.dev == arg->dev && 2773 if (rt->dst.dev == arg->dev &&
2774 dst_metric_raw(&rt->dst, RTAX_MTU) && 2774 dst_metric_raw(&rt->dst, RTAX_MTU) &&
@@ -2896,6 +2896,11 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2896 if (tb[RTA_MULTIPATH]) { 2896 if (tb[RTA_MULTIPATH]) {
2897 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]); 2897 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2898 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); 2898 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2899
2900 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
2901 cfg->fc_mp_len);
2902 if (err < 0)
2903 goto errout;
2899 } 2904 }
2900 2905
2901 if (tb[RTA_PREF]) { 2906 if (tb[RTA_PREF]) {
@@ -2909,9 +2914,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2909 if (tb[RTA_ENCAP]) 2914 if (tb[RTA_ENCAP])
2910 cfg->fc_encap = tb[RTA_ENCAP]; 2915 cfg->fc_encap = tb[RTA_ENCAP];
2911 2916
2912 if (tb[RTA_ENCAP_TYPE]) 2917 if (tb[RTA_ENCAP_TYPE]) {
2913 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]); 2918 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
2914 2919
2920 err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
2921 if (err < 0)
2922 goto errout;
2923 }
2924
2915 if (tb[RTA_EXPIRES]) { 2925 if (tb[RTA_EXPIRES]) {
2916 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ); 2926 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
2917 2927
@@ -3317,7 +3327,8 @@ static int rt6_fill_node(struct net *net,
3317 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags))) 3327 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
3318 goto nla_put_failure; 3328 goto nla_put_failure;
3319 3329
3320 lwtunnel_fill_encap(skb, rt->dst.lwtstate); 3330 if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
3331 goto nla_put_failure;
3321 3332
3322 nlmsg_end(skb, nlh); 3333 nlmsg_end(skb, nlh);
3323 return 0; 3334 return 0;
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index b172d85c650a..a855eb325b03 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -176,6 +176,8 @@ static int seg6_genl_set_tunsrc(struct sk_buff *skb, struct genl_info *info)
176 176
177 val = nla_data(info->attrs[SEG6_ATTR_DST]); 177 val = nla_data(info->attrs[SEG6_ATTR_DST]);
178 t_new = kmemdup(val, sizeof(*val), GFP_KERNEL); 178 t_new = kmemdup(val, sizeof(*val), GFP_KERNEL);
179 if (!t_new)
180 return -ENOMEM;
179 181
180 mutex_lock(&sdata->lock); 182 mutex_lock(&sdata->lock);
181 183
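The seg6 hunk adds the missing out-of-memory check on kmemdup() before the copy is installed as the tunnel source. The same shape in userspace terms, with illustrative names:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    static void *memdup(const void *src, size_t len)
    {
        void *p = malloc(len);

        if (p)
            memcpy(p, src, len);
        return p;
    }

    static int set_tunnel_src(void **slot, const void *val, size_t len)
    {
        void *copy = memdup(val, len);

        if (!copy)
            return -ENOMEM;   /* the check the patch adds */
        free(*slot);
        *slot = copy;
        return 0;
    }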
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index ef1c8a46e7ac..6ef3dfb6e811 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -174,7 +174,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
174 * hash function (RadioGatun) with up to 1216 bits 174 * hash function (RadioGatun) with up to 1216 bits
175 */ 175 */
176 176
177 /* saddr(16) + first_seg(1) + cleanup(1) + keyid(4) + seglist(16n) */ 177 /* saddr(16) + first_seg(1) + flags(1) + keyid(4) + seglist(16n) */
178 plen = 16 + 1 + 1 + 4 + (hdr->first_segment + 1) * 16; 178 plen = 16 + 1 + 1 + 4 + (hdr->first_segment + 1) * 16;
179 179
180 /* this limit allows for 14 segments */ 180 /* this limit allows for 14 segments */
@@ -186,7 +186,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
186 * 186 *
187 * 1. Source IPv6 address (128 bits) 187 * 1. Source IPv6 address (128 bits)
188 * 2. first_segment value (8 bits) 188 * 2. first_segment value (8 bits)
189 * 3. cleanup flag (8 bits: highest bit is cleanup value, others are 0) 189 * 3. Flags (8 bits)
190 * 4. HMAC Key ID (32 bits) 190 * 4. HMAC Key ID (32 bits)
191 * 5. All segments in the segments list (n * 128 bits) 191 * 5. All segments in the segments list (n * 128 bits)
192 */ 192 */
@@ -202,8 +202,8 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
202 /* first_segment value */ 202 /* first_segment value */
203 *off++ = hdr->first_segment; 203 *off++ = hdr->first_segment;
204 204
205 /* cleanup flag */ 205 /* flags */
206 *off++ = !!(sr_has_cleanup(hdr)) << 7; 206 *off++ = hdr->flags;
207 207
208 /* HMAC Key ID */ 208 /* HMAC Key ID */
209 memcpy(off, &hmackeyid, 4); 209 memcpy(off, &hmackeyid, 4);
@@ -400,7 +400,7 @@ static int seg6_hmac_init_algo(void)
400 *p_tfm = tfm; 400 *p_tfm = tfm;
401 } 401 }
402 402
403 p_tfm = this_cpu_ptr(algo->tfms); 403 p_tfm = raw_cpu_ptr(algo->tfms);
404 tfm = *p_tfm; 404 tfm = *p_tfm;
405 405
406 shsize = sizeof(*shash) + crypto_shash_descsize(tfm); 406 shsize = sizeof(*shash) + crypto_shash_descsize(tfm);
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index bbfca22c34ae..c46f8cbf5ab5 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -265,7 +265,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
265 slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate); 265 slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
266 266
267#ifdef CONFIG_DST_CACHE 267#ifdef CONFIG_DST_CACHE
268 preempt_disable();
268 dst = dst_cache_get(&slwt->cache); 269 dst = dst_cache_get(&slwt->cache);
270 preempt_enable();
269#endif 271#endif
270 272
271 if (unlikely(!dst)) { 273 if (unlikely(!dst)) {
@@ -286,7 +288,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
286 } 288 }
287 289
288#ifdef CONFIG_DST_CACHE 290#ifdef CONFIG_DST_CACHE
291 preempt_disable();
289 dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr); 292 dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
293 preempt_enable();
290#endif 294#endif
291 } 295 }
292 296
@@ -418,6 +422,7 @@ static const struct lwtunnel_encap_ops seg6_iptun_ops = {
418 .fill_encap = seg6_fill_encap_info, 422 .fill_encap = seg6_fill_encap_info,
419 .get_encap_size = seg6_encap_nlsize, 423 .get_encap_size = seg6_encap_nlsize,
420 .cmp_encap = seg6_encap_cmp, 424 .cmp_encap = seg6_encap_cmp,
425 .owner = THIS_MODULE,
421}; 426};
422 427
423int __init seg6_iptunnel_init(void) 428int __init seg6_iptunnel_init(void)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index fad992ad4bc8..99853c6e33a8 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1380,6 +1380,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
1380 err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL); 1380 err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1381 if (err) { 1381 if (err) {
1382 free_percpu(dev->tstats); 1382 free_percpu(dev->tstats);
1383 dev->tstats = NULL;
1383 return err; 1384 return err;
1384 } 1385 }
1385 1386
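ipip6_tunnel_init() frees dev->tstats on the dst_cache_init() failure path, but the generic netdev destructor frees tstats again later; clearing the pointer turns the second free into a no-op. The rule in miniature, names illustrative:

    #include <stdlib.h>

    struct tun { long *stats; };   /* stands in for dev->tstats */

    static int second_init_step(void)
    {
        return -1;                 /* pretend dst_cache_init() failed */
    }

    static int tun_init(struct tun *t)
    {
        t->stats = calloc(1, sizeof(*t->stats));
        if (!t->stats)
            return -1;

        if (second_init_step() < 0) {
            free(t->stats);
            t->stats = NULL;       /* the line the patch adds */
            return -1;
        }
        return 0;
    }

    static void tun_destroy(struct tun *t)
    {
        free(t->stats);            /* safe: free(NULL) is a no-op */
    }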
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 73bc8fc68acd..4c60c6f71cd3 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -148,8 +148,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
148 * connect() to INADDR_ANY means loopback (BSD'ism). 148 * connect() to INADDR_ANY means loopback (BSD'ism).
149 */ 149 */
150 150
151 if (ipv6_addr_any(&usin->sin6_addr)) 151 if (ipv6_addr_any(&usin->sin6_addr)) {
152 usin->sin6_addr.s6_addr[15] = 0x1; 152 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
153 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
154 &usin->sin6_addr);
155 else
156 usin->sin6_addr = in6addr_loopback;
157 }
153 158
154 addr_type = ipv6_addr_type(&usin->sin6_addr); 159 addr_type = ipv6_addr_type(&usin->sin6_addr);
155 160
@@ -188,7 +193,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
188 * TCP over IPv4 193 * TCP over IPv4
189 */ 194 */
190 195
191 if (addr_type == IPV6_ADDR_MAPPED) { 196 if (addr_type & IPV6_ADDR_MAPPED) {
192 u32 exthdrlen = icsk->icsk_ext_hdr_len; 197 u32 exthdrlen = icsk->icsk_ext_hdr_len;
193 struct sockaddr_in sin; 198 struct sockaddr_in sin;
194 199
@@ -469,7 +474,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
469 opt = ireq->ipv6_opt; 474 opt = ireq->ipv6_opt;
470 if (!opt) 475 if (!opt)
471 opt = rcu_dereference(np->opt); 476 opt = rcu_dereference(np->opt);
472 err = ip6_xmit(sk, skb, fl6, opt, np->tclass); 477 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
473 rcu_read_unlock(); 478 rcu_read_unlock();
474 err = net_xmit_eval(err); 479 err = net_xmit_eval(err);
475 } 480 }
@@ -840,7 +845,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
840 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); 845 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
841 if (!IS_ERR(dst)) { 846 if (!IS_ERR(dst)) {
842 skb_dst_set(buff, dst); 847 skb_dst_set(buff, dst);
843 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); 848 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
844 TCP_INC_STATS(net, TCP_MIB_OUTSEGS); 849 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
845 if (rst) 850 if (rst)
846 TCP_INC_STATS(net, TCP_MIB_OUTRSTS); 851 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
@@ -991,6 +996,16 @@ drop:
991 return 0; /* don't send reset */ 996 return 0; /* don't send reset */
992} 997}
993 998
999static void tcp_v6_restore_cb(struct sk_buff *skb)
1000{
1001 /* We need to move header back to the beginning if xfrm6_policy_check()
1002 * and tcp_v6_fill_cb() are going to be called again.
1003 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1004 */
1005 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1006 sizeof(struct inet6_skb_parm));
1007}
1008
994static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, 1009static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
995 struct request_sock *req, 1010 struct request_sock *req,
996 struct dst_entry *dst, 1011 struct dst_entry *dst,
@@ -1182,8 +1197,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1182 sk_gfp_mask(sk, GFP_ATOMIC)); 1197 sk_gfp_mask(sk, GFP_ATOMIC));
1183 consume_skb(ireq->pktopts); 1198 consume_skb(ireq->pktopts);
1184 ireq->pktopts = NULL; 1199 ireq->pktopts = NULL;
1185 if (newnp->pktoptions) 1200 if (newnp->pktoptions) {
1201 tcp_v6_restore_cb(newnp->pktoptions);
1186 skb_set_owner_r(newnp->pktoptions, newsk); 1202 skb_set_owner_r(newnp->pktoptions, newsk);
1203 }
1187 } 1204 }
1188 } 1205 }
1189 1206
@@ -1198,16 +1215,6 @@ out:
1198 return NULL; 1215 return NULL;
1199} 1216}
1200 1217
1201static void tcp_v6_restore_cb(struct sk_buff *skb)
1202{
1203 /* We need to move header back to the beginning if xfrm6_policy_check()
1204 * and tcp_v6_fill_cb() are going to be called again.
1205 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1206 */
1207 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1208 sizeof(struct inet6_skb_parm));
1209}
1210
1211/* The socket must have it's spinlock held when we get 1218/* The socket must have it's spinlock held when we get
1212 * here, unless it is a TCP_LISTEN socket. 1219 * here, unless it is a TCP_LISTEN socket.
1213 * 1220 *
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4d5c4eee4b3f..221825a9407a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -441,7 +441,7 @@ try_again:
441 return err; 441 return err;
442 442
443csum_copy_err: 443csum_copy_err:
444 if (!__sk_queue_drop_skb(sk, skb, flags)) { 444 if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
445 if (is_udp4) { 445 if (is_udp4) {
446 UDP_INC_STATS(sock_net(sk), 446 UDP_INC_STATS(sock_net(sk),
447 UDP_MIB_CSUMERRORS, is_udplite); 447 UDP_MIB_CSUMERRORS, is_udplite);
@@ -1033,6 +1033,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1033 if (addr_len < SIN6_LEN_RFC2133) 1033 if (addr_len < SIN6_LEN_RFC2133)
1034 return -EINVAL; 1034 return -EINVAL;
1035 daddr = &sin6->sin6_addr; 1035 daddr = &sin6->sin6_addr;
1036 if (ipv6_addr_any(daddr) &&
1037 ipv6_addr_v4mapped(&np->saddr))
1038 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1039 daddr);
1036 break; 1040 break;
1037 case AF_INET: 1041 case AF_INET:
1038 goto do_udp_sendmsg; 1042 goto do_udp_sendmsg;
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index acbe61c7e683..160dc89335e2 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new);
383 * for deallocating this structure if it's complex. If not the user can 383 * for deallocating this structure if it's complex. If not the user can
384 * just supply kfree, which should take care of the job. 384 * just supply kfree, which should take care of the job.
385 */ 385 */
386#ifdef CONFIG_LOCKDEP
387static int hashbin_lock_depth = 0;
388#endif
389int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func) 386int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
390{ 387{
391 irda_queue_t* queue; 388 irda_queue_t* queue;
@@ -396,22 +393,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
396 IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;); 393 IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
397 394
398 /* Synchronize */ 395 /* Synchronize */
399 if ( hashbin->hb_type & HB_LOCK ) { 396 if (hashbin->hb_type & HB_LOCK)
400 spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags, 397 spin_lock_irqsave(&hashbin->hb_spinlock, flags);
401 hashbin_lock_depth++);
402 }
403 398
404 /* 399 /*
405 * Free the entries in the hashbin, TODO: use hashbin_clear when 400 * Free the entries in the hashbin, TODO: use hashbin_clear when
406 * it has been shown to work 401 * it has been shown to work
407 */ 402 */
408 for (i = 0; i < HASHBIN_SIZE; i ++ ) { 403 for (i = 0; i < HASHBIN_SIZE; i ++ ) {
409 queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); 404 while (1) {
410 while (queue ) { 405 queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
411 if (free_func) 406
412 (*free_func)(queue); 407 if (!queue)
413 queue = dequeue_first( 408 break;
414 (irda_queue_t**) &hashbin->hb_queue[i]); 409
410 if (free_func) {
411 if (hashbin->hb_type & HB_LOCK)
412 spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
413 free_func(queue);
414 if (hashbin->hb_type & HB_LOCK)
415 spin_lock_irqsave(&hashbin->hb_spinlock, flags);
416 }
415 } 417 }
416 } 418 }
417 419
@@ -420,12 +422,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
420 hashbin->magic = ~HB_MAGIC; 422 hashbin->magic = ~HB_MAGIC;
421 423
422 /* Release lock */ 424 /* Release lock */
423 if ( hashbin->hb_type & HB_LOCK) { 425 if (hashbin->hb_type & HB_LOCK)
424 spin_unlock_irqrestore(&hashbin->hb_spinlock, flags); 426 spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
425#ifdef CONFIG_LOCKDEP
426 hashbin_lock_depth--;
427#endif
428 }
429 427
430 /* 428 /*
431 * Free the hashbin structure 429 * Free the hashbin structure
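The irqueue.c change removes the lockdep nesting-depth hack by never calling free_func with hb_spinlock held: the lock is dropped around each callback and re-taken, so callbacks may take hashbin locks themselves without faked nesting levels. A pthread sketch of the same drop-the-lock-around-callbacks structure:

    #include <pthread.h>

    struct bin {
        pthread_mutex_t lock;
        void *items[16];
        int   n;
    };

    typedef void (*free_fn)(void *);

    static void bin_delete(struct bin *b, free_fn cb)
    {
        for (;;) {
            void *item;

            pthread_mutex_lock(&b->lock);
            item = b->n ? b->items[--b->n] : NULL;
            pthread_mutex_unlock(&b->lock);

            if (!item)
                break;
            if (cb)
                cb(item);   /* runs lock-free, may take other locks */
        }
    }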
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index cfb9e5f4e28f..13190b38f22e 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1044,7 +1044,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1044{ 1044{
1045 struct sock *sk = sock->sk; 1045 struct sock *sk = sock->sk;
1046 struct iucv_sock *iucv = iucv_sk(sk); 1046 struct iucv_sock *iucv = iucv_sk(sk);
1047 size_t headroom, linear; 1047 size_t headroom = 0;
1048 size_t linear;
1048 struct sk_buff *skb; 1049 struct sk_buff *skb;
1049 struct iucv_message txmsg = {0}; 1050 struct iucv_message txmsg = {0};
1050 struct cmsghdr *cmsg; 1051 struct cmsghdr *cmsg;
@@ -1122,18 +1123,20 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1122 * this is fine for SOCK_SEQPACKET (unless we want to support 1123 * this is fine for SOCK_SEQPACKET (unless we want to support
1123 * segmented records using the MSG_EOR flag), but 1124 * segmented records using the MSG_EOR flag), but
1124 * for SOCK_STREAM we might want to improve it in future */ 1125 * for SOCK_STREAM we might want to improve it in future */
1125 headroom = (iucv->transport == AF_IUCV_TRANS_HIPER) 1126 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1126 ? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0; 1127 headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
1127 if (headroom + len < PAGE_SIZE) {
1128 linear = len; 1128 linear = len;
1129 } else { 1129 } else {
1130 /* In nonlinear "classic" iucv skb, 1130 if (len < PAGE_SIZE) {
1131 * reserve space for iucv_array 1131 linear = len;
1132 */ 1132 } else {
1133 if (iucv->transport != AF_IUCV_TRANS_HIPER) 1133 /* In nonlinear "classic" iucv skb,
1134 headroom += sizeof(struct iucv_array) * 1134 * reserve space for iucv_array
1135 (MAX_SKB_FRAGS + 1); 1135 */
1136 linear = PAGE_SIZE - headroom; 1136 headroom = sizeof(struct iucv_array) *
1137 (MAX_SKB_FRAGS + 1);
1138 linear = PAGE_SIZE - headroom;
1139 }
1137 } 1140 }
1138 skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear, 1141 skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
1139 noblock, &err, 0); 1142 noblock, &err, 0);
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 7e08a4d3d77d..a646f3481240 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -929,23 +929,25 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
929 goto out_error; 929 goto out_error;
930 } 930 }
931 931
932 /* New message, alloc head skb */ 932 if (msg_data_left(msg)) {
933 head = alloc_skb(0, sk->sk_allocation); 933 /* New message, alloc head skb */
934 while (!head) {
935 kcm_push(kcm);
936 err = sk_stream_wait_memory(sk, &timeo);
937 if (err)
938 goto out_error;
939
940 head = alloc_skb(0, sk->sk_allocation); 934 head = alloc_skb(0, sk->sk_allocation);
941 } 935 while (!head) {
936 kcm_push(kcm);
937 err = sk_stream_wait_memory(sk, &timeo);
938 if (err)
939 goto out_error;
942 940
943 skb = head; 941 head = alloc_skb(0, sk->sk_allocation);
942 }
944 943
945 /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling 944 skb = head;
946 * csum_and_copy_from_iter from skb_do_copy_data_nocache. 945
947 */ 946 /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
948 skb->ip_summed = CHECKSUM_UNNECESSARY; 947 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
948 */
949 skb->ip_summed = CHECKSUM_UNNECESSARY;
950 }
949 951
950start: 952start:
951 while (msg_data_left(msg)) { 953 while (msg_data_left(msg)) {
@@ -1018,10 +1020,12 @@ wait_for_memory:
1018 if (eor) { 1020 if (eor) {
1019 bool not_busy = skb_queue_empty(&sk->sk_write_queue); 1021 bool not_busy = skb_queue_empty(&sk->sk_write_queue);
1020 1022
1021 /* Message complete, queue it on send buffer */ 1023 if (head) {
1022 __skb_queue_tail(&sk->sk_write_queue, head); 1024 /* Message complete, queue it on send buffer */
1023 kcm->seq_skb = NULL; 1025 __skb_queue_tail(&sk->sk_write_queue, head);
1024 KCM_STATS_INCR(kcm->stats.tx_msgs); 1026 kcm->seq_skb = NULL;
1027 KCM_STATS_INCR(kcm->stats.tx_msgs);
1028 }
1025 1029
1026 if (msg->msg_flags & MSG_BATCH) { 1030 if (msg->msg_flags & MSG_BATCH) {
1027 kcm->tx_wait_more = true; 1031 kcm->tx_wait_more = true;
@@ -1040,8 +1044,10 @@ wait_for_memory:
1040 } else { 1044 } else {
1041 /* Message not complete, save state */ 1045 /* Message not complete, save state */
1042partial_message: 1046partial_message:
1043 kcm->seq_skb = head; 1047 if (head) {
1044 kcm_tx_msg(head)->last_skb = skb; 1048 kcm->seq_skb = head;
1049 kcm_tx_msg(head)->last_skb = skb;
1050 }
1045 } 1051 }
1046 1052
1047 KCM_STATS_ADD(kcm->stats.tx_bytes, copied); 1053 KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
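kcm_sendmsg() previously allocated and queued a zero-byte message head even when the caller passed no data; the patch allocates the head only when msg_data_left() is non-zero and NULL-checks it at the queue and save-state points. A sketch of the guard, names illustrative:

    #include <stddef.h>
    #include <stdlib.h>

    struct msg_head { size_t len; };

    static struct msg_head *queued;   /* stand-in for the send queue */

    static int sendmsg_sketch(size_t len)
    {
        struct msg_head *head = NULL;

        if (len > 0) {                /* the guard the patch adds */
            head = calloc(1, sizeof(*head));
            if (!head)
                return -1;
            head->len = len;
        }
        /* ... copy payload, possibly over several iterations ... */
        if (head)                     /* empty sends queue nothing */
            queued = head;
        return 0;
    }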
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 8f560f7140a0..aebf281d09ee 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -263,6 +263,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
263int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, 263int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
264 const struct l2tp_nl_cmd_ops *ops); 264 const struct l2tp_nl_cmd_ops *ops);
265void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); 265void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
266int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
266 267
267/* Session reference counts. Incremented when code obtains a reference 268/* Session reference counts. Incremented when code obtains a reference
268 * to a session. 269 * to a session.
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 8938b6ba57a0..28c21546d5b6 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -11,6 +11,7 @@
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 13
14#include <asm/ioctls.h>
14#include <linux/icmp.h> 15#include <linux/icmp.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/skbuff.h> 17#include <linux/skbuff.h>
@@ -47,7 +48,8 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
47 return (struct l2tp_ip_sock *)sk; 48 return (struct l2tp_ip_sock *)sk;
48} 49}
49 50
50static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) 51static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
52 __be32 raddr, int dif, u32 tunnel_id)
51{ 53{
52 struct sock *sk; 54 struct sock *sk;
53 55
@@ -61,6 +63,7 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
61 if ((l2tp->conn_id == tunnel_id) && 63 if ((l2tp->conn_id == tunnel_id) &&
62 net_eq(sock_net(sk), net) && 64 net_eq(sock_net(sk), net) &&
63 !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && 65 !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
66 (!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) &&
64 (!sk->sk_bound_dev_if || !dif || 67 (!sk->sk_bound_dev_if || !dif ||
65 sk->sk_bound_dev_if == dif)) 68 sk->sk_bound_dev_if == dif))
66 goto found; 69 goto found;
@@ -71,15 +74,6 @@ found:
71 return sk; 74 return sk;
72} 75}
73 76
74static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
75{
76 struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
77 if (sk)
78 sock_hold(sk);
79
80 return sk;
81}
82
83/* When processing receive frames, there are two cases to 77/* When processing receive frames, there are two cases to
84 * consider. Data frames consist of a non-zero session-id and an 78 * consider. Data frames consist of a non-zero session-id and an
85 * optional cookie. Control frames consist of a regular L2TP header 79 * optional cookie. Control frames consist of a regular L2TP header
@@ -183,8 +177,8 @@ pass_up:
183 struct iphdr *iph = (struct iphdr *) skb_network_header(skb); 177 struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
184 178
185 read_lock_bh(&l2tp_ip_lock); 179 read_lock_bh(&l2tp_ip_lock);
186 sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb), 180 sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
187 tunnel_id); 181 inet_iif(skb), tunnel_id);
188 if (!sk) { 182 if (!sk) {
189 read_unlock_bh(&l2tp_ip_lock); 183 read_unlock_bh(&l2tp_ip_lock);
190 goto discard; 184 goto discard;
@@ -280,7 +274,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
280 inet->inet_saddr = 0; /* Use device */ 274 inet->inet_saddr = 0; /* Use device */
281 275
282 write_lock_bh(&l2tp_ip_lock); 276 write_lock_bh(&l2tp_ip_lock);
283 if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 277 if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
284 sk->sk_bound_dev_if, addr->l2tp_conn_id)) { 278 sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
285 write_unlock_bh(&l2tp_ip_lock); 279 write_unlock_bh(&l2tp_ip_lock);
286 ret = -EADDRINUSE; 280 ret = -EADDRINUSE;
@@ -560,6 +554,30 @@ out:
560 return err ? err : copied; 554 return err ? err : copied;
561} 555}
562 556
557int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
558{
559 struct sk_buff *skb;
560 int amount;
561
562 switch (cmd) {
563 case SIOCOUTQ:
564 amount = sk_wmem_alloc_get(sk);
565 break;
566 case SIOCINQ:
567 spin_lock_bh(&sk->sk_receive_queue.lock);
568 skb = skb_peek(&sk->sk_receive_queue);
569 amount = skb ? skb->len : 0;
570 spin_unlock_bh(&sk->sk_receive_queue.lock);
571 break;
572
573 default:
574 return -ENOIOCTLCMD;
575 }
576
577 return put_user(amount, (int __user *)arg);
578}
579EXPORT_SYMBOL(l2tp_ioctl);
580
563static struct proto l2tp_ip_prot = { 581static struct proto l2tp_ip_prot = {
564 .name = "L2TP/IP", 582 .name = "L2TP/IP",
565 .owner = THIS_MODULE, 583 .owner = THIS_MODULE,
@@ -568,7 +586,7 @@ static struct proto l2tp_ip_prot = {
568 .bind = l2tp_ip_bind, 586 .bind = l2tp_ip_bind,
569 .connect = l2tp_ip_connect, 587 .connect = l2tp_ip_connect,
570 .disconnect = l2tp_ip_disconnect, 588 .disconnect = l2tp_ip_disconnect,
571 .ioctl = udp_ioctl, 589 .ioctl = l2tp_ioctl,
572 .destroy = l2tp_ip_destroy_sock, 590 .destroy = l2tp_ip_destroy_sock,
573 .setsockopt = ip_setsockopt, 591 .setsockopt = ip_setsockopt,
574 .getsockopt = ip_getsockopt, 592 .getsockopt = ip_getsockopt,
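Switching .ioctl from udp_ioctl to the new l2tp_ioctl() makes SIOCINQ/SIOCOUTQ meaningful on L2TP/IP sockets; udp_ioctl() sizes the next datagram assuming UDP framing these sockets do not carry. Usage from userspace, assuming fd is an open L2TP/IP socket:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/sockios.h>   /* SIOCINQ, SIOCOUTQ */

    static void report_queues(int fd)
    {
        int inq = 0, outq = 0;

        if (ioctl(fd, SIOCINQ, &inq) == 0)
            printf("bytes readable : %d\n", inq);
        if (ioctl(fd, SIOCOUTQ, &outq) == 0)
            printf("bytes unsent   : %d\n", outq);
    }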
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index f092ac441fdd..f47c45250f86 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -59,12 +59,14 @@ static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
59 59
60static struct sock *__l2tp_ip6_bind_lookup(struct net *net, 60static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
61 struct in6_addr *laddr, 61 struct in6_addr *laddr,
62 const struct in6_addr *raddr,
62 int dif, u32 tunnel_id) 63 int dif, u32 tunnel_id)
63{ 64{
64 struct sock *sk; 65 struct sock *sk;
65 66
66 sk_for_each_bound(sk, &l2tp_ip6_bind_table) { 67 sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
67 const struct in6_addr *addr = inet6_rcv_saddr(sk); 68 const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
69 const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
68 struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); 70 struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
69 71
70 if (l2tp == NULL) 72 if (l2tp == NULL)
@@ -72,7 +74,8 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
72 74
73 if ((l2tp->conn_id == tunnel_id) && 75 if ((l2tp->conn_id == tunnel_id) &&
74 net_eq(sock_net(sk), net) && 76 net_eq(sock_net(sk), net) &&
75 (!addr || ipv6_addr_equal(addr, laddr)) && 77 (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) &&
78 (!raddr || ipv6_addr_any(sk_raddr) || ipv6_addr_equal(sk_raddr, raddr)) &&
76 (!sk->sk_bound_dev_if || !dif || 79 (!sk->sk_bound_dev_if || !dif ||
77 sk->sk_bound_dev_if == dif)) 80 sk->sk_bound_dev_if == dif))
78 goto found; 81 goto found;
@@ -83,17 +86,6 @@ found:
83 return sk; 86 return sk;
84} 87}
85 88
86static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
87 struct in6_addr *laddr,
88 int dif, u32 tunnel_id)
89{
90 struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
91 if (sk)
92 sock_hold(sk);
93
94 return sk;
95}
96
97/* When processing receive frames, there are two cases to 89/* When processing receive frames, there are two cases to
98 * consider. Data frames consist of a non-zero session-id and an 90 * consider. Data frames consist of a non-zero session-id and an
99 * optional cookie. Control frames consist of a regular L2TP header 91 * optional cookie. Control frames consist of a regular L2TP header
@@ -197,8 +189,8 @@ pass_up:
197 struct ipv6hdr *iph = ipv6_hdr(skb); 189 struct ipv6hdr *iph = ipv6_hdr(skb);
198 190
199 read_lock_bh(&l2tp_ip6_lock); 191 read_lock_bh(&l2tp_ip6_lock);
200 sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb), 192 sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
201 tunnel_id); 193 inet6_iif(skb), tunnel_id);
202 if (!sk) { 194 if (!sk) {
203 read_unlock_bh(&l2tp_ip6_lock); 195 read_unlock_bh(&l2tp_ip6_lock);
204 goto discard; 196 goto discard;
@@ -330,7 +322,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
330 rcu_read_unlock(); 322 rcu_read_unlock();
331 323
332 write_lock_bh(&l2tp_ip6_lock); 324 write_lock_bh(&l2tp_ip6_lock);
333 if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if, 325 if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
334 addr->l2tp_conn_id)) { 326 addr->l2tp_conn_id)) {
335 write_unlock_bh(&l2tp_ip6_lock); 327 write_unlock_bh(&l2tp_ip6_lock);
336 err = -EADDRINUSE; 328 err = -EADDRINUSE;
@@ -730,7 +722,7 @@ static struct proto l2tp_ip6_prot = {
730 .bind = l2tp_ip6_bind, 722 .bind = l2tp_ip6_bind,
731 .connect = l2tp_ip6_connect, 723 .connect = l2tp_ip6_connect,
732 .disconnect = l2tp_ip6_disconnect, 724 .disconnect = l2tp_ip6_disconnect,
733 .ioctl = udp_ioctl, 725 .ioctl = l2tp_ioctl,
734 .destroy = l2tp_ip6_destroy_sock, 726 .destroy = l2tp_ip6_destroy_sock,
735 .setsockopt = ipv6_setsockopt, 727 .setsockopt = ipv6_setsockopt,
736 .getsockopt = ipv6_getsockopt, 728 .getsockopt = ipv6_getsockopt,
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 3e821daf9dd4..8bc5a1bd2d45 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -821,7 +821,10 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
821 * another trick required to cope with how the PROCOM state 821 * another trick required to cope with how the PROCOM state
822 * machine works. -acme 822 * machine works. -acme
823 */ 823 */
824 skb_orphan(skb);
825 sock_hold(sk);
824 skb->sk = sk; 826 skb->sk = sk;
827 skb->destructor = sock_efree;
825 } 828 }
826 if (!sock_owned_by_user(sk)) 829 if (!sock_owned_by_user(sk))
827 llc_conn_rcv(sk, skb); 830 llc_conn_rcv(sk, skb);
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index d0e1e804ebd7..5404d0d195cc 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -290,7 +290,10 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
290 290
291 ev->type = LLC_SAP_EV_TYPE_PDU; 291 ev->type = LLC_SAP_EV_TYPE_PDU;
292 ev->reason = 0; 292 ev->reason = 0;
293 skb_orphan(skb);
294 sock_hold(sk);
293 skb->sk = sk; 295 skb->sk = sk;
296 skb->destructor = sock_efree;
294 llc_sap_state_process(sap, skb); 297 llc_sap_state_process(sap, skb);
295} 298}
296 299
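Both llc_conn.c and llc_sap.c stored a bare sk pointer in the skb, so the socket could be freed while queued skbs still pointed at it; the patch orphans the skb, takes a socket reference, and installs sock_efree() so the reference is dropped exactly when the skb dies. The ownership pattern in isolation, with simplified stand-in types:

    struct sock { int refcnt; };

    struct skb {
        struct sock *sk;
        void (*destructor)(struct skb *);
    };

    static void sock_hold(struct sock *sk) { sk->refcnt++; }
    static void sock_put(struct sock *sk)  { sk->refcnt--; }

    /* Mirrors sock_efree(): the reference is released with the skb. */
    static void skb_sock_release(struct skb *skb)
    {
        sock_put(skb->sk);
    }

    static void attach(struct skb *skb, struct sock *sk)
    {
        sock_hold(sk);            /* keep sk alive while skb holds it */
        skb->sk = sk;
        skb->destructor = skb_sock_release;
    }

    static void free_skb_sketch(struct skb *skb)
    {
        if (skb->destructor)
            skb->destructor(skb); /* paired drop, no dangling sk */
    }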
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index e75cbf6ecc26..a0d901d8992e 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -231,9 +231,6 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
231 !(sta->sdata->bss && sta->sdata->bss == sdata->bss)) 231 !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
232 continue; 232 continue;
233 233
234 if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC))
235 continue;
236
237 max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta)); 234 max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
238 } 235 }
239 rcu_read_unlock(); 236 rcu_read_unlock();
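[Note: chan.c drops the uploaded/WLAN_STA_ASSOC filter from the maximum-bandwidth walk, apparently so that stations the driver does not yet know about still count toward the required channel width; see also the vht.c hunk below, which keeps the minimum chandef in sync when a station's operating mode changes.]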
diff --git a/net/mac80211/fils_aead.c b/net/mac80211/fils_aead.c
index ecfdd97758a3..5c3af5eb4052 100644
--- a/net/mac80211/fils_aead.c
+++ b/net/mac80211/fils_aead.c
@@ -124,7 +124,7 @@ static int aes_siv_encrypt(const u8 *key, size_t key_len,
124 124
125 /* CTR */ 125 /* CTR */
126 126
127 tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0); 127 tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
128 if (IS_ERR(tfm2)) { 128 if (IS_ERR(tfm2)) {
129 kfree(tmp); 129 kfree(tmp);
130 return PTR_ERR(tfm2); 130 return PTR_ERR(tfm2);
@@ -183,7 +183,7 @@ static int aes_siv_decrypt(const u8 *key, size_t key_len,
183 183
184 /* CTR */ 184 /* CTR */
185 185
186 tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0); 186 tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
187 if (IS_ERR(tfm2)) 187 if (IS_ERR(tfm2))
188 return PTR_ERR(tfm2); 188 return PTR_ERR(tfm2);
189 /* K2 for CTR */ 189 /* K2 for CTR */
@@ -272,7 +272,7 @@ int fils_encrypt_assoc_req(struct sk_buff *skb,
272 crypt_len = skb->data + skb->len - encr; 272 crypt_len = skb->data + skb->len - encr;
273 skb_put(skb, AES_BLOCK_SIZE); 273 skb_put(skb, AES_BLOCK_SIZE);
274 return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len, 274 return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
275 encr, crypt_len, 1, addr, len, encr); 275 encr, crypt_len, 5, addr, len, encr);
276} 276}
277 277
278int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata, 278int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata,
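[Note: two independent fixes in fils_aead.c. Passing CRYPTO_ALG_ASYNC as the mask to crypto_alloc_skcipher() restricts the allocation to synchronous ctr(aes) implementations, which matters because these functions issue the request without waiting for an asynchronous completion. And aes_siv_encrypt() now passes 5 as the number of associated-data vectors instead of 1, so the SIV is computed over all five AAD elements the caller assembles, matching what the decrypt side already expects.]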
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 41497b670e2b..d37ae7dc114b 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -6,6 +6,7 @@
6 * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> 6 * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
7 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> 7 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
8 * Copyright 2013-2014 Intel Mobile Communications GmbH 8 * Copyright 2013-2014 Intel Mobile Communications GmbH
9 * Copyright (c) 2016 Intel Deutschland GmbH
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 12 * it under the terms of the GNU General Public License version 2 as
@@ -1295,6 +1296,26 @@ static void ieee80211_iface_work(struct work_struct *work)
1295 } else if (ieee80211_is_action(mgmt->frame_control) && 1296 } else if (ieee80211_is_action(mgmt->frame_control) &&
1296 mgmt->u.action.category == WLAN_CATEGORY_VHT) { 1297 mgmt->u.action.category == WLAN_CATEGORY_VHT) {
1297 switch (mgmt->u.action.u.vht_group_notif.action_code) { 1298 switch (mgmt->u.action.u.vht_group_notif.action_code) {
1299 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
1300 struct ieee80211_rx_status *status;
1301 enum nl80211_band band;
1302 u8 opmode;
1303
1304 status = IEEE80211_SKB_RXCB(skb);
1305 band = status->band;
1306 opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
1307
1308 mutex_lock(&local->sta_mtx);
1309 sta = sta_info_get_bss(sdata, mgmt->sa);
1310
1311 if (sta)
1312 ieee80211_vht_handle_opmode(sdata, sta,
1313 opmode,
1314 band);
1315
1316 mutex_unlock(&local->sta_mtx);
1317 break;
1318 }
1298 case WLAN_VHT_ACTION_GROUPID_MGMT: 1319 case WLAN_VHT_ACTION_GROUPID_MGMT:
1299 ieee80211_process_mu_groups(sdata, mgmt); 1320 ieee80211_process_mu_groups(sdata, mgmt);
1300 break; 1321 break;
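[Note: iface.c moves VHT operating-mode-notification handling into ieee80211_iface_work(). The RX path now queues the frame instead of handling it inline (see the rx.c hunk below, where the inline call is replaced by "goto queue"), and the work handler processes it in process context, where sta_mtx can be taken around the station lookup.]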
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 1822c77f2b1c..56fb47953b72 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -913,12 +913,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
913 supp_ht = supp_ht || sband->ht_cap.ht_supported; 913 supp_ht = supp_ht || sband->ht_cap.ht_supported;
914 supp_vht = supp_vht || sband->vht_cap.vht_supported; 914 supp_vht = supp_vht || sband->vht_cap.vht_supported;
915 915
916 if (sband->ht_cap.ht_supported) 916 if (!sband->ht_cap.ht_supported)
917 local->rx_chains = 917 continue;
918 max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
919 local->rx_chains);
920 918
921 /* TODO: consider VHT for RX chains, hopefully it's the same */ 919 /* TODO: consider VHT for RX chains, hopefully it's the same */
920 local->rx_chains =
921 max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
922 local->rx_chains);
923
924 /* no need to mask, SM_PS_DISABLED has all bits set */
925 sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
926 IEEE80211_HT_CAP_SM_PS_SHIFT;
922 } 927 }
923 928
924 /* if low-level driver supports AP, we also support VLAN */ 929 /* if low-level driver supports AP, we also support VLAN */
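[Note: the per-band loop in main.c gains an early "continue" for bands without HT support, and every HT-capable band now advertises SM_PS_DISABLED in its capability field; as the new comment notes, SM_PS_DISABLED has all bits of the SM PS subfield set, so it can be OR-ed in without masking the field first. The rx_chains computation itself is unchanged, just reindented under the new flow.]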
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 42120d965263..50e1b7f78bd4 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -339,7 +339,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
339 /* fast-forward to vendor IEs */ 339 /* fast-forward to vendor IEs */
340 offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0); 340 offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
341 341
342 if (offset) { 342 if (offset < ifmsh->ie_len) {
343 len = ifmsh->ie_len - offset; 343 len = ifmsh->ie_len - offset;
344 data = ifmsh->ie + offset; 344 data = ifmsh->ie + offset;
345 if (skb_tailroom(skb) < len) 345 if (skb_tailroom(skb) < len)
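[Note: mesh_add_vendor_ies() used "if (offset)" as the copy condition, which silently dropped vendor IEs whenever they sat at the very start of the configured IE buffer (offset 0). Comparing the offset against ifmsh->ie_len copies whenever vendor IEs are actually present and still does nothing when the split lands at the end of the buffer.]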
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3e289a64ed43..3090dd4342f6 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2472,7 +2472,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2472 if (!ifmsh->mshcfg.dot11MeshForwarding) 2472 if (!ifmsh->mshcfg.dot11MeshForwarding)
2473 goto out; 2473 goto out;
2474 2474
2475 fwd_skb = skb_copy_expand(skb, local->tx_headroom, 0, GFP_ATOMIC); 2475 fwd_skb = skb_copy_expand(skb, local->tx_headroom +
2476 sdata->encrypt_headroom, 0, GFP_ATOMIC);
2476 if (!fwd_skb) { 2477 if (!fwd_skb) {
2477 net_info_ratelimited("%s: failed to clone mesh frame\n", 2478 net_info_ratelimited("%s: failed to clone mesh frame\n",
2478 sdata->name); 2479 sdata->name);
@@ -2880,17 +2881,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2880 2881
2881 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 2882 switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
2882 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 2883 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
2883 u8 opmode;
2884
2885 /* verify opmode is present */ 2884 /* verify opmode is present */
2886 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 2885 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
2887 goto invalid; 2886 goto invalid;
2888 2887 goto queue;
2889 opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
2890
2891 ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
2892 opmode, status->band);
2893 goto handled;
2894 } 2888 }
2895 case WLAN_VHT_ACTION_GROUPID_MGMT: { 2889 case WLAN_VHT_ACTION_GROUPID_MGMT: {
2896 if (len < IEEE80211_MIN_ACTION_SIZE + 25) 2890 if (len < IEEE80211_MIN_ACTION_SIZE + 25)
@@ -3942,21 +3936,31 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
3942 u64_stats_update_end(&stats->syncp); 3936 u64_stats_update_end(&stats->syncp);
3943 3937
3944 if (fast_rx->internal_forward) { 3938 if (fast_rx->internal_forward) {
3945 struct sta_info *dsta = sta_info_get(rx->sdata, skb->data); 3939 struct sk_buff *xmit_skb = NULL;
3940 bool multicast = is_multicast_ether_addr(skb->data);
3941
3942 if (multicast) {
3943 xmit_skb = skb_copy(skb, GFP_ATOMIC);
3944 } else if (sta_info_get(rx->sdata, skb->data)) {
3945 xmit_skb = skb;
3946 skb = NULL;
3947 }
3946 3948
3947 if (dsta) { 3949 if (xmit_skb) {
3948 /* 3950 /*
3949 * Send to wireless media and increase priority by 256 3951 * Send to wireless media and increase priority by 256
3950 * to keep the received priority instead of 3952 * to keep the received priority instead of
3951 * reclassifying the frame (see cfg80211_classify8021d). 3953 * reclassifying the frame (see cfg80211_classify8021d).
3952 */ 3954 */
3953 skb->priority += 256; 3955 xmit_skb->priority += 256;
3954 skb->protocol = htons(ETH_P_802_3); 3956 xmit_skb->protocol = htons(ETH_P_802_3);
3955 skb_reset_network_header(skb); 3957 skb_reset_network_header(xmit_skb);
3956 skb_reset_mac_header(skb); 3958 skb_reset_mac_header(xmit_skb);
3957 dev_queue_xmit(skb); 3959 dev_queue_xmit(xmit_skb);
3958 return true;
3959 } 3960 }
3961
3962 if (!skb)
3963 return true;
3960 } 3964 }
3961 3965
3962 /* deliver to local stack */ 3966 /* deliver to local stack */
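[Note: two rx.c fixes. The mesh forwarding path now reserves sdata->encrypt_headroom on top of tx_headroom when copying the frame, so software encryption on retransmit does not have to reallocate. And the fast-rx bridging logic distinguishes multicast from unicast: multicast frames are copied so they are both forwarded and delivered to the local stack, while a unicast frame to a known peer is handed off entirely (skb set to NULL, and the function returns once it is consumed).]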
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index b6cfcf038c11..50c309094c37 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1501,8 +1501,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1501 1501
1502 /* This will evaluate to 1, 3, 5 or 7. */ 1502 /* This will evaluate to 1, 3, 5 or 7. */
1503 for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) 1503 for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++)
1504 if (ignored_acs & BIT(ac)) 1504 if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac]))
1505 continue; 1505 break;
1506 tid = 7 - 2 * ac; 1506 tid = 7 - 2 * ac;
1507 1507
1508 ieee80211_send_null_response(sta, tid, reason, true, false); 1508 ieee80211_send_null_response(sta, tid, reason, true, false);
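[Note: the sta_info.c fix repairs the AC scan in two ways. It tests the per-AC QoS mask (ieee80211_ac_to_qos_mask) instead of BIT(ac), whose bit order does not match the WMM QoS-info layout, and it breaks at the first deliverable AC instead of continuing, so that "ac" actually indexes the chosen access class when tid = 7 - 2 * ac is computed. A standalone check of the selection; the mask values below mirror the WMM QoS-info bit layout (VO=bit0, VI=bit1, BK=bit2, BE=bit3) and are stated here as an assumption:

    #include <stdio.h>

    enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

    static const unsigned char ac_to_qos_mask[NUM_ACS] = {
        1 << 0,     /* AC_VO */
        1 << 1,     /* AC_VI */
        1 << 3,     /* AC_BE */
        1 << 2,     /* AC_BK */
    };

    int main(void)
    {
        unsigned char ignored_acs = (1 << 0) | (1 << 1); /* VO, VI ignored */
        int ac;

        /* pick the first AC, in priority order, that is not ignored */
        for (ac = AC_VO; ac < NUM_ACS; ac++)
            if (!(ignored_acs & ac_to_qos_mask[ac]))
                break;

        /* evaluates to 7, 5, 3 or 1; here BE is chosen, giving tid 3 */
        printf("tid = %d\n", 7 - 2 * ac);
        return 0;
    }
]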
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 2c21b7039136..797e847cbc49 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1243,7 +1243,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1243 1243
1244static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, 1244static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
1245 struct ieee80211_vif *vif, 1245 struct ieee80211_vif *vif,
1246 struct ieee80211_sta *pubsta, 1246 struct sta_info *sta,
1247 struct sk_buff *skb) 1247 struct sk_buff *skb)
1248{ 1248{
1249 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1249 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -1257,10 +1257,13 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
1257 if (!ieee80211_is_data(hdr->frame_control)) 1257 if (!ieee80211_is_data(hdr->frame_control))
1258 return NULL; 1258 return NULL;
1259 1259
1260 if (pubsta) { 1260 if (sta) {
1261 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 1261 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
1262 1262
1263 txq = pubsta->txq[tid]; 1263 if (!sta->uploaded)
1264 return NULL;
1265
1266 txq = sta->sta.txq[tid];
1264 } else if (vif) { 1267 } else if (vif) {
1265 txq = vif->txq; 1268 txq = vif->txq;
1266 } 1269 }
@@ -1503,23 +1506,17 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
1503 struct fq *fq = &local->fq; 1506 struct fq *fq = &local->fq;
1504 struct ieee80211_vif *vif; 1507 struct ieee80211_vif *vif;
1505 struct txq_info *txqi; 1508 struct txq_info *txqi;
1506 struct ieee80211_sta *pubsta;
1507 1509
1508 if (!local->ops->wake_tx_queue || 1510 if (!local->ops->wake_tx_queue ||
1509 sdata->vif.type == NL80211_IFTYPE_MONITOR) 1511 sdata->vif.type == NL80211_IFTYPE_MONITOR)
1510 return false; 1512 return false;
1511 1513
1512 if (sta && sta->uploaded)
1513 pubsta = &sta->sta;
1514 else
1515 pubsta = NULL;
1516
1517 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1514 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1518 sdata = container_of(sdata->bss, 1515 sdata = container_of(sdata->bss,
1519 struct ieee80211_sub_if_data, u.ap); 1516 struct ieee80211_sub_if_data, u.ap);
1520 1517
1521 vif = &sdata->vif; 1518 vif = &sdata->vif;
1522 txqi = ieee80211_get_txq(local, vif, pubsta, skb); 1519 txqi = ieee80211_get_txq(local, vif, sta, skb);
1523 1520
1524 if (!txqi) 1521 if (!txqi)
1525 return false; 1522 return false;
@@ -3287,7 +3284,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
3287 int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2); 3284 int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2);
3288 int hw_headroom = sdata->local->hw.extra_tx_headroom; 3285 int hw_headroom = sdata->local->hw.extra_tx_headroom;
3289 struct ethhdr eth; 3286 struct ethhdr eth;
3290 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3287 struct ieee80211_tx_info *info;
3291 struct ieee80211_hdr *hdr = (void *)fast_tx->hdr; 3288 struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
3292 struct ieee80211_tx_data tx; 3289 struct ieee80211_tx_data tx;
3293 ieee80211_tx_result r; 3290 ieee80211_tx_result r;
@@ -3351,6 +3348,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
3351 memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN); 3348 memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
3352 memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN); 3349 memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);
3353 3350
3351 info = IEEE80211_SKB_CB(skb);
3354 memset(info, 0, sizeof(*info)); 3352 memset(info, 0, sizeof(*info));
3355 info->band = fast_tx->band; 3353 info->band = fast_tx->band;
3356 info->control.vif = &sdata->vif; 3354 info->control.vif = &sdata->vif;
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 6832bf6ab69f..43e45bb660bc 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -527,8 +527,10 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
527 527
528 u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band); 528 u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band);
529 529
530 if (changed > 0) 530 if (changed > 0) {
531 ieee80211_recalc_min_chandef(sdata);
531 rate_control_rate_update(local, sband, sta, changed); 532 rate_control_rate_update(local, sband, sta, changed);
533 }
532} 534}
533 535
534void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, 536void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
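[Note: completing the chan.c change above, an opmode notification that changes a station's bandwidth now triggers ieee80211_recalc_min_chandef() before the rate-control update, keeping the channel context's minimum width in sync.]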
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 15fe97644ffe..5b77377e5a15 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -98,18 +98,19 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
98} 98}
99EXPORT_SYMBOL_GPL(mpls_pkt_too_big); 99EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
100 100
101static u32 mpls_multipath_hash(struct mpls_route *rt, 101static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
102 struct sk_buff *skb, bool bos)
103{ 102{
104 struct mpls_entry_decoded dec; 103 struct mpls_entry_decoded dec;
104 unsigned int mpls_hdr_len = 0;
105 struct mpls_shim_hdr *hdr; 105 struct mpls_shim_hdr *hdr;
106 bool eli_seen = false; 106 bool eli_seen = false;
107 int label_index; 107 int label_index;
108 u32 hash = 0; 108 u32 hash = 0;
109 109
110 for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos; 110 for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
111 label_index++) { 111 label_index++) {
112 if (!pskb_may_pull(skb, sizeof(*hdr) * label_index)) 112 mpls_hdr_len += sizeof(*hdr);
113 if (!pskb_may_pull(skb, mpls_hdr_len))
113 break; 114 break;
114 115
115 /* Read and decode the current label */ 116 /* Read and decode the current label */
@@ -134,37 +135,38 @@ static u32 mpls_multipath_hash(struct mpls_route *rt,
134 eli_seen = true; 135 eli_seen = true;
135 } 136 }
136 137
137 bos = dec.bos; 138 if (!dec.bos)
138 if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index + 139 continue;
139 sizeof(struct iphdr))) { 140
141 /* found bottom label; does skb have room for a header? */
142 if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
140 const struct iphdr *v4hdr; 143 const struct iphdr *v4hdr;
141 144
142 v4hdr = (const struct iphdr *)(mpls_hdr(skb) + 145 v4hdr = (const struct iphdr *)(hdr + 1);
143 label_index);
144 if (v4hdr->version == 4) { 146 if (v4hdr->version == 4) {
145 hash = jhash_3words(ntohl(v4hdr->saddr), 147 hash = jhash_3words(ntohl(v4hdr->saddr),
146 ntohl(v4hdr->daddr), 148 ntohl(v4hdr->daddr),
147 v4hdr->protocol, hash); 149 v4hdr->protocol, hash);
148 } else if (v4hdr->version == 6 && 150 } else if (v4hdr->version == 6 &&
149 pskb_may_pull(skb, sizeof(*hdr) * label_index + 151 pskb_may_pull(skb, mpls_hdr_len +
150 sizeof(struct ipv6hdr))) { 152 sizeof(struct ipv6hdr))) {
151 const struct ipv6hdr *v6hdr; 153 const struct ipv6hdr *v6hdr;
152 154
153 v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) + 155 v6hdr = (const struct ipv6hdr *)(hdr + 1);
154 label_index);
155
156 hash = __ipv6_addr_jhash(&v6hdr->saddr, hash); 156 hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
157 hash = __ipv6_addr_jhash(&v6hdr->daddr, hash); 157 hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
158 hash = jhash_1word(v6hdr->nexthdr, hash); 158 hash = jhash_1word(v6hdr->nexthdr, hash);
159 } 159 }
160 } 160 }
161
162 break;
161 } 163 }
162 164
163 return hash; 165 return hash;
164} 166}
165 167
166static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt, 168static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
167 struct sk_buff *skb, bool bos) 169 struct sk_buff *skb)
168{ 170{
169 int alive = ACCESS_ONCE(rt->rt_nhn_alive); 171 int alive = ACCESS_ONCE(rt->rt_nhn_alive);
170 u32 hash = 0; 172 u32 hash = 0;
@@ -180,7 +182,7 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
180 if (alive <= 0) 182 if (alive <= 0)
181 return NULL; 183 return NULL;
182 184
183 hash = mpls_multipath_hash(rt, skb, bos); 185 hash = mpls_multipath_hash(rt, skb);
184 nh_index = hash % alive; 186 nh_index = hash % alive;
185 if (alive == rt->rt_nhn) 187 if (alive == rt->rt_nhn)
186 goto out; 188 goto out;
@@ -278,17 +280,11 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
278 hdr = mpls_hdr(skb); 280 hdr = mpls_hdr(skb);
279 dec = mpls_entry_decode(hdr); 281 dec = mpls_entry_decode(hdr);
280 282
281 /* Pop the label */
282 skb_pull(skb, sizeof(*hdr));
283 skb_reset_network_header(skb);
284
285 skb_orphan(skb);
286
287 rt = mpls_route_input_rcu(net, dec.label); 283 rt = mpls_route_input_rcu(net, dec.label);
288 if (!rt) 284 if (!rt)
289 goto drop; 285 goto drop;
290 286
291 nh = mpls_select_multipath(rt, skb, dec.bos); 287 nh = mpls_select_multipath(rt, skb);
292 if (!nh) 288 if (!nh)
293 goto drop; 289 goto drop;
294 290
@@ -297,6 +293,12 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
297 if (!mpls_output_possible(out_dev)) 293 if (!mpls_output_possible(out_dev))
298 goto drop; 294 goto drop;
299 295
296 /* Pop the label */
297 skb_pull(skb, sizeof(*hdr));
298 skb_reset_network_header(skb);
299
300 skb_orphan(skb);
301
300 if (skb_warn_if_lro(skb)) 302 if (skb_warn_if_lro(skb))
301 goto drop; 303 goto drop;
302 304
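[Note: the af_mpls.c rewrite fixes the multipath hash on several axes. The walk now starts from the first label (which mpls_forward() used to pop before the hash ever ran, see the third hunk), the pskb_may_pull() bound is tracked with an explicit mpls_hdr_len accumulator instead of an off-by-one index multiplication, and the inner IP header is located right after the last shim header (hdr + 1) once the bottom-of-stack bit is seen. A simplified userspace walk over a two-label stack; the mixing step stands in for the kernel's jhash:

    #include <stdio.h>
    #include <stdint.h>

    struct shim { uint32_t entry; };   /* label:20 tc:3 bos:1 ttl:8 */

    static uint32_t shim_label(struct shim h) { return h.entry >> 12; }
    static int shim_bos(struct shim h)        { return (h.entry >> 8) & 1; }

    int main(void)
    {
        /* two-label stack: 100 (not BOS), then 200 (BOS) */
        const struct shim stack[] = { { 100u << 12 }, { (200u << 12) | 0x100 } };
        const size_t nlabels = sizeof(stack) / sizeof(stack[0]);
        size_t mpls_hdr_len = 0;
        uint32_t hash = 0;

        for (size_t i = 0; i < nlabels; i++) {
            mpls_hdr_len += sizeof(struct shim);   /* what pskb_may_pull checks */
            hash = hash * 31 + shim_label(stack[i]);
            if (!shim_bos(stack[i]))
                continue;
            /* bottom of stack: the IP header starts right after this shim,
             * i.e. at offset mpls_hdr_len; hash saddr/daddr from there too */
            printf("payload offset %zu, hash %u\n", mpls_hdr_len, (unsigned)hash);
            break;
        }
        return 0;
    }

Deferring the label pop in mpls_forward() until after next-hop selection is what keeps the stack intact for this walk, and it also avoids mutating (and orphaning) packets that end up being dropped.]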
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 2f7ccd934416..1d281c1ff7c1 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -215,6 +215,7 @@ static const struct lwtunnel_encap_ops mpls_iptun_ops = {
215 .fill_encap = mpls_fill_encap_info, 215 .fill_encap = mpls_fill_encap_info,
216 .get_encap_size = mpls_encap_nlsize, 216 .get_encap_size = mpls_encap_nlsize,
217 .cmp_encap = mpls_encap_cmp, 217 .cmp_encap = mpls_encap_cmp,
218 .owner = THIS_MODULE,
218}; 219};
219 220
220static int __init mpls_iptunnel_init(void) 221static int __init mpls_iptunnel_init(void)
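[Note: adding .owner = THIS_MODULE to the lwtunnel encap ops lets the lwtunnel core take a module reference while routes use this encap type, so mpls_iptunnel cannot be unloaded underneath live tunnel state.]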
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 63729b489c2c..bbc45f8a7b2d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -494,7 +494,7 @@ config NFT_CT
494 depends on NF_CONNTRACK 494 depends on NF_CONNTRACK
495 tristate "Netfilter nf_tables conntrack module" 495 tristate "Netfilter nf_tables conntrack module"
496 help 496 help
497 This option adds the "meta" expression that you can use to match 497 This option adds the "ct" expression that you can use to match
498 connection tracking information such as the flow state. 498 connection tracking information such as the flow state.
499 499
500config NFT_SET_RBTREE 500config NFT_SET_RBTREE
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3a073cd9fcf4..4e8083c5e01d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -85,11 +85,11 @@ static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
85static __read_mostly bool nf_conntrack_locks_all; 85static __read_mostly bool nf_conntrack_locks_all;
86 86
87/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ 87/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
88#define GC_MAX_BUCKETS_DIV 64u 88#define GC_MAX_BUCKETS_DIV 128u
89/* upper bound of scan intervals */ 89/* upper bound of full table scan */
90#define GC_INTERVAL_MAX (2 * HZ) 90#define GC_MAX_SCAN_JIFFIES (16u * HZ)
91/* maximum conntracks to evict per gc run */ 91/* desired ratio of entries found to be expired */
92#define GC_MAX_EVICTS 256u 92#define GC_EVICT_RATIO 50u
93 93
94static struct conntrack_gc_work conntrack_gc_work; 94static struct conntrack_gc_work conntrack_gc_work;
95 95
@@ -938,6 +938,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
938 938
939static void gc_worker(struct work_struct *work) 939static void gc_worker(struct work_struct *work)
940{ 940{
941 unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
941 unsigned int i, goal, buckets = 0, expired_count = 0; 942 unsigned int i, goal, buckets = 0, expired_count = 0;
942 struct conntrack_gc_work *gc_work; 943 struct conntrack_gc_work *gc_work;
943 unsigned int ratio, scanned = 0; 944 unsigned int ratio, scanned = 0;
@@ -979,8 +980,7 @@ static void gc_worker(struct work_struct *work)
979 */ 980 */
980 rcu_read_unlock(); 981 rcu_read_unlock();
981 cond_resched_rcu_qs(); 982 cond_resched_rcu_qs();
982 } while (++buckets < goal && 983 } while (++buckets < goal);
983 expired_count < GC_MAX_EVICTS);
984 984
985 if (gc_work->exiting) 985 if (gc_work->exiting)
986 return; 986 return;
@@ -997,27 +997,25 @@ static void gc_worker(struct work_struct *work)
997 * 1. Minimize time until we notice a stale entry 997 * 1. Minimize time until we notice a stale entry
998 * 2. Maximize scan intervals to not waste cycles 998 * 2. Maximize scan intervals to not waste cycles
999 * 999 *
1000 * Normally, expired_count will be 0, this increases the next_run time 1000 * Normally, expire ratio will be close to 0.
1001 * to priorize 2) above.
1002 * 1001 *
1003 * As soon as a timed-out entry is found, move towards 1) and increase 1002 * As soon as a sizeable fraction of the entries have expired
1004 * the scan frequency. 1003 * increase scan frequency.
1005 * In case we have lots of evictions next scan is done immediately.
1006 */ 1004 */
1007 ratio = scanned ? expired_count * 100 / scanned : 0; 1005 ratio = scanned ? expired_count * 100 / scanned : 0;
1008 if (ratio >= 90 || expired_count == GC_MAX_EVICTS) { 1006 if (ratio > GC_EVICT_RATIO) {
1009 gc_work->next_gc_run = 0; 1007 gc_work->next_gc_run = min_interval;
1010 next_run = 0;
1011 } else if (expired_count) {
1012 gc_work->next_gc_run /= 2U;
1013 next_run = msecs_to_jiffies(1);
1014 } else { 1008 } else {
1015 if (gc_work->next_gc_run < GC_INTERVAL_MAX) 1009 unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
1016 gc_work->next_gc_run += msecs_to_jiffies(1);
1017 1010
1018 next_run = gc_work->next_gc_run; 1011 BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
1012
1013 gc_work->next_gc_run += min_interval;
1014 if (gc_work->next_gc_run > max)
1015 gc_work->next_gc_run = max;
1019 } 1016 }
1020 1017
1018 next_run = gc_work->next_gc_run;
1021 gc_work->last_bucket = i; 1019 gc_work->last_bucket = i;
1022 queue_delayed_work(system_long_wq, &gc_work->dwork, next_run); 1020 queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
1023} 1021}
@@ -1025,7 +1023,7 @@ static void gc_worker(struct work_struct *work)
1025static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) 1023static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
1026{ 1024{
1027 INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); 1025 INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
1028 gc_work->next_gc_run = GC_INTERVAL_MAX; 1026 gc_work->next_gc_run = HZ;
1029 gc_work->exiting = false; 1027 gc_work->exiting = false;
1030} 1028}
1031 1029
@@ -1917,7 +1915,7 @@ int nf_conntrack_init_start(void)
1917 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); 1915 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1918 1916
1919 conntrack_gc_work_init(&conntrack_gc_work); 1917 conntrack_gc_work_init(&conntrack_gc_work);
1920 queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX); 1918 queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
1921 1919
1922 return 0; 1920 return 0;
1923 1921
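[Note: the conntrack GC is retuned. Each run scans at most 1/128 of the table (up from 1/64), the GC_MAX_EVICTS cap is gone, and rescheduling is driven purely by the fraction of scanned entries that had expired. GC_MAX_SCAN_JIFFIES bounds a full-table sweep at 16 seconds, so the per-run delay backs off linearly to GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV and snaps back to the minimum interval once more than GC_EVICT_RATIO percent of the scanned entries were stale. The heuristic in isolation, with HZ fixed at 250 for the printout:

    #include <stdio.h>

    #define HZ                  250u
    #define GC_MAX_BUCKETS_DIV  128u
    #define GC_MAX_SCAN_JIFFIES (16u * HZ)
    #define GC_EVICT_RATIO      50u

    static unsigned int next_gc_run = HZ;   /* initial delay, as in init */

    static unsigned int reschedule(unsigned int scanned, unsigned int expired)
    {
        unsigned int min_interval = HZ / GC_MAX_BUCKETS_DIV;
        unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
        unsigned int ratio = scanned ? expired * 100 / scanned : 0;

        if (min_interval == 0)
            min_interval = 1;

        if (ratio > GC_EVICT_RATIO) {
            next_gc_run = min_interval;  /* many stale entries: rescan soon */
        } else {
            next_gc_run += min_interval; /* otherwise back off linearly */
            if (next_gc_run > max)
                next_gc_run = max;
        }
        return next_gc_run;              /* jiffies until next partial scan */
    }

    int main(void)
    {
        printf("idle:  %u jiffies\n", reschedule(1000, 0));   /* capped at max */
        printf("burst: %u jiffies\n", reschedule(1000, 900)); /* -> minimum */
        return 0;
    }
]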
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 3dca90dc24ad..ffb9e8ada899 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -13,7 +13,6 @@
13/* Internal logging interface, which relies on the real 13/* Internal logging interface, which relies on the real
14 LOG target modules */ 14 LOG target modules */
15 15
16#define NF_LOG_PREFIXLEN 128
17#define NFLOGGER_NAME_LEN 64 16#define NFLOGGER_NAME_LEN 64
18 17
19static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly; 18static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index a019a87e58ee..1b913760f205 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -928,7 +928,8 @@ static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
928} 928}
929 929
930static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = { 930static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
931 [NFTA_CHAIN_TABLE] = { .type = NLA_STRING }, 931 [NFTA_CHAIN_TABLE] = { .type = NLA_STRING,
932 .len = NFT_TABLE_MAXNAMELEN - 1 },
932 [NFTA_CHAIN_HANDLE] = { .type = NLA_U64 }, 933 [NFTA_CHAIN_HANDLE] = { .type = NLA_U64 },
933 [NFTA_CHAIN_NAME] = { .type = NLA_STRING, 934 [NFTA_CHAIN_NAME] = { .type = NLA_STRING,
934 .len = NFT_CHAIN_MAXNAMELEN - 1 }, 935 .len = NFT_CHAIN_MAXNAMELEN - 1 },
@@ -1854,7 +1855,8 @@ static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
1854} 1855}
1855 1856
1856static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = { 1857static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
1857 [NFTA_RULE_TABLE] = { .type = NLA_STRING }, 1858 [NFTA_RULE_TABLE] = { .type = NLA_STRING,
1859 .len = NFT_TABLE_MAXNAMELEN - 1 },
1858 [NFTA_RULE_CHAIN] = { .type = NLA_STRING, 1860 [NFTA_RULE_CHAIN] = { .type = NLA_STRING,
1859 .len = NFT_CHAIN_MAXNAMELEN - 1 }, 1861 .len = NFT_CHAIN_MAXNAMELEN - 1 },
1860 [NFTA_RULE_HANDLE] = { .type = NLA_U64 }, 1862 [NFTA_RULE_HANDLE] = { .type = NLA_U64 },
@@ -2115,7 +2117,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
2115 * is called on error from nf_tables_newrule(). 2117 * is called on error from nf_tables_newrule().
2116 */ 2118 */
2117 expr = nft_expr_first(rule); 2119 expr = nft_expr_first(rule);
2118 while (expr->ops && expr != nft_expr_last(rule)) { 2120 while (expr != nft_expr_last(rule) && expr->ops) {
2119 nf_tables_expr_destroy(ctx, expr); 2121 nf_tables_expr_destroy(ctx, expr);
2120 expr = nft_expr_next(expr); 2122 expr = nft_expr_next(expr);
2121 } 2123 }
@@ -2443,7 +2445,8 @@ nft_select_set_ops(const struct nlattr * const nla[],
2443} 2445}
2444 2446
2445static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = { 2447static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
2446 [NFTA_SET_TABLE] = { .type = NLA_STRING }, 2448 [NFTA_SET_TABLE] = { .type = NLA_STRING,
2449 .len = NFT_TABLE_MAXNAMELEN - 1 },
2447 [NFTA_SET_NAME] = { .type = NLA_STRING, 2450 [NFTA_SET_NAME] = { .type = NLA_STRING,
2448 .len = NFT_SET_MAXNAMELEN - 1 }, 2451 .len = NFT_SET_MAXNAMELEN - 1 },
2449 [NFTA_SET_FLAGS] = { .type = NLA_U32 }, 2452 [NFTA_SET_FLAGS] = { .type = NLA_U32 },
@@ -3084,9 +3087,9 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
3084} 3087}
3085 3088
3086static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx, 3089static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
3087 const struct nft_set *set, 3090 struct nft_set *set,
3088 const struct nft_set_iter *iter, 3091 const struct nft_set_iter *iter,
3089 const struct nft_set_elem *elem) 3092 struct nft_set_elem *elem)
3090{ 3093{
3091 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); 3094 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
3092 enum nft_registers dreg; 3095 enum nft_registers dreg;
@@ -3192,8 +3195,10 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
3192}; 3195};
3193 3196
3194static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = { 3197static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
3195 [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING }, 3198 [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING,
3196 [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING }, 3199 .len = NFT_TABLE_MAXNAMELEN - 1 },
3200 [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING,
3201 .len = NFT_SET_MAXNAMELEN - 1 },
3197 [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED }, 3202 [NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED },
3198 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, 3203 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 },
3199}; 3204};
@@ -3303,9 +3308,9 @@ struct nft_set_dump_args {
3303}; 3308};
3304 3309
3305static int nf_tables_dump_setelem(const struct nft_ctx *ctx, 3310static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
3306 const struct nft_set *set, 3311 struct nft_set *set,
3307 const struct nft_set_iter *iter, 3312 const struct nft_set_iter *iter,
3308 const struct nft_set_elem *elem) 3313 struct nft_set_elem *elem)
3309{ 3314{
3310 struct nft_set_dump_args *args; 3315 struct nft_set_dump_args *args;
3311 3316
@@ -3317,7 +3322,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3317{ 3322{
3318 struct net *net = sock_net(skb->sk); 3323 struct net *net = sock_net(skb->sk);
3319 u8 genmask = nft_genmask_cur(net); 3324 u8 genmask = nft_genmask_cur(net);
3320 const struct nft_set *set; 3325 struct nft_set *set;
3321 struct nft_set_dump_args args; 3326 struct nft_set_dump_args args;
3322 struct nft_ctx ctx; 3327 struct nft_ctx ctx;
3323 struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1]; 3328 struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
@@ -3740,10 +3745,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
3740 goto err5; 3745 goto err5;
3741 } 3746 }
3742 3747
3748 if (set->size &&
3749 !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
3750 err = -ENFILE;
3751 goto err6;
3752 }
3753
3743 nft_trans_elem(trans) = elem; 3754 nft_trans_elem(trans) = elem;
3744 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 3755 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
3745 return 0; 3756 return 0;
3746 3757
3758err6:
3759 set->ops->remove(set, &elem);
3747err5: 3760err5:
3748 kfree(trans); 3761 kfree(trans);
3749err4: 3762err4:
@@ -3790,15 +3803,9 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
3790 return -EBUSY; 3803 return -EBUSY;
3791 3804
3792 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { 3805 nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
3793 if (set->size &&
3794 !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
3795 return -ENFILE;
3796
3797 err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags); 3806 err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
3798 if (err < 0) { 3807 if (err < 0)
3799 atomic_dec(&set->nelems);
3800 break; 3808 break;
3801 }
3802 } 3809 }
3803 return err; 3810 return err;
3804} 3811}
@@ -3883,9 +3890,9 @@ err1:
3883} 3890}
3884 3891
3885static int nft_flush_set(const struct nft_ctx *ctx, 3892static int nft_flush_set(const struct nft_ctx *ctx,
3886 const struct nft_set *set, 3893 struct nft_set *set,
3887 const struct nft_set_iter *iter, 3894 const struct nft_set_iter *iter,
3888 const struct nft_set_elem *elem) 3895 struct nft_set_elem *elem)
3889{ 3896{
3890 struct nft_trans *trans; 3897 struct nft_trans *trans;
3891 int err; 3898 int err;
@@ -3899,9 +3906,10 @@ static int nft_flush_set(const struct nft_ctx *ctx,
3899 err = -ENOENT; 3906 err = -ENOENT;
3900 goto err1; 3907 goto err1;
3901 } 3908 }
3909 set->ndeact++;
3902 3910
3903 nft_trans_elem_set(trans) = (struct nft_set *)set; 3911 nft_trans_elem_set(trans) = set;
3904 nft_trans_elem(trans) = *((struct nft_set_elem *)elem); 3912 nft_trans_elem(trans) = *elem;
3905 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 3913 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
3906 3914
3907 return 0; 3915 return 0;
@@ -4032,8 +4040,10 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
4032EXPORT_SYMBOL_GPL(nf_tables_obj_lookup); 4040EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
4033 4041
4034static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = { 4042static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
4035 [NFTA_OBJ_TABLE] = { .type = NLA_STRING }, 4043 [NFTA_OBJ_TABLE] = { .type = NLA_STRING,
4036 [NFTA_OBJ_NAME] = { .type = NLA_STRING }, 4044 .len = NFT_TABLE_MAXNAMELEN - 1 },
4045 [NFTA_OBJ_NAME] = { .type = NLA_STRING,
4046 .len = NFT_OBJ_MAXNAMELEN - 1 },
4037 [NFTA_OBJ_TYPE] = { .type = NLA_U32 }, 4047 [NFTA_OBJ_TYPE] = { .type = NLA_U32 },
4038 [NFTA_OBJ_DATA] = { .type = NLA_NESTED }, 4048 [NFTA_OBJ_DATA] = { .type = NLA_NESTED },
4039}; 4049};
@@ -4262,10 +4272,11 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
4262 if (idx > s_idx) 4272 if (idx > s_idx)
4263 memset(&cb->args[1], 0, 4273 memset(&cb->args[1], 0,
4264 sizeof(cb->args) - sizeof(cb->args[0])); 4274 sizeof(cb->args) - sizeof(cb->args[0]));
4265 if (filter->table[0] && 4275 if (filter && filter->table[0] &&
4266 strcmp(filter->table, table->name)) 4276 strcmp(filter->table, table->name))
4267 goto cont; 4277 goto cont;
4268 if (filter->type != NFT_OBJECT_UNSPEC && 4278 if (filter &&
4279 filter->type != NFT_OBJECT_UNSPEC &&
4269 obj->type->type != filter->type) 4280 obj->type->type != filter->type)
4270 goto cont; 4281 goto cont;
4271 4282
@@ -5009,9 +5020,9 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
5009 const struct nft_chain *chain); 5020 const struct nft_chain *chain);
5010 5021
5011static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx, 5022static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
5012 const struct nft_set *set, 5023 struct nft_set *set,
5013 const struct nft_set_iter *iter, 5024 const struct nft_set_iter *iter,
5014 const struct nft_set_elem *elem) 5025 struct nft_set_elem *elem)
5015{ 5026{
5016 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); 5027 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
5017 const struct nft_data *data; 5028 const struct nft_data *data;
@@ -5035,7 +5046,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
5035{ 5046{
5036 const struct nft_rule *rule; 5047 const struct nft_rule *rule;
5037 const struct nft_expr *expr, *last; 5048 const struct nft_expr *expr, *last;
5038 const struct nft_set *set; 5049 struct nft_set *set;
5039 struct nft_set_binding *binding; 5050 struct nft_set_binding *binding;
5040 struct nft_set_iter iter; 5051 struct nft_set_iter iter;
5041 5052
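[Note: a theme across the nf_tables hunks. Every NLA_STRING attribute naming a table, chain, set or object now carries an explicit .len bound in its nla_policy, so oversized names are rejected at parse time rather than truncated (or worse) when later copied into fixed-size buffers; the same pattern repeats in nft_dynset, nft_log (using NF_LOG_PREFIXLEN, whose private #define the nf_log.c hunk above removes, presumably in favour of a shared header), nft_lookup and nft_objref below. The set-walk callbacks drop const so iterators can update set state, which nft_flush_set() uses to bump set->ndeact per deactivated element, and the set->nelems accounting moves from nf_tables_newsetelem() into nft_add_set_elem(), where the new err6 label can undo the insertion on failure. Two smaller hardening fixes ride along: the expression-destroy loop checks the end-of-rule boundary before dereferencing expr->ops, which may be uninitialized on the error path, and nf_tables_dump_obj() guards its filter dereferences since dumps can run without a filter.]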
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 7de2f46734a4..049ad2d9ee66 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -98,7 +98,8 @@ out:
98} 98}
99 99
100static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = { 100static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
101 [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING }, 101 [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING,
102 .len = NFT_SET_MAXNAMELEN - 1 },
102 [NFTA_DYNSET_SET_ID] = { .type = NLA_U32 }, 103 [NFTA_DYNSET_SET_ID] = { .type = NLA_U32 },
103 [NFTA_DYNSET_OP] = { .type = NLA_U32 }, 104 [NFTA_DYNSET_OP] = { .type = NLA_U32 },
104 [NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 }, 105 [NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 6271e40a3dd6..6f6e64423643 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -39,7 +39,8 @@ static void nft_log_eval(const struct nft_expr *expr,
39 39
40static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = { 40static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
41 [NFTA_LOG_GROUP] = { .type = NLA_U16 }, 41 [NFTA_LOG_GROUP] = { .type = NLA_U16 },
42 [NFTA_LOG_PREFIX] = { .type = NLA_STRING }, 42 [NFTA_LOG_PREFIX] = { .type = NLA_STRING,
43 .len = NF_LOG_PREFIXLEN - 1 },
43 [NFTA_LOG_SNAPLEN] = { .type = NLA_U32 }, 44 [NFTA_LOG_SNAPLEN] = { .type = NLA_U32 },
44 [NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 }, 45 [NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 },
45 [NFTA_LOG_LEVEL] = { .type = NLA_U32 }, 46 [NFTA_LOG_LEVEL] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index d4f97fa7e21d..e21aea7e5ec8 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -49,7 +49,8 @@ static void nft_lookup_eval(const struct nft_expr *expr,
49} 49}
50 50
51static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = { 51static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
52 [NFTA_LOOKUP_SET] = { .type = NLA_STRING }, 52 [NFTA_LOOKUP_SET] = { .type = NLA_STRING,
53 .len = NFT_SET_MAXNAMELEN - 1 },
53 [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 }, 54 [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 },
54 [NFTA_LOOKUP_SREG] = { .type = NLA_U32 }, 55 [NFTA_LOOKUP_SREG] = { .type = NLA_U32 },
55 [NFTA_LOOKUP_DREG] = { .type = NLA_U32 }, 56 [NFTA_LOOKUP_DREG] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 415a65ba2b85..1ae8c49ca4a1 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -193,10 +193,12 @@ nft_objref_select_ops(const struct nft_ctx *ctx,
193} 193}
194 194
195static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = { 195static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = {
196 [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING }, 196 [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING,
197 .len = NFT_OBJ_MAXNAMELEN - 1 },
197 [NFTA_OBJREF_IMM_TYPE] = { .type = NLA_U32 }, 198 [NFTA_OBJREF_IMM_TYPE] = { .type = NLA_U32 },
198 [NFTA_OBJREF_SET_SREG] = { .type = NLA_U32 }, 199 [NFTA_OBJREF_SET_SREG] = { .type = NLA_U32 },
199 [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING }, 200 [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING,
201 .len = NFT_SET_MAXNAMELEN - 1 },
200 [NFTA_OBJREF_SET_ID] = { .type = NLA_U32 }, 202 [NFTA_OBJREF_SET_ID] = { .type = NLA_U32 },
201}; 203};
202 204
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 36d2b1096546..7d699bbd45b0 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -250,6 +250,22 @@ static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
250 return 0; 250 return 0;
251} 251}
252 252
253static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
254 __wsum fsum, __wsum tsum, int csum_offset)
255{
256 __sum16 sum;
257
258 if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
259 return -1;
260
261 nft_csum_replace(&sum, fsum, tsum);
262 if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
263 skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
264 return -1;
265
266 return 0;
267}
268
253static void nft_payload_set_eval(const struct nft_expr *expr, 269static void nft_payload_set_eval(const struct nft_expr *expr,
254 struct nft_regs *regs, 270 struct nft_regs *regs,
255 const struct nft_pktinfo *pkt) 271 const struct nft_pktinfo *pkt)
@@ -259,7 +275,6 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
259 const u32 *src = &regs->data[priv->sreg]; 275 const u32 *src = &regs->data[priv->sreg];
260 int offset, csum_offset; 276 int offset, csum_offset;
261 __wsum fsum, tsum; 277 __wsum fsum, tsum;
262 __sum16 sum;
263 278
264 switch (priv->base) { 279 switch (priv->base) {
265 case NFT_PAYLOAD_LL_HEADER: 280 case NFT_PAYLOAD_LL_HEADER:
@@ -282,18 +297,14 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
282 csum_offset = offset + priv->csum_offset; 297 csum_offset = offset + priv->csum_offset;
283 offset += priv->offset; 298 offset += priv->offset;
284 299
285 if (priv->csum_type == NFT_PAYLOAD_CSUM_INET && 300 if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
286 (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER || 301 (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
287 skb->ip_summed != CHECKSUM_PARTIAL)) { 302 skb->ip_summed != CHECKSUM_PARTIAL)) {
288 if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
289 goto err;
290
291 fsum = skb_checksum(skb, offset, priv->len, 0); 303 fsum = skb_checksum(skb, offset, priv->len, 0);
292 tsum = csum_partial(src, priv->len, 0); 304 tsum = csum_partial(src, priv->len, 0);
293 nft_csum_replace(&sum, fsum, tsum);
294 305
295 if (!skb_make_writable(skb, csum_offset + sizeof(sum)) || 306 if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
296 skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0) 307 nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
297 goto err; 308 goto err;
298 309
299 if (priv->csum_flags && 310 if (priv->csum_flags &&
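[Note: nft_payload_set_eval() previously read and rewrote the 16-bit inet checksum even when only the flag-driven pseudo-header update was requested; the logic is now split so that the inet fix-up lives in the new nft_payload_csum_inet() helper and runs only for NFT_PAYLOAD_CSUM_INET, while the fsum/tsum partial sums are shared by both paths. The arithmetic behind nft_csum_replace() is the standard RFC 1624 incremental update; a scalar userspace rendition for illustration (the kernel works on __wsum partial sums over whole byte ranges):

    #include <stdio.h>
    #include <stdint.h>

    /* fold the old 16-bit value out of a one's-complement sum, fold the
     * new one in, without touching the rest of the checksummed data */
    static uint16_t csum_replace(uint16_t sum, uint16_t from, uint16_t to)
    {
        uint32_t s = (uint16_t)~sum;

        s += (uint16_t)~from;
        s += to;
        while (s >> 16)                    /* end-around carry */
            s = (s & 0xffff) + (s >> 16);
        return (uint16_t)~s;
    }

    int main(void)
    {
        /* replacing 0x1234 with 0xabcd under an existing checksum */
        printf("0x%04x\n", csum_replace(0xbeef, 0x1234, 0xabcd));
        return 0;
    }

Folding the old bytes out and the new bytes in avoids recomputing the checksum over the entire header after a small edit.]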
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index 3e19fa1230dc..dbb6aaff67ec 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -38,7 +38,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
38 38
39 if (priv->queues_total > 1) { 39 if (priv->queues_total > 1) {
40 if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) { 40 if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
41 int cpu = smp_processor_id(); 41 int cpu = raw_smp_processor_id();
42 42
43 queue = priv->queuenum + cpu % priv->queues_total; 43 queue = priv->queuenum + cpu % priv->queues_total;
44 } else { 44 } else {
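[Note: smp_processor_id() triggers a debug splat when called from preemptible context; raw_smp_processor_id() is the accepted variant when a momentarily stale CPU number is harmless, which it is here, since the value is only used to spread packets across the configured queues.]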
diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
index bd6efc53f26d..2d6fe3559912 100644
--- a/net/netfilter/nft_quota.c
+++ b/net/netfilter/nft_quota.c
@@ -110,30 +110,32 @@ static int nft_quota_obj_init(const struct nlattr * const tb[],
110static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv, 110static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
111 bool reset) 111 bool reset)
112{ 112{
113 u64 consumed, consumed_cap;
113 u32 flags = priv->flags; 114 u32 flags = priv->flags;
114 u64 consumed;
115
116 if (reset) {
117 consumed = atomic64_xchg(&priv->consumed, 0);
118 if (test_and_clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
119 flags |= NFT_QUOTA_F_DEPLETED;
120 } else {
121 consumed = atomic64_read(&priv->consumed);
122 }
123 115
 124 /* Since we unconditionally increment consumed quota for each packet 116 /* Since we unconditionally increment consumed quota for each packet
125 * that we see, don't go over the quota boundary in what we send to 117 * that we see, don't go over the quota boundary in what we send to
126 * userspace. 118 * userspace.
127 */ 119 */
128 if (consumed > priv->quota) 120 consumed = atomic64_read(&priv->consumed);
129 consumed = priv->quota; 121 if (consumed >= priv->quota) {
122 consumed_cap = priv->quota;
123 flags |= NFT_QUOTA_F_DEPLETED;
124 } else {
125 consumed_cap = consumed;
126 }
130 127
131 if (nla_put_be64(skb, NFTA_QUOTA_BYTES, cpu_to_be64(priv->quota), 128 if (nla_put_be64(skb, NFTA_QUOTA_BYTES, cpu_to_be64(priv->quota),
132 NFTA_QUOTA_PAD) || 129 NFTA_QUOTA_PAD) ||
133 nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed), 130 nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed_cap),
134 NFTA_QUOTA_PAD) || 131 NFTA_QUOTA_PAD) ||
135 nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags))) 132 nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags)))
136 goto nla_put_failure; 133 goto nla_put_failure;
134
135 if (reset) {
136 atomic64_sub(consumed, &priv->consumed);
137 clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags);
138 }
137 return 0; 139 return 0;
138 140
139nla_put_failure: 141nla_put_failure:
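[Note: the quota dump no longer zeroes the counter up front. It reads consumed once, reports it capped at the quota (deriving NFT_QUOTA_F_DEPLETED directly from consumed >= priv->quota instead of a separate bit), and only after the netlink attributes have been emitted subtracts exactly the amount it reported. Bytes accounted between the read and the subtraction are preserved, and a dump that fails midway no longer resets the quota as a side effect.]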
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 1e20e2bbb6d9..e36069fb76ae 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -212,7 +212,7 @@ static void nft_hash_remove(const struct nft_set *set,
212 rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params); 212 rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
213} 213}
214 214
215static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, 215static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
216 struct nft_set_iter *iter) 216 struct nft_set_iter *iter)
217{ 217{
218 struct nft_hash *priv = nft_set_priv(set); 218 struct nft_hash *priv = nft_set_priv(set);
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 08376e50f6cd..f06f55ee516d 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -221,7 +221,7 @@ static void *nft_rbtree_deactivate(const struct net *net,
221} 221}
222 222
223static void nft_rbtree_walk(const struct nft_ctx *ctx, 223static void nft_rbtree_walk(const struct nft_ctx *ctx,
224 const struct nft_set *set, 224 struct nft_set *set,
225 struct nft_set_iter *iter) 225 struct nft_set_iter *iter)
226{ 226{
227 const struct nft_rbtree *priv = nft_set_priv(set); 227 const struct nft_rbtree *priv = nft_set_priv(set);
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 28c56b95fb7f..ea7c67050792 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -1502,10 +1502,7 @@ static int __init netlbl_init(void)
1502 printk(KERN_INFO "NetLabel: Initializing\n"); 1502 printk(KERN_INFO "NetLabel: Initializing\n");
1503 printk(KERN_INFO "NetLabel: domain hash size = %u\n", 1503 printk(KERN_INFO "NetLabel: domain hash size = %u\n",
1504 (1 << NETLBL_DOMHSH_BITSIZE)); 1504 (1 << NETLBL_DOMHSH_BITSIZE));
1505 printk(KERN_INFO "NetLabel: protocols =" 1505 printk(KERN_INFO "NetLabel: protocols = UNLABELED CIPSOv4 CALIPSO\n");
1506 " UNLABELED"
1507 " CIPSOv4"
1508 "\n");
1509 1506
1510 ret_val = netlbl_domhsh_init(NETLBL_DOMHSH_BITSIZE); 1507 ret_val = netlbl_domhsh_init(NETLBL_DOMHSH_BITSIZE);
1511 if (ret_val != 0) 1508 if (ret_val != 0)
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 6b78bab27755..54253ea5976e 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -514,7 +514,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
514 int hooknum, nh_off, err = NF_ACCEPT; 514 int hooknum, nh_off, err = NF_ACCEPT;
515 515
516 nh_off = skb_network_offset(skb); 516 nh_off = skb_network_offset(skb);
517 skb_pull(skb, nh_off); 517 skb_pull_rcsum(skb, nh_off);
518 518
519 /* See HOOK2MANIP(). */ 519 /* See HOOK2MANIP(). */
520 if (maniptype == NF_NAT_MANIP_SRC) 520 if (maniptype == NF_NAT_MANIP_SRC)
@@ -579,6 +579,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
579 err = nf_nat_packet(ct, ctinfo, hooknum, skb); 579 err = nf_nat_packet(ct, ctinfo, hooknum, skb);
580push: 580push:
581 skb_push(skb, nh_off); 581 skb_push(skb, nh_off);
582 skb_postpush_rcsum(skb, skb->data, nh_off);
582 583
583 return err; 584 return err;
584} 585}
@@ -886,7 +887,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
886 887
887 /* The conntrack module expects to be working at L3. */ 888 /* The conntrack module expects to be working at L3. */
888 nh_ofs = skb_network_offset(skb); 889 nh_ofs = skb_network_offset(skb);
889 skb_pull(skb, nh_ofs); 890 skb_pull_rcsum(skb, nh_ofs);
890 891
891 if (key->ip.frag != OVS_FRAG_TYPE_NONE) { 892 if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
892 err = handle_fragments(net, key, info->zone.id, skb); 893 err = handle_fragments(net, key, info->zone.id, skb);
@@ -900,6 +901,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
900 err = ovs_ct_lookup(net, key, info, skb); 901 err = ovs_ct_lookup(net, key, info, skb);
901 902
902 skb_push(skb, nh_ofs); 903 skb_push(skb, nh_ofs);
904 skb_postpush_rcsum(skb, skb->data, nh_ofs);
903 if (err) 905 if (err)
904 kfree_skb(skb); 906 kfree_skb(skb);
905 return err; 907 return err;
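[Note: both openvswitch call sites that strip the L3 offset before calling into conntrack switch from skb_pull() to skb_pull_rcsum(), with a matching skb_postpush_rcsum() on the way back. On CHECKSUM_COMPLETE packets skb->csum covers the data from skb->data onward, so pulling and re-pushing the headers without adjusting it would leave a stale checksum behind.]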
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2d4c4d3911c0..9c62b6325f7a 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -606,7 +606,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
606 rcu_assign_pointer(flow->sf_acts, acts); 606 rcu_assign_pointer(flow->sf_acts, acts);
607 packet->priority = flow->key.phy.priority; 607 packet->priority = flow->key.phy.priority;
608 packet->mark = flow->key.phy.skb_mark; 608 packet->mark = flow->key.phy.skb_mark;
609 packet->protocol = flow->key.eth.type;
610 609
611 rcu_read_lock(); 610 rcu_read_lock();
612 dp = get_dp_rcu(net, ovs_header->dp_ifindex); 611 dp = get_dp_rcu(net, ovs_header->dp_ifindex);
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 08aa926cd5cf..2c0a00f7f1b7 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -312,7 +312,8 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
312 * Returns 0 if it encounters a non-vlan or incomplete packet. 312 * Returns 0 if it encounters a non-vlan or incomplete packet.
313 * Returns 1 after successfully parsing vlan tag. 313 * Returns 1 after successfully parsing vlan tag.
314 */ 314 */
315static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh) 315static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
316 bool untag_vlan)
316{ 317{
317 struct vlan_head *vh = (struct vlan_head *)skb->data; 318 struct vlan_head *vh = (struct vlan_head *)skb->data;
318 319
@@ -330,7 +331,20 @@ static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh)
330 key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT); 331 key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT);
331 key_vh->tpid = vh->tpid; 332 key_vh->tpid = vh->tpid;
332 333
333 __skb_pull(skb, sizeof(struct vlan_head)); 334 if (unlikely(untag_vlan)) {
335 int offset = skb->data - skb_mac_header(skb);
336 u16 tci;
337 int err;
338
339 __skb_push(skb, offset);
340 err = __skb_vlan_pop(skb, &tci);
341 __skb_pull(skb, offset);
342 if (err)
343 return err;
344 __vlan_hwaccel_put_tag(skb, key_vh->tpid, tci);
345 } else {
346 __skb_pull(skb, sizeof(struct vlan_head));
347 }
334 return 1; 348 return 1;
335} 349}
336 350
@@ -351,13 +365,13 @@ static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
351 key->eth.vlan.tpid = skb->vlan_proto; 365 key->eth.vlan.tpid = skb->vlan_proto;
352 } else { 366 } else {
353 /* Parse outer vlan tag in the non-accelerated case. */ 367 /* Parse outer vlan tag in the non-accelerated case. */
354 res = parse_vlan_tag(skb, &key->eth.vlan); 368 res = parse_vlan_tag(skb, &key->eth.vlan, true);
355 if (res <= 0) 369 if (res <= 0)
356 return res; 370 return res;
357 } 371 }
358 372
359 /* Parse inner vlan tag. */ 373 /* Parse inner vlan tag. */
360 res = parse_vlan_tag(skb, &key->eth.cvlan); 374 res = parse_vlan_tag(skb, &key->eth.cvlan, false);
361 if (res <= 0) 375 if (res <= 0)
362 return res; 376 return res;
363 377
@@ -800,29 +814,15 @@ int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
800 if (err) 814 if (err)
801 return err; 815 return err;
802 816
803 if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) { 817 /* key_extract assumes that skb->protocol is set-up for
804 /* key_extract assumes that skb->protocol is set-up for 818 * layer 3 packets which is the case for other callers,
805 * layer 3 packets which is the case for other callers, 819 * in particular packets received from the network stack.
806 * in particular packets recieved from the network stack. 820 * Here the correct value can be set from the metadata
807 * Here the correct value can be set from the metadata 821 * extracted above.
808 * extracted above. 822 * For L2 packet key eth type would be zero. skb protocol
 809 */ 823 * would be set to the correct value later during key-extract.
810 skb->protocol = key->eth.type; 824 */
811 } else {
812 struct ethhdr *eth;
813
814 skb_reset_mac_header(skb);
815 eth = eth_hdr(skb);
816
817 /* Normally, setting the skb 'protocol' field would be
818 * handled by a call to eth_type_trans(), but it assumes
819 * there's a sending device, which we may not have.
820 */
821 if (eth_proto_is_802_3(eth->h_proto))
822 skb->protocol = eth->h_proto;
823 else
824 skb->protocol = htons(ETH_P_802_2);
825 }
826 825
826 skb->protocol = key->eth.type;
827 return key_extract(skb, key); 827 return key_extract(skb, key);
828} 828}
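[Note: two related extraction fixes in flow.c. parse_vlan_tag() grows an untag_vlan mode, used only for the outer tag, which pops the tag out of the packet data via __skb_vlan_pop() and moves it into the skb's hwaccel fields, so everything downstream sees a consistently untagged frame. And ovs_flow_key_extract_userspace() now simply sets skb->protocol from the metadata-derived key->eth.type for both L2 and L3 packets, letting key_extract() finish the job; this is also why the datapath.c hunk above can drop its own protocol assignment.]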
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b9e1a13b4ba3..70f5b6a4683c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1497,6 +1497,8 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
1497 f->arr[f->num_members] = sk; 1497 f->arr[f->num_members] = sk;
1498 smp_wmb(); 1498 smp_wmb();
1499 f->num_members++; 1499 f->num_members++;
1500 if (f->num_members == 1)
1501 dev_add_pack(&f->prot_hook);
1500 spin_unlock(&f->lock); 1502 spin_unlock(&f->lock);
1501} 1503}
1502 1504
@@ -1513,6 +1515,8 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1513 BUG_ON(i >= f->num_members); 1515 BUG_ON(i >= f->num_members);
1514 f->arr[i] = f->arr[f->num_members - 1]; 1516 f->arr[i] = f->arr[f->num_members - 1];
1515 f->num_members--; 1517 f->num_members--;
1518 if (f->num_members == 0)
1519 __dev_remove_pack(&f->prot_hook);
1516 spin_unlock(&f->lock); 1520 spin_unlock(&f->lock);
1517} 1521}
1518 1522
@@ -1619,6 +1623,7 @@ static void fanout_release_data(struct packet_fanout *f)
1619 1623
1620static int fanout_add(struct sock *sk, u16 id, u16 type_flags) 1624static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1621{ 1625{
1626 struct packet_rollover *rollover = NULL;
1622 struct packet_sock *po = pkt_sk(sk); 1627 struct packet_sock *po = pkt_sk(sk);
1623 struct packet_fanout *f, *match; 1628 struct packet_fanout *f, *match;
1624 u8 type = type_flags & 0xff; 1629 u8 type = type_flags & 0xff;
@@ -1641,23 +1646,28 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1641 return -EINVAL; 1646 return -EINVAL;
1642 } 1647 }
1643 1648
1649 mutex_lock(&fanout_mutex);
1650
1651 err = -EINVAL;
1644 if (!po->running) 1652 if (!po->running)
1645 return -EINVAL; 1653 goto out;
1646 1654
1655 err = -EALREADY;
1647 if (po->fanout) 1656 if (po->fanout)
1648 return -EALREADY; 1657 goto out;
1649 1658
1650 if (type == PACKET_FANOUT_ROLLOVER || 1659 if (type == PACKET_FANOUT_ROLLOVER ||
1651 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) { 1660 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1652 po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL); 1661 err = -ENOMEM;
1653 if (!po->rollover) 1662 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1654 return -ENOMEM; 1663 if (!rollover)
1655 atomic_long_set(&po->rollover->num, 0); 1664 goto out;
1656 atomic_long_set(&po->rollover->num_huge, 0); 1665 atomic_long_set(&rollover->num, 0);
1657 atomic_long_set(&po->rollover->num_failed, 0); 1666 atomic_long_set(&rollover->num_huge, 0);
1667 atomic_long_set(&rollover->num_failed, 0);
1668 po->rollover = rollover;
1658 } 1669 }
1659 1670
1660 mutex_lock(&fanout_mutex);
1661 match = NULL; 1671 match = NULL;
1662 list_for_each_entry(f, &fanout_list, list) { 1672 list_for_each_entry(f, &fanout_list, list) {
1663 if (f->id == id && 1673 if (f->id == id &&
@@ -1687,7 +1697,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1687 match->prot_hook.func = packet_rcv_fanout; 1697 match->prot_hook.func = packet_rcv_fanout;
1688 match->prot_hook.af_packet_priv = match; 1698 match->prot_hook.af_packet_priv = match;
1689 match->prot_hook.id_match = match_fanout_group; 1699 match->prot_hook.id_match = match_fanout_group;
1690 dev_add_pack(&match->prot_hook);
1691 list_add(&match->list, &fanout_list); 1700 list_add(&match->list, &fanout_list);
1692 } 1701 }
1693 err = -EINVAL; 1702 err = -EINVAL;
@@ -1704,36 +1713,40 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1704 } 1713 }
1705 } 1714 }
1706out: 1715out:
1707 mutex_unlock(&fanout_mutex); 1716 if (err && rollover) {
1708 if (err) { 1717 kfree(rollover);
1709 kfree(po->rollover);
1710 po->rollover = NULL; 1718 po->rollover = NULL;
1711 } 1719 }
1720 mutex_unlock(&fanout_mutex);
1712 return err; 1721 return err;
1713} 1722}
1714 1723
1715static void fanout_release(struct sock *sk) 1724/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1725 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1726 * It is the responsibility of the caller to call fanout_release_data() and
1727 * free the returned packet_fanout (after synchronize_net())
1728 */
1729static struct packet_fanout *fanout_release(struct sock *sk)
1716{ 1730{
1717 struct packet_sock *po = pkt_sk(sk); 1731 struct packet_sock *po = pkt_sk(sk);
1718 struct packet_fanout *f; 1732 struct packet_fanout *f;
1719 1733
1734 mutex_lock(&fanout_mutex);
1720 f = po->fanout; 1735 f = po->fanout;
1721 if (!f) 1736 if (f) {
1722 return; 1737 po->fanout = NULL;
1723 1738
1724 mutex_lock(&fanout_mutex); 1739 if (atomic_dec_and_test(&f->sk_ref))
1725 po->fanout = NULL; 1740 list_del(&f->list);
1741 else
1742 f = NULL;
1726 1743
1727 if (atomic_dec_and_test(&f->sk_ref)) { 1744 if (po->rollover)
1728 list_del(&f->list); 1745 kfree_rcu(po->rollover, rcu);
1729 dev_remove_pack(&f->prot_hook);
1730 fanout_release_data(f);
1731 kfree(f);
1732 } 1746 }
1733 mutex_unlock(&fanout_mutex); 1747 mutex_unlock(&fanout_mutex);
1734 1748
1735 if (po->rollover) 1749 return f;
1736 kfree_rcu(po->rollover, rcu);
1737} 1750}
1738 1751
1739static bool packet_extra_vlan_len_allowed(const struct net_device *dev, 1752static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
@@ -1976,7 +1989,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
1976 return -EINVAL; 1989 return -EINVAL;
1977 *len -= sizeof(vnet_hdr); 1990 *len -= sizeof(vnet_hdr);
1978 1991
1979 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le())) 1992 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
1980 return -EINVAL; 1993 return -EINVAL;
1981 1994
1982 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr)); 1995 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
@@ -2237,7 +2250,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2237 if (po->has_vnet_hdr) { 2250 if (po->has_vnet_hdr) {
2238 if (virtio_net_hdr_from_skb(skb, h.raw + macoff - 2251 if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
2239 sizeof(struct virtio_net_hdr), 2252 sizeof(struct virtio_net_hdr),
2240 vio_le())) { 2253 vio_le(), true)) {
2241 spin_lock(&sk->sk_receive_queue.lock); 2254 spin_lock(&sk->sk_receive_queue.lock);
2242 goto drop_n_account; 2255 goto drop_n_account;
2243 } 2256 }
@@ -2755,7 +2768,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2755 struct virtio_net_hdr vnet_hdr = { 0 }; 2768 struct virtio_net_hdr vnet_hdr = { 0 };
2756 int offset = 0; 2769 int offset = 0;
2757 struct packet_sock *po = pkt_sk(sk); 2770 struct packet_sock *po = pkt_sk(sk);
2758 int hlen, tlen; 2771 int hlen, tlen, linear;
2759 int extra_len = 0; 2772 int extra_len = 0;
2760 2773
2761 /* 2774 /*
@@ -2816,8 +2829,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2816 err = -ENOBUFS; 2829 err = -ENOBUFS;
2817 hlen = LL_RESERVED_SPACE(dev); 2830 hlen = LL_RESERVED_SPACE(dev);
2818 tlen = dev->needed_tailroom; 2831 tlen = dev->needed_tailroom;
2819 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, 2832 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2820 __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len), 2833 linear = max(linear, min_t(int, len, dev->hard_header_len));
2834 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2821 msg->msg_flags & MSG_DONTWAIT, &err); 2835 msg->msg_flags & MSG_DONTWAIT, &err);
2822 if (skb == NULL) 2836 if (skb == NULL)
2823 goto out_unlock; 2837 goto out_unlock;
@@ -2906,6 +2920,7 @@ static int packet_release(struct socket *sock)
2906{ 2920{
2907 struct sock *sk = sock->sk; 2921 struct sock *sk = sock->sk;
2908 struct packet_sock *po; 2922 struct packet_sock *po;
2923 struct packet_fanout *f;
2909 struct net *net; 2924 struct net *net;
2910 union tpacket_req_u req_u; 2925 union tpacket_req_u req_u;
2911 2926
@@ -2945,9 +2960,14 @@ static int packet_release(struct socket *sock)
2945 packet_set_ring(sk, &req_u, 1, 1); 2960 packet_set_ring(sk, &req_u, 1, 1);
2946 } 2961 }
2947 2962
2948 fanout_release(sk); 2963 f = fanout_release(sk);
2949 2964
2950 synchronize_net(); 2965 synchronize_net();
2966
2967 if (f) {
2968 fanout_release_data(f);
2969 kfree(f);
2970 }
2951 /* 2971 /*
2952 * Now the socket is dead. No more input will appear. 2972 * Now the socket is dead. No more input will appear.
2953 */ 2973 */
@@ -3899,7 +3919,6 @@ static int packet_notifier(struct notifier_block *this,
3899 } 3919 }
3900 if (msg == NETDEV_UNREGISTER) { 3920 if (msg == NETDEV_UNREGISTER) {
3901 packet_cached_dev_reset(po); 3921 packet_cached_dev_reset(po);
3902 fanout_release(sk);
3903 po->ifindex = -1; 3922 po->ifindex = -1;
3904 if (po->prot_hook.dev) 3923 if (po->prot_hook.dev)
3905 dev_put(po->prot_hook.dev); 3924 dev_put(po->prot_hook.dev);
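The af_packet hunks above are one coordinated fix: dev_add_pack()/__dev_remove_pack() move under fanout_mutex and become tied to the first member joining and the last one leaving, fanout_release() only unlinks the group, and packet_release() frees it after synchronize_net(). A minimal userspace sketch of the last-one-out registration pattern, with pthreads standing in for the kernel mutex and illustrative names throughout:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;
static int num_members;
static int hook_registered;            /* stands in for dev_add_pack() state */

static void group_join(void)
{
	pthread_mutex_lock(&group_lock);
	if (num_members++ == 0)
		hook_registered = 1;   /* dev_add_pack(&f->prot_hook) */
	pthread_mutex_unlock(&group_lock);
}

static void group_leave(void)
{
	pthread_mutex_lock(&group_lock);
	if (--num_members == 0)
		hook_registered = 0;   /* __dev_remove_pack(&f->prot_hook) */
	pthread_mutex_unlock(&group_lock);
}

int main(void)
{
	group_join();
	group_leave();
	printf("hook registered: %d\n", hook_registered);
	return 0;
}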
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index c985ecbe9bd6..ae5ac175b2be 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -252,7 +252,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
252 const int pkt_len = 20; 252 const int pkt_len = 20;
253 struct qrtr_hdr *hdr; 253 struct qrtr_hdr *hdr;
254 struct sk_buff *skb; 254 struct sk_buff *skb;
255 u32 *buf; 255 __le32 *buf;
256 256
257 skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL); 257 skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
258 if (!skb) 258 if (!skb)
@@ -269,7 +269,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
269 hdr->dst_node_id = cpu_to_le32(dst_node); 269 hdr->dst_node_id = cpu_to_le32(dst_node);
270 hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL); 270 hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
271 271
272 buf = (u32 *)skb_put(skb, pkt_len); 272 buf = (__le32 *)skb_put(skb, pkt_len);
273 memset(buf, 0, pkt_len); 273 memset(buf, 0, pkt_len);
274 buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX); 274 buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
275 buf[1] = cpu_to_le32(src_node); 275 buf[1] = cpu_to_le32(src_node);
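The qrtr change is a sparse-annotation fix: the buffer carries little-endian wire values, so it is typed __le32 to match the cpu_to_le32() stores. A userspace analogue using glibc's <endian.h>, where htole32()/le32toh() play the roles of cpu_to_le32()/le32_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	uint32_t le_buf[3];                 /* holds little-endian wire data */

	memset(le_buf, 0, sizeof(le_buf));
	le_buf[0] = htole32(7);             /* cpu_to_le32(QRTR_TYPE_...) */
	le_buf[1] = htole32(42);            /* cpu_to_le32(src_node) */
	printf("%u\n", (unsigned)le32toh(le_buf[0])); /* convert before use */
	return 0;
}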
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 2095c83ce773..e10456ef6f7a 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -900,8 +900,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
900 goto err; 900 goto err;
901 } 901 }
902 act->order = i; 902 act->order = i;
903 if (event == RTM_GETACTION)
904 act->tcfa_refcnt++;
905 list_add_tail(&act->list, &actions); 903 list_add_tail(&act->list, &actions);
906 } 904 }
907 905
@@ -914,7 +912,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
914 return ret; 912 return ret;
915 } 913 }
916err: 914err:
917 tcf_action_destroy(&actions, 0); 915 if (event != RTM_GETACTION)
916 tcf_action_destroy(&actions, 0);
918 return ret; 917 return ret;
919} 918}
920 919
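The act_api fix drops the artificial refcount bump RTM_GETACTION took solely so the shared error path could drop it again; instead the error path now skips tcf_action_destroy() for GET, since a dump only borrows the actions. The ownership rule in miniature (hypothetical names, not the tc API):

#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

struct action { int id; };

static void destroy_all(struct action **a, int n)
{
	while (n--)
		free(a[n]);
}

/* On GET we only borrow the entries, so a failure must not free them. */
static int process(struct action **acts, int n, bool is_get, bool fail)
{
	if (fail) {
		if (!is_get)
			destroy_all(acts, n);   /* we created them: clean up */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct action *a = malloc(sizeof(*a));
	struct action *acts[1] = { a };

	process(acts, 1, true, true);   /* GET fails: 'a' must survive */
	printf("%p\n", (void *)acts[0]);
	free(a);
	return 0;
}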
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 1c60317f0121..520baa41cba3 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -123,12 +123,11 @@ static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
123 nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name)) 123 nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
124 return -EMSGSIZE; 124 return -EMSGSIZE;
125 125
126 nla = nla_reserve(skb, TCA_ACT_BPF_DIGEST, 126 nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
127 sizeof(prog->filter->digest));
128 if (nla == NULL) 127 if (nla == NULL)
129 return -EMSGSIZE; 128 return -EMSGSIZE;
130 129
131 memcpy(nla_data(nla), prog->filter->digest, nla_len(nla)); 130 memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
132 131
133 return 0; 132 return 0;
134} 133}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 3fbba79a4ef0..1ecdf809b5fa 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -148,13 +148,15 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
148 unsigned long cl; 148 unsigned long cl;
149 unsigned long fh; 149 unsigned long fh;
150 int err; 150 int err;
151 int tp_created = 0; 151 int tp_created;
152 152
153 if ((n->nlmsg_type != RTM_GETTFILTER) && 153 if ((n->nlmsg_type != RTM_GETTFILTER) &&
154 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) 154 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
155 return -EPERM; 155 return -EPERM;
156 156
157replay: 157replay:
158 tp_created = 0;
159
158 err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL); 160 err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
159 if (err < 0) 161 if (err < 0)
160 return err; 162 return err;
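tp_created must be reset on every pass through the replay label; a second iteration could otherwise remember a proto created, and torn down, by the first pass and destroy it again. The shape of the bug and the fix, as a small C sketch:

#include <stdio.h>

static int try_once(int attempt, int *created)
{
	if (attempt == 0) {
		*created = 1;	/* first pass creates something, then fails */
		return -11;	/* -EAGAIN: caller must replay */
	}
	return 0;
}

int main(void)
{
	int err, attempt = 0;
	int tp_created;		/* note: no initializer at declaration */

replay:
	tp_created = 0;		/* the fix: reset on every iteration */
	err = try_once(attempt++, &tp_created);
	if (err == -11)
		goto replay;
	printf("err=%d created=%d\n", err, tp_created);
	return 0;
}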
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index adc776048d1a..d9c97018317d 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -555,11 +555,11 @@ static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
555 nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name)) 555 nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
556 return -EMSGSIZE; 556 return -EMSGSIZE;
557 557
558 nla = nla_reserve(skb, TCA_BPF_DIGEST, sizeof(prog->filter->digest)); 558 nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
559 if (nla == NULL) 559 if (nla == NULL)
560 return -EMSGSIZE; 560 return -EMSGSIZE;
561 561
562 memcpy(nla_data(nla), prog->filter->digest, nla_len(nla)); 562 memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
563 563
564 return 0; 564 return 0;
565} 565}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 333f8e268431..5752789acc13 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -153,10 +153,14 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
153 153
154 switch (ip_tunnel_info_af(info)) { 154 switch (ip_tunnel_info_af(info)) {
155 case AF_INET: 155 case AF_INET:
156 skb_key.enc_control.addr_type =
157 FLOW_DISSECTOR_KEY_IPV4_ADDRS;
156 skb_key.enc_ipv4.src = key->u.ipv4.src; 158 skb_key.enc_ipv4.src = key->u.ipv4.src;
157 skb_key.enc_ipv4.dst = key->u.ipv4.dst; 159 skb_key.enc_ipv4.dst = key->u.ipv4.dst;
158 break; 160 break;
159 case AF_INET6: 161 case AF_INET6:
162 skb_key.enc_control.addr_type =
163 FLOW_DISSECTOR_KEY_IPV6_ADDRS;
160 skb_key.enc_ipv6.src = key->u.ipv6.src; 164 skb_key.enc_ipv6.src = key->u.ipv6.src;
161 skb_key.enc_ipv6.dst = key->u.ipv6.dst; 165 skb_key.enc_ipv6.dst = key->u.ipv6.dst;
162 break; 166 break;
@@ -564,9 +568,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
564 &mask->icmp.type, 568 &mask->icmp.type,
565 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 569 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
566 sizeof(key->icmp.type)); 570 sizeof(key->icmp.type));
567 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE, 571 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
568 &mask->icmp.code, 572 &mask->icmp.code,
569 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 573 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
570 sizeof(key->icmp.code)); 574 sizeof(key->icmp.code));
571 } 575 }
572 576
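Two independent cls_flower fixes: the dissector's enc_control.addr_type has to be filled in alongside the tunnel addresses or tunnel matching never succeeds, and the ICMPv6 code field was being read from the ICMPv4 netlink attributes. The first is the classic tagged-union discipline, sketched here with illustrative types:

#include <stdio.h>

enum addr_type { ADDR_UNSPEC, ADDR_IPV4, ADDR_IPV6 };

struct flow_key {
	enum addr_type addr_type;      /* the control tag */
	unsigned int v4_src, v4_dst;   /* union members in the real code */
};

static void fill_ipv4(struct flow_key *k, unsigned int src, unsigned int dst)
{
	k->addr_type = ADDR_IPV4;      /* the fix: tag set with the data */
	k->v4_src = src;
	k->v4_dst = dst;
}

int main(void)
{
	struct flow_key k = { 0 };

	fill_ipv4(&k, 1, 2);
	printf("type=%d\n", k.addr_type);
	return 0;
}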
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index f935429bd5ef..b12bc2abea93 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -16,16 +16,11 @@
16#include <net/sch_generic.h> 16#include <net/sch_generic.h>
17#include <net/pkt_cls.h> 17#include <net/pkt_cls.h>
18 18
19struct cls_mall_filter { 19struct cls_mall_head {
20 struct tcf_exts exts; 20 struct tcf_exts exts;
21 struct tcf_result res; 21 struct tcf_result res;
22 u32 handle; 22 u32 handle;
23 struct rcu_head rcu;
24 u32 flags; 23 u32 flags;
25};
26
27struct cls_mall_head {
28 struct cls_mall_filter *filter;
29 struct rcu_head rcu; 24 struct rcu_head rcu;
30}; 25};
31 26
@@ -33,38 +28,29 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
33 struct tcf_result *res) 28 struct tcf_result *res)
34{ 29{
35 struct cls_mall_head *head = rcu_dereference_bh(tp->root); 30 struct cls_mall_head *head = rcu_dereference_bh(tp->root);
36 struct cls_mall_filter *f = head->filter;
37 31
38 if (tc_skip_sw(f->flags)) 32 if (tc_skip_sw(head->flags))
39 return -1; 33 return -1;
40 34
41 return tcf_exts_exec(skb, &f->exts, res); 35 return tcf_exts_exec(skb, &head->exts, res);
42} 36}
43 37
44static int mall_init(struct tcf_proto *tp) 38static int mall_init(struct tcf_proto *tp)
45{ 39{
46 struct cls_mall_head *head;
47
48 head = kzalloc(sizeof(*head), GFP_KERNEL);
49 if (!head)
50 return -ENOBUFS;
51
52 rcu_assign_pointer(tp->root, head);
53
54 return 0; 40 return 0;
55} 41}
56 42
57static void mall_destroy_filter(struct rcu_head *head) 43static void mall_destroy_rcu(struct rcu_head *rcu)
58{ 44{
59 struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu); 45 struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
46 rcu);
60 47
61 tcf_exts_destroy(&f->exts); 48 tcf_exts_destroy(&head->exts);
62 49 kfree(head);
63 kfree(f);
64} 50}
65 51
66static int mall_replace_hw_filter(struct tcf_proto *tp, 52static int mall_replace_hw_filter(struct tcf_proto *tp,
67 struct cls_mall_filter *f, 53 struct cls_mall_head *head,
68 unsigned long cookie) 54 unsigned long cookie)
69{ 55{
70 struct net_device *dev = tp->q->dev_queue->dev; 56 struct net_device *dev = tp->q->dev_queue->dev;
@@ -74,7 +60,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
74 offload.type = TC_SETUP_MATCHALL; 60 offload.type = TC_SETUP_MATCHALL;
75 offload.cls_mall = &mall_offload; 61 offload.cls_mall = &mall_offload;
76 offload.cls_mall->command = TC_CLSMATCHALL_REPLACE; 62 offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
77 offload.cls_mall->exts = &f->exts; 63 offload.cls_mall->exts = &head->exts;
78 offload.cls_mall->cookie = cookie; 64 offload.cls_mall->cookie = cookie;
79 65
80 return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, 66 return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
@@ -82,7 +68,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
82} 68}
83 69
84static void mall_destroy_hw_filter(struct tcf_proto *tp, 70static void mall_destroy_hw_filter(struct tcf_proto *tp,
85 struct cls_mall_filter *f, 71 struct cls_mall_head *head,
86 unsigned long cookie) 72 unsigned long cookie)
87{ 73{
88 struct net_device *dev = tp->q->dev_queue->dev; 74 struct net_device *dev = tp->q->dev_queue->dev;
@@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
103{ 89{
104 struct cls_mall_head *head = rtnl_dereference(tp->root); 90 struct cls_mall_head *head = rtnl_dereference(tp->root);
105 struct net_device *dev = tp->q->dev_queue->dev; 91 struct net_device *dev = tp->q->dev_queue->dev;
106 struct cls_mall_filter *f = head->filter;
107 92
108 if (!force && f) 93 if (!head)
109 return false; 94 return true;
110 95
111 if (f) { 96 if (tc_should_offload(dev, tp, head->flags))
112 if (tc_should_offload(dev, tp, f->flags)) 97 mall_destroy_hw_filter(tp, head, (unsigned long) head);
113 mall_destroy_hw_filter(tp, f, (unsigned long) f);
114 98
115 call_rcu(&f->rcu, mall_destroy_filter); 99 call_rcu(&head->rcu, mall_destroy_rcu);
116 }
117 kfree_rcu(head, rcu);
118 return true; 100 return true;
119} 101}
120 102
121static unsigned long mall_get(struct tcf_proto *tp, u32 handle) 103static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
122{ 104{
123 struct cls_mall_head *head = rtnl_dereference(tp->root); 105 return 0UL;
124 struct cls_mall_filter *f = head->filter;
125
126 if (f && f->handle == handle)
127 return (unsigned long) f;
128 return 0;
129} 106}
130 107
131static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = { 108static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
@@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
134}; 111};
135 112
136static int mall_set_parms(struct net *net, struct tcf_proto *tp, 113static int mall_set_parms(struct net *net, struct tcf_proto *tp,
137 struct cls_mall_filter *f, 114 struct cls_mall_head *head,
138 unsigned long base, struct nlattr **tb, 115 unsigned long base, struct nlattr **tb,
139 struct nlattr *est, bool ovr) 116 struct nlattr *est, bool ovr)
140{ 117{
@@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
147 return err; 124 return err;
148 125
149 if (tb[TCA_MATCHALL_CLASSID]) { 126 if (tb[TCA_MATCHALL_CLASSID]) {
150 f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]); 127 head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
151 tcf_bind_filter(tp, &f->res, base); 128 tcf_bind_filter(tp, &head->res, base);
152 } 129 }
153 130
154 tcf_exts_change(tp, &f->exts, &e); 131 tcf_exts_change(tp, &head->exts, &e);
155 132
156 return 0; 133 return 0;
157} 134}
@@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
162 unsigned long *arg, bool ovr) 139 unsigned long *arg, bool ovr)
163{ 140{
164 struct cls_mall_head *head = rtnl_dereference(tp->root); 141 struct cls_mall_head *head = rtnl_dereference(tp->root);
165 struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
166 struct net_device *dev = tp->q->dev_queue->dev; 142 struct net_device *dev = tp->q->dev_queue->dev;
167 struct cls_mall_filter *f;
168 struct nlattr *tb[TCA_MATCHALL_MAX + 1]; 143 struct nlattr *tb[TCA_MATCHALL_MAX + 1];
144 struct cls_mall_head *new;
169 u32 flags = 0; 145 u32 flags = 0;
170 int err; 146 int err;
171 147
172 if (!tca[TCA_OPTIONS]) 148 if (!tca[TCA_OPTIONS])
173 return -EINVAL; 149 return -EINVAL;
174 150
175 if (head->filter) 151 if (head)
176 return -EBUSY; 152 return -EEXIST;
177
178 if (fold)
179 return -EINVAL;
180 153
181 err = nla_parse_nested(tb, TCA_MATCHALL_MAX, 154 err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
182 tca[TCA_OPTIONS], mall_policy); 155 tca[TCA_OPTIONS], mall_policy);
@@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
189 return -EINVAL; 162 return -EINVAL;
190 } 163 }
191 164
192 f = kzalloc(sizeof(*f), GFP_KERNEL); 165 new = kzalloc(sizeof(*new), GFP_KERNEL);
193 if (!f) 166 if (!new)
194 return -ENOBUFS; 167 return -ENOBUFS;
195 168
196 tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0); 169 tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
197 170
198 if (!handle) 171 if (!handle)
199 handle = 1; 172 handle = 1;
200 f->handle = handle; 173 new->handle = handle;
201 f->flags = flags; 174 new->flags = flags;
202 175
203 err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); 176 err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
204 if (err) 177 if (err)
205 goto errout; 178 goto errout;
206 179
207 if (tc_should_offload(dev, tp, flags)) { 180 if (tc_should_offload(dev, tp, flags)) {
208 err = mall_replace_hw_filter(tp, f, (unsigned long) f); 181 err = mall_replace_hw_filter(tp, new, (unsigned long) new);
209 if (err) { 182 if (err) {
210 if (tc_skip_sw(flags)) 183 if (tc_skip_sw(flags))
211 goto errout; 184 goto errout;
@@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
214 } 187 }
215 } 188 }
216 189
217 *arg = (unsigned long) f; 190 *arg = (unsigned long) head;
218 rcu_assign_pointer(head->filter, f); 191 rcu_assign_pointer(tp->root, new);
219 192 if (head)
193 call_rcu(&head->rcu, mall_destroy_rcu);
220 return 0; 194 return 0;
221 195
222errout: 196errout:
223 kfree(f); 197 kfree(new);
224 return err; 198 return err;
225} 199}
226 200
227static int mall_delete(struct tcf_proto *tp, unsigned long arg) 201static int mall_delete(struct tcf_proto *tp, unsigned long arg)
228{ 202{
229 struct cls_mall_head *head = rtnl_dereference(tp->root); 203 return -EOPNOTSUPP;
230 struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
231 struct net_device *dev = tp->q->dev_queue->dev;
232
233 if (tc_should_offload(dev, tp, f->flags))
234 mall_destroy_hw_filter(tp, f, (unsigned long) f);
235
236 RCU_INIT_POINTER(head->filter, NULL);
237 tcf_unbind_filter(tp, &f->res);
238 call_rcu(&f->rcu, mall_destroy_filter);
239 return 0;
240} 204}
241 205
242static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg) 206static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
243{ 207{
244 struct cls_mall_head *head = rtnl_dereference(tp->root); 208 struct cls_mall_head *head = rtnl_dereference(tp->root);
245 struct cls_mall_filter *f = head->filter;
246 209
247 if (arg->count < arg->skip) 210 if (arg->count < arg->skip)
248 goto skip; 211 goto skip;
249 if (arg->fn(tp, (unsigned long) f, arg) < 0) 212 if (arg->fn(tp, (unsigned long) head, arg) < 0)
250 arg->stop = 1; 213 arg->stop = 1;
251skip: 214skip:
252 arg->count++; 215 arg->count++;
@@ -255,28 +218,28 @@ skip:
255static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, 218static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
256 struct sk_buff *skb, struct tcmsg *t) 219 struct sk_buff *skb, struct tcmsg *t)
257{ 220{
258 struct cls_mall_filter *f = (struct cls_mall_filter *) fh; 221 struct cls_mall_head *head = (struct cls_mall_head *) fh;
259 struct nlattr *nest; 222 struct nlattr *nest;
260 223
261 if (!f) 224 if (!head)
262 return skb->len; 225 return skb->len;
263 226
264 t->tcm_handle = f->handle; 227 t->tcm_handle = head->handle;
265 228
266 nest = nla_nest_start(skb, TCA_OPTIONS); 229 nest = nla_nest_start(skb, TCA_OPTIONS);
267 if (!nest) 230 if (!nest)
268 goto nla_put_failure; 231 goto nla_put_failure;
269 232
270 if (f->res.classid && 233 if (head->res.classid &&
271 nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid)) 234 nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
272 goto nla_put_failure; 235 goto nla_put_failure;
273 236
274 if (tcf_exts_dump(skb, &f->exts)) 237 if (tcf_exts_dump(skb, &head->exts))
275 goto nla_put_failure; 238 goto nla_put_failure;
276 239
277 nla_nest_end(skb, nest); 240 nla_nest_end(skb, nest);
278 241
279 if (tcf_exts_dump_stats(skb, &f->exts) < 0) 242 if (tcf_exts_dump_stats(skb, &head->exts) < 0)
280 goto nla_put_failure; 243 goto nla_put_failure;
281 244
282 return skb->len; 245 return skb->len;
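cls_matchall collapses the separate filter object into a single head hung directly off tp->root: replace builds the new head fully, publishes it with one pointer assignment, and frees the old head only after readers are done (call_rcu() in the kernel). A compressed userspace sketch with C11 atomics; the deferred free is only marked by a comment since there is no RCU here:

#include <stdatomic.h>
#include <stdlib.h>

struct mall_head { unsigned int handle; unsigned int flags; };

static _Atomic(struct mall_head *) root;

static int mall_replace(unsigned int handle, unsigned int flags)
{
	struct mall_head *new, *old;

	new = calloc(1, sizeof(*new));     /* build fully before publishing */
	if (!new)
		return -1;
	new->handle = handle;
	new->flags = flags;

	old = atomic_exchange(&root, new); /* rcu_assign_pointer + fetch old */
	/* kernel: call_rcu(&old->rcu, mall_destroy_rcu) -- defer until no
	 * reader can still hold the old pointer; free() here is only safe
	 * because this sketch has no concurrent readers. */
	free(old);
	return 0;
}

int main(void) { return mall_replace(1, 0); }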
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 5ed8e79bf102..64dfd35ccdcc 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -222,7 +222,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
222 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); 222 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
223 223
224 rcu_read_lock(); 224 rcu_read_lock();
225 res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass); 225 res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
226 np->tclass);
226 rcu_read_unlock(); 227 rcu_read_unlock();
227 return res; 228 return res;
228} 229}
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 7e869d0cca69..4f5a2b580aa5 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -68,7 +68,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
68 goto out; 68 goto out;
69 } 69 }
70 70
71 segs = skb_segment(skb, features | NETIF_F_HW_CSUM); 71 segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);
72 if (IS_ERR(segs)) 72 if (IS_ERR(segs))
73 goto out; 73 goto out;
74 74
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index e54082699520..34efaa4ef2f6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1048,7 +1048,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1048 (new_transport->state == SCTP_PF))) 1048 (new_transport->state == SCTP_PF)))
1049 new_transport = asoc->peer.active_path; 1049 new_transport = asoc->peer.active_path;
1050 if (new_transport->state == SCTP_UNCONFIRMED) { 1050 if (new_transport->state == SCTP_UNCONFIRMED) {
1051 WARN_ONCE(1, "Atempt to send packet on unconfirmed path."); 1051 WARN_ONCE(1, "Attempt to send packet on unconfirmed path.");
1052 sctp_chunk_fail(chunk, 0); 1052 sctp_chunk_fail(chunk, 0);
1053 sctp_chunk_free(chunk); 1053 sctp_chunk_free(chunk);
1054 continue; 1054 continue;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 318c6786d653..1b5d669e3029 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
235 sctp_assoc_t id) 235 sctp_assoc_t id)
236{ 236{
237 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; 237 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
238 struct sctp_transport *transport; 238 struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
239 union sctp_addr *laddr = (union sctp_addr *)addr; 239 union sctp_addr *laddr = (union sctp_addr *)addr;
240 struct sctp_transport *transport;
241
242 if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
243 return NULL;
240 244
241 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, 245 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
242 laddr, 246 laddr,
@@ -7422,7 +7426,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
7422 */ 7426 */
7423 release_sock(sk); 7427 release_sock(sk);
7424 current_timeo = schedule_timeout(current_timeo); 7428 current_timeo = schedule_timeout(current_timeo);
7425 BUG_ON(sk != asoc->base.sk); 7429 if (sk != asoc->base.sk)
7430 goto do_error;
7426 lock_sock(sk); 7431 lock_sock(sk);
7427 7432
7428 *timeo_p = current_timeo; 7433 *timeo_p = current_timeo;
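Two hardening changes in sctp/socket.c: sctp_addr_id2transport() now verifies the user-supplied address against an address family the socket actually supports before treating it as a union sctp_addr, and the sndbuf wait path turns a BUG_ON() into an ordinary error when the association has migrated to another socket. The validate-before-cast rule, with hypothetical helpers rather than the SCTP API:

#include <netinet/in.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/socket.h>

static const struct sockaddr *checked_addr(const struct sockaddr_storage *ss,
					   size_t len)
{
	if (ss->ss_family == AF_INET && len >= sizeof(struct sockaddr_in))
		return (const struct sockaddr *)ss;
	if (ss->ss_family == AF_INET6 && len >= sizeof(struct sockaddr_in6))
		return (const struct sockaddr *)ss;
	return NULL;               /* unknown family: fail, don't cast */
}

int main(void)
{
	struct sockaddr_storage ss = { .ss_family = AF_INET };

	printf("%s\n", checked_addr(&ss, sizeof(ss)) ? "ok" : "rejected");
	return 0;
}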
diff --git a/net/socket.c b/net/socket.c
index 8487bf136e5c..0758e13754e2 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -533,11 +533,11 @@ static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
533 return used; 533 return used;
534} 534}
535 535
536int sockfs_setattr(struct dentry *dentry, struct iattr *iattr) 536static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
537{ 537{
538 int err = simple_setattr(dentry, iattr); 538 int err = simple_setattr(dentry, iattr);
539 539
540 if (!err) { 540 if (!err && (iattr->ia_valid & ATTR_UID)) {
541 struct socket *sock = SOCKET_I(d_inode(dentry)); 541 struct socket *sock = SOCKET_I(d_inode(dentry));
542 542
543 sock->sk->sk_uid = iattr->ia_uid; 543 sock->sk->sk_uid = iattr->ia_uid;
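sockfs_setattr() used to copy iattr->ia_uid into the socket on any successful setattr, even one that never set ATTR_UID, so a chmod() could write an uninitialized uid. The general rule: consult the validity bitmask before reading an optional field. Sketch with made-up flag values:

#include <stdio.h>

#define ATTR_MODE (1 << 0)
#define ATTR_UID  (1 << 1)

struct iattr { unsigned int ia_valid; unsigned int ia_uid; };

static void apply(unsigned int *sk_uid, const struct iattr *ia)
{
	if (ia->ia_valid & ATTR_UID)   /* the fix: gate on the bitmask */
		*sk_uid = ia->ia_uid;
}

int main(void)
{
	unsigned int uid = 1000;
	struct iattr ia = { .ia_valid = ATTR_MODE };  /* ia_uid never set */

	apply(&uid, &ia);
	printf("%u\n", uid);           /* still 1000 */
	return 0;
}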
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index dc6fb79a361f..25d9a9cf7b66 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
260 if (!oa->data) 260 if (!oa->data)
261 return -ENOMEM; 261 return -ENOMEM;
262 262
263 creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL); 263 creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
264 if (!creds) { 264 if (!creds) {
265 kfree(oa->data); 265 kfree(oa->data);
266 return -ENOMEM; 266 return -ENOMEM;
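The one-liner swaps kmalloc() for kzalloc(): the decoded svc_cred is only partially filled in before other code inspects it, so every field the decoder may skip must already read as zero. The userspace equivalent is calloc() over malloc():

#include <stdlib.h>
#include <stdio.h>

struct cred { int n_groups; int *groups; };

int main(void)
{
	/* calloc == kzalloc: unfilled fields read as 0/NULL, so consumers
	 * that check n_groups or groups see a sane empty cred. */
	struct cred *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	printf("%d %p\n", c->n_groups, (void *)c->groups);
	free(c);
	return 0;
}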
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 886e9d381771..153082598522 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1489,7 +1489,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1489 case RPC_GSS_PROC_DESTROY: 1489 case RPC_GSS_PROC_DESTROY:
1490 if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq)) 1490 if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
1491 goto auth_err; 1491 goto auth_err;
1492 rsci->h.expiry_time = get_seconds(); 1492 rsci->h.expiry_time = seconds_since_boot();
1493 set_bit(CACHE_NEGATIVE, &rsci->h.flags); 1493 set_bit(CACHE_NEGATIVE, &rsci->h.flags);
1494 if (resv->iov_len + 4 > PAGE_SIZE) 1494 if (resv->iov_len + 4 > PAGE_SIZE)
1495 goto drop; 1495 goto drop;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 1efbe48e794f..1dc9f3bac099 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -336,6 +336,11 @@ out:
336 336
337static DEFINE_IDA(rpc_clids); 337static DEFINE_IDA(rpc_clids);
338 338
339void rpc_cleanup_clids(void)
340{
341 ida_destroy(&rpc_clids);
342}
343
339static int rpc_alloc_clid(struct rpc_clnt *clnt) 344static int rpc_alloc_clid(struct rpc_clnt *clnt)
340{ 345{
341 int clid; 346 int clid;
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index d1c330a7953a..c73de181467a 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -119,6 +119,7 @@ out:
119static void __exit 119static void __exit
120cleanup_sunrpc(void) 120cleanup_sunrpc(void)
121{ 121{
122 rpc_cleanup_clids();
122 rpcauth_remove_module(); 123 rpcauth_remove_module();
123 cleanup_socket_xprt(); 124 cleanup_socket_xprt();
124 svc_cleanup_xprt_sock(); 125 svc_cleanup_xprt_sock();
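These two sunrpc hunks pair up: the client-ID IDA lazily allocates internal memory the first time an ID is handed out, and nothing released it on module unload, so cleanup_sunrpc() now calls the new rpc_cleanup_clids(), which ida_destroy()s it. The pattern, reduced to a toy allocator with an exit hook:

#include <stdio.h>
#include <stdlib.h>

/* Toy ID allocator with lazily created backing storage. */
static unsigned char *clid_bitmap;     /* allocated on first use */

static int clid_alloc(void)
{
	if (!clid_bitmap) {
		clid_bitmap = calloc(32, 1);
		if (!clid_bitmap)
			return -1;
	}
	clid_bitmap[0] |= 1;
	return 0;
}

static void clid_cleanup(void)         /* rpc_cleanup_clids() analogue */
{
	free(clid_bitmap);             /* ida_destroy(&rpc_clids) */
	clid_bitmap = NULL;
}

int main(void)
{
	atexit(clid_cleanup);          /* module_exit: cleanup_sunrpc() */
	return clid_alloc();
}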
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 3bc1d61694cb..9c9db55a0c1e 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -799,6 +799,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
799 799
800 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { 800 if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
801 dprintk("svc_recv: found XPT_CLOSE\n"); 801 dprintk("svc_recv: found XPT_CLOSE\n");
802 if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
803 xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
802 svc_delete_xprt(xprt); 804 svc_delete_xprt(xprt);
803 /* Leave XPT_BUSY set on the dead xprt: */ 805 /* Leave XPT_BUSY set on the dead xprt: */
804 goto out; 806 goto out;
@@ -1020,9 +1022,11 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
1020 le = to_be_closed.next; 1022 le = to_be_closed.next;
1021 list_del_init(le); 1023 list_del_init(le);
1022 xprt = list_entry(le, struct svc_xprt, xpt_list); 1024 xprt = list_entry(le, struct svc_xprt, xpt_list);
1023 dprintk("svc_age_temp_xprts_now: closing %p\n", xprt); 1025 set_bit(XPT_CLOSE, &xprt->xpt_flags);
1024 xprt->xpt_ops->xpo_kill_temp_xprt(xprt); 1026 set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
1025 svc_close_xprt(xprt); 1027 dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
1028 xprt);
1029 svc_xprt_enqueue(xprt);
1026 } 1030 }
1027} 1031}
1028EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now); 1032EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);
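Rather than killing aged-out temporary transports inline and racing the threads that own them, svc_age_temp_xprts_now() now only sets XPT_CLOSE plus the new XPT_KILL_TEMP bit and enqueues the transport; the service thread that already serializes per-transport work performs the kill from svc_handle_xprt(). A sketch of that hand-off using C11 atomic flag bits (single-threaded here, so the enqueue/wakeup is just a comment):

#include <stdatomic.h>
#include <stdio.h>

#define XPT_CLOSE     (1u << 0)
#define XPT_KILL_TEMP (1u << 1)

struct xprt { atomic_uint flags; };

/* Producer side: request the close, don't perform it. */
static void request_close(struct xprt *x)
{
	atomic_fetch_or(&x->flags, XPT_CLOSE | XPT_KILL_TEMP);
	/* kernel: svc_xprt_enqueue(x) wakes the owning thread */
}

/* Consumer side (the owning service thread). */
static void handle(struct xprt *x)
{
	unsigned int f = atomic_load(&x->flags);

	if (f & XPT_CLOSE) {
		if (atomic_fetch_and(&x->flags, ~XPT_KILL_TEMP) & XPT_KILL_TEMP)
			puts("xpo_kill_temp_xprt()");
		puts("svc_delete_xprt()");
	}
}

int main(void)
{
	struct xprt x;

	atomic_init(&x.flags, 0);
	request_close(&x);
	handle(&x);
	return 0;
}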
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 57d35fbb1c28..172b537f8cfc 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -347,8 +347,6 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
347 atomic_inc(&rdma_stat_read); 347 atomic_inc(&rdma_stat_read);
348 return ret; 348 return ret;
349 err: 349 err:
350 ib_dma_unmap_sg(xprt->sc_cm_id->device,
351 frmr->sg, frmr->sg_nents, frmr->direction);
352 svc_rdma_put_context(ctxt, 0); 350 svc_rdma_put_context(ctxt, 0);
353 svc_rdma_put_frmr(xprt, frmr); 351 svc_rdma_put_frmr(xprt, frmr);
354 return ret; 352 return ret;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 6b109a808d4c..02462d67d191 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -169,7 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
169 169
170 /* Send response, if necessary */ 170 /* Send response, if necessary */
171 if (respond && (mtyp == DSC_REQ_MSG)) { 171 if (respond && (mtyp == DSC_REQ_MSG)) {
172 rskb = tipc_buf_acquire(MAX_H_SIZE); 172 rskb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
173 if (!rskb) 173 if (!rskb)
174 return; 174 return;
175 tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer); 175 tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
@@ -278,7 +278,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b,
278 req = kmalloc(sizeof(*req), GFP_ATOMIC); 278 req = kmalloc(sizeof(*req), GFP_ATOMIC);
279 if (!req) 279 if (!req)
280 return -ENOMEM; 280 return -ENOMEM;
281 req->buf = tipc_buf_acquire(MAX_H_SIZE); 281 req->buf = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
282 if (!req->buf) { 282 if (!req->buf) {
283 kfree(req); 283 kfree(req);
284 return -ENOMEM; 284 return -ENOMEM;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index bda89bf9f4ff..4e8647aef01c 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1395,7 +1395,7 @@ tnl:
1395 msg_set_seqno(hdr, seqno++); 1395 msg_set_seqno(hdr, seqno++);
1396 pktlen = msg_size(hdr); 1396 pktlen = msg_size(hdr);
1397 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE); 1397 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1398 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE); 1398 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
1399 if (!tnlskb) { 1399 if (!tnlskb) {
1400 pr_warn("%sunable to send packet\n", link_co_err); 1400 pr_warn("%sunable to send packet\n", link_co_err);
1401 return; 1401 return;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index a22be502f1bd..ab02d0742476 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -58,12 +58,12 @@ static unsigned int align(unsigned int i)
58 * NOTE: Headroom is reserved to allow prepending of a data link header. 58 * NOTE: Headroom is reserved to allow prepending of a data link header.
59 * There may also be unrequested tailroom present at the buffer's end. 59 * There may also be unrequested tailroom present at the buffer's end.
60 */ 60 */
61struct sk_buff *tipc_buf_acquire(u32 size) 61struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
62{ 62{
63 struct sk_buff *skb; 63 struct sk_buff *skb;
64 unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; 64 unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
65 65
66 skb = alloc_skb_fclone(buf_size, GFP_ATOMIC); 66 skb = alloc_skb_fclone(buf_size, gfp);
67 if (skb) { 67 if (skb) {
68 skb_reserve(skb, BUF_HEADROOM); 68 skb_reserve(skb, BUF_HEADROOM);
69 skb_put(skb, size); 69 skb_put(skb, size);
@@ -95,7 +95,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type,
95 struct tipc_msg *msg; 95 struct tipc_msg *msg;
96 struct sk_buff *buf; 96 struct sk_buff *buf;
97 97
98 buf = tipc_buf_acquire(hdr_sz + data_sz); 98 buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
99 if (unlikely(!buf)) 99 if (unlikely(!buf))
100 return NULL; 100 return NULL;
101 101
@@ -261,7 +261,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
261 261
262 /* No fragmentation needed? */ 262 /* No fragmentation needed? */
263 if (likely(msz <= pktmax)) { 263 if (likely(msz <= pktmax)) {
264 skb = tipc_buf_acquire(msz); 264 skb = tipc_buf_acquire(msz, GFP_KERNEL);
265 if (unlikely(!skb)) 265 if (unlikely(!skb))
266 return -ENOMEM; 266 return -ENOMEM;
267 skb_orphan(skb); 267 skb_orphan(skb);
@@ -282,7 +282,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
282 msg_set_importance(&pkthdr, msg_importance(mhdr)); 282 msg_set_importance(&pkthdr, msg_importance(mhdr));
283 283
284 /* Prepare first fragment */ 284 /* Prepare first fragment */
285 skb = tipc_buf_acquire(pktmax); 285 skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
286 if (!skb) 286 if (!skb)
287 return -ENOMEM; 287 return -ENOMEM;
288 skb_orphan(skb); 288 skb_orphan(skb);
@@ -313,7 +313,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
313 pktsz = drem + INT_H_SIZE; 313 pktsz = drem + INT_H_SIZE;
314 else 314 else
315 pktsz = pktmax; 315 pktsz = pktmax;
316 skb = tipc_buf_acquire(pktsz); 316 skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
317 if (!skb) { 317 if (!skb) {
318 rc = -ENOMEM; 318 rc = -ENOMEM;
319 goto error; 319 goto error;
@@ -448,7 +448,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
448 if (msz > (max / 2)) 448 if (msz > (max / 2))
449 return false; 449 return false;
450 450
451 _skb = tipc_buf_acquire(max); 451 _skb = tipc_buf_acquire(max, GFP_ATOMIC);
452 if (!_skb) 452 if (!_skb)
453 return false; 453 return false;
454 454
@@ -496,7 +496,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
496 496
497 /* Never return SHORT header; expand by replacing buffer if necessary */ 497 /* Never return SHORT header; expand by replacing buffer if necessary */
498 if (msg_short(hdr)) { 498 if (msg_short(hdr)) {
499 *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen); 499 *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
500 if (!*skb) 500 if (!*skb)
501 goto exit; 501 goto exit;
502 memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen); 502 memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
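tipc_buf_acquire() grows a gfp_t parameter so process-context callers such as tipc_msg_build() can allocate with GFP_KERNEL and sleep under memory pressure, while softirq-path callers keep GFP_ATOMIC; the surrounding tipc hunks are the matching prototype and call-site updates. The shape of the change, with a may_block flag standing in for gfp_t:

#include <stdbool.h>
#include <stdlib.h>

/* may_block plays the role of gfp: GFP_KERNEL == true, GFP_ATOMIC == false */
static void *buf_acquire(size_t size, bool may_block)
{
	/* a real kernel allocator can retry/reclaim when blocking is
	 * allowed; here the flag is only threaded through for illustration */
	(void)may_block;
	return malloc(size);
}

static void *build_msg(size_t sz)     { return buf_acquire(sz, true);  }
static void *softirq_clone(size_t sz) { return buf_acquire(sz, false); }

int main(void)
{
	void *a = build_msg(64), *b = softirq_clone(64);

	free(a);
	free(b);
	return 0;
}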
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 8d408612ffa4..2c3dc38abf9c 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -820,7 +820,7 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
820 return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG); 820 return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
821} 821}
822 822
823struct sk_buff *tipc_buf_acquire(u32 size); 823struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp);
824bool tipc_msg_validate(struct sk_buff *skb); 824bool tipc_msg_validate(struct sk_buff *skb);
825bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err); 825bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
826void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type, 826void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index c1cfd92de17a..23f8899e0f8c 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -69,7 +69,7 @@ static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
69 u32 dest) 69 u32 dest)
70{ 70{
71 struct tipc_net *tn = net_generic(net, tipc_net_id); 71 struct tipc_net *tn = net_generic(net, tipc_net_id);
72 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size); 72 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
73 struct tipc_msg *msg; 73 struct tipc_msg *msg;
74 74
75 if (buf != NULL) { 75 if (buf != NULL) {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 9d2f4c2b08ab..27753325e06e 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -263,6 +263,11 @@ static void tipc_node_write_lock(struct tipc_node *n)
263 write_lock_bh(&n->lock); 263 write_lock_bh(&n->lock);
264} 264}
265 265
266static void tipc_node_write_unlock_fast(struct tipc_node *n)
267{
268 write_unlock_bh(&n->lock);
269}
270
266static void tipc_node_write_unlock(struct tipc_node *n) 271static void tipc_node_write_unlock(struct tipc_node *n)
267{ 272{
268 struct net *net = n->net; 273 struct net *net = n->net;
@@ -417,7 +422,7 @@ void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
417 } 422 }
418 tipc_node_write_lock(n); 423 tipc_node_write_lock(n);
419 list_add_tail(subscr, &n->publ_list); 424 list_add_tail(subscr, &n->publ_list);
420 tipc_node_write_unlock(n); 425 tipc_node_write_unlock_fast(n);
421 tipc_node_put(n); 426 tipc_node_put(n);
422} 427}
423 428
@@ -435,7 +440,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
435 } 440 }
436 tipc_node_write_lock(n); 441 tipc_node_write_lock(n);
437 list_del_init(subscr); 442 list_del_init(subscr);
438 tipc_node_write_unlock(n); 443 tipc_node_write_unlock_fast(n);
439 tipc_node_put(n); 444 tipc_node_put(n);
440} 445}
441 446
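tipc_node_write_unlock() publishes link-state changes to peers after dropping the lock; subscribe/unsubscribe only touch the local publication list, so they get a _fast variant that drops the lock and does nothing else. A two-tier unlock sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t node_lock = PTHREAD_RWLOCK_INITIALIZER;

static void node_write_unlock_fast(void)
{
	pthread_rwlock_unlock(&node_lock);   /* no side effects */
}

static void node_write_unlock(void)
{
	/* snapshot state under the lock, then notify after dropping it */
	pthread_rwlock_unlock(&node_lock);
	puts("publish link state to peers");
}

int main(void)
{
	pthread_rwlock_wrlock(&node_lock);
	node_write_unlock_fast();            /* e.g. list_add on publ_list */
	pthread_rwlock_wrlock(&node_lock);
	node_write_unlock();                 /* e.g. link went up/down */
	return 0;
}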
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 215849ce453d..3cd6402e812c 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -86,12 +86,12 @@ struct outqueue_entry {
86static void tipc_recv_work(struct work_struct *work); 86static void tipc_recv_work(struct work_struct *work);
87static void tipc_send_work(struct work_struct *work); 87static void tipc_send_work(struct work_struct *work);
88static void tipc_clean_outqueues(struct tipc_conn *con); 88static void tipc_clean_outqueues(struct tipc_conn *con);
89static void tipc_sock_release(struct tipc_conn *con);
90 89
91static void tipc_conn_kref_release(struct kref *kref) 90static void tipc_conn_kref_release(struct kref *kref)
92{ 91{
93 struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); 92 struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
94 struct sockaddr_tipc *saddr = con->server->saddr; 93 struct tipc_server *s = con->server;
94 struct sockaddr_tipc *saddr = s->saddr;
95 struct socket *sock = con->sock; 95 struct socket *sock = con->sock;
96 struct sock *sk; 96 struct sock *sk;
97 97
@@ -103,9 +103,13 @@ static void tipc_conn_kref_release(struct kref *kref)
103 } 103 }
104 saddr->scope = -TIPC_NODE_SCOPE; 104 saddr->scope = -TIPC_NODE_SCOPE;
105 kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); 105 kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
106 tipc_sock_release(con);
107 sock_release(sock); 106 sock_release(sock);
108 con->sock = NULL; 107 con->sock = NULL;
108
109 spin_lock_bh(&s->idr_lock);
110 idr_remove(&s->conn_idr, con->conid);
111 s->idr_in_use--;
112 spin_unlock_bh(&s->idr_lock);
109 } 113 }
110 114
111 tipc_clean_outqueues(con); 115 tipc_clean_outqueues(con);
@@ -128,8 +132,10 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
128 132
129 spin_lock_bh(&s->idr_lock); 133 spin_lock_bh(&s->idr_lock);
130 con = idr_find(&s->conn_idr, conid); 134 con = idr_find(&s->conn_idr, conid);
131 if (con) 135 if (con && test_bit(CF_CONNECTED, &con->flags))
132 conn_get(con); 136 conn_get(con);
137 else
138 con = NULL;
133 spin_unlock_bh(&s->idr_lock); 139 spin_unlock_bh(&s->idr_lock);
134 return con; 140 return con;
135} 141}
@@ -186,26 +192,15 @@ static void tipc_unregister_callbacks(struct tipc_conn *con)
186 write_unlock_bh(&sk->sk_callback_lock); 192 write_unlock_bh(&sk->sk_callback_lock);
187} 193}
188 194
189static void tipc_sock_release(struct tipc_conn *con)
190{
191 struct tipc_server *s = con->server;
192
193 if (con->conid)
194 s->tipc_conn_release(con->conid, con->usr_data);
195
196 tipc_unregister_callbacks(con);
197}
198
199static void tipc_close_conn(struct tipc_conn *con) 195static void tipc_close_conn(struct tipc_conn *con)
200{ 196{
201 struct tipc_server *s = con->server; 197 struct tipc_server *s = con->server;
202 198
203 if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { 199 if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
200 tipc_unregister_callbacks(con);
204 201
205 spin_lock_bh(&s->idr_lock); 202 if (con->conid)
206 idr_remove(&s->conn_idr, con->conid); 203 s->tipc_conn_release(con->conid, con->usr_data);
207 s->idr_in_use--;
208 spin_unlock_bh(&s->idr_lock);
209 204
210 /* We shouldn't flush pending works as we may be in the 205 /* We shouldn't flush pending works as we may be in the
211 * thread. In fact the races with pending rx/tx work structs 206 * thread. In fact the races with pending rx/tx work structs
@@ -458,6 +453,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
458 if (!con) 453 if (!con)
459 return -EINVAL; 454 return -EINVAL;
460 455
456 if (!test_bit(CF_CONNECTED, &con->flags)) {
457 conn_put(con);
458 return 0;
459 }
460
461 e = tipc_alloc_entry(data, len); 461 e = tipc_alloc_entry(data, len);
462 if (!e) { 462 if (!e) {
463 conn_put(con); 463 conn_put(con);
@@ -471,12 +471,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
471 list_add_tail(&e->list, &con->outqueue); 471 list_add_tail(&e->list, &con->outqueue);
472 spin_unlock_bh(&con->outqueue_lock); 472 spin_unlock_bh(&con->outqueue_lock);
473 473
474 if (test_bit(CF_CONNECTED, &con->flags)) { 474 if (!queue_work(s->send_wq, &con->swork))
475 if (!queue_work(s->send_wq, &con->swork))
476 conn_put(con);
477 } else {
478 conn_put(con); 475 conn_put(con);
479 }
480 return 0; 476 return 0;
481} 477}
482 478
@@ -500,7 +496,7 @@ static void tipc_send_to_sock(struct tipc_conn *con)
500 int ret; 496 int ret;
501 497
502 spin_lock_bh(&con->outqueue_lock); 498 spin_lock_bh(&con->outqueue_lock);
503 while (1) { 499 while (test_bit(CF_CONNECTED, &con->flags)) {
504 e = list_entry(con->outqueue.next, struct outqueue_entry, 500 e = list_entry(con->outqueue.next, struct outqueue_entry,
505 list); 501 list);
506 if ((struct list_head *) e == &con->outqueue) 502 if ((struct list_head *) e == &con->outqueue)
@@ -623,14 +619,12 @@ int tipc_server_start(struct tipc_server *s)
623void tipc_server_stop(struct tipc_server *s) 619void tipc_server_stop(struct tipc_server *s)
624{ 620{
625 struct tipc_conn *con; 621 struct tipc_conn *con;
626 int total = 0;
627 int id; 622 int id;
628 623
629 spin_lock_bh(&s->idr_lock); 624 spin_lock_bh(&s->idr_lock);
630 for (id = 0; total < s->idr_in_use; id++) { 625 for (id = 0; s->idr_in_use; id++) {
631 con = idr_find(&s->conn_idr, id); 626 con = idr_find(&s->conn_idr, id);
632 if (con) { 627 if (con) {
633 total++;
634 spin_unlock_bh(&s->idr_lock); 628 spin_unlock_bh(&s->idr_lock);
635 tipc_close_conn(con); 629 tipc_close_conn(con);
636 spin_lock_bh(&s->idr_lock); 630 spin_lock_bh(&s->idr_lock);
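The server changes close a lookup/teardown race: removal of the conid mapping moves into the kref release, so the ID disappears exactly when the last reference drops, and tipc_conn_lookup() refuses to hand out a connection whose CF_CONNECTED bit is already clear. The guarded lookup, sketched with a plain table and mutex:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct conn {
	int refs;
	bool connected;
};

static pthread_mutex_t idr_lock = PTHREAD_MUTEX_INITIALIZER;

static struct conn *conn_lookup(struct conn *table[], int conid)
{
	struct conn *con;

	pthread_mutex_lock(&idr_lock);
	con = table[conid];
	if (con && con->connected)
		con->refs++;          /* conn_get(): safe, still connected */
	else
		con = NULL;           /* dying connection: pretend it's gone */
	pthread_mutex_unlock(&idr_lock);
	return con;
}

int main(void)
{
	struct conn c = { .refs = 1, .connected = false };
	struct conn *table[1] = { &c };

	return conn_lookup(table, 0) ? 1 : 0;   /* NULL: lookup refused */
}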
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 333c5dae0072..800caaa699a1 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -441,15 +441,19 @@ static void __tipc_shutdown(struct socket *sock, int error)
441 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 441 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
442 if (TIPC_SKB_CB(skb)->bytes_read) { 442 if (TIPC_SKB_CB(skb)->bytes_read) {
443 kfree_skb(skb); 443 kfree_skb(skb);
444 } else { 444 continue;
445 if (!tipc_sk_type_connectionless(sk) && 445 }
446 sk->sk_state != TIPC_DISCONNECTING) { 446 if (!tipc_sk_type_connectionless(sk) &&
447 tipc_set_sk_state(sk, TIPC_DISCONNECTING); 447 sk->sk_state != TIPC_DISCONNECTING) {
448 tipc_node_remove_conn(net, dnode, tsk->portid); 448 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
449 } 449 tipc_node_remove_conn(net, dnode, tsk->portid);
450 tipc_sk_respond(sk, skb, error);
451 } 450 }
451 tipc_sk_respond(sk, skb, error);
452 } 452 }
453
454 if (tipc_sk_type_connectionless(sk))
455 return;
456
453 if (sk->sk_state != TIPC_DISCONNECTING) { 457 if (sk->sk_state != TIPC_DISCONNECTING) {
454 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, 458 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
455 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, 459 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
@@ -457,10 +461,8 @@ static void __tipc_shutdown(struct socket *sock, int error)
457 tsk->portid, error); 461 tsk->portid, error);
458 if (skb) 462 if (skb)
459 tipc_node_xmit_skb(net, skb, dnode, tsk->portid); 463 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
460 if (!tipc_sk_type_connectionless(sk)) { 464 tipc_node_remove_conn(net, dnode, tsk->portid);
461 tipc_node_remove_conn(net, dnode, tsk->portid); 465 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
462 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
463 }
464 } 466 }
465} 467}
466 468
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0dd02244e21d..9d94e65d0894 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -54,6 +54,8 @@ struct tipc_subscriber {
54 54
55static void tipc_subscrp_delete(struct tipc_subscription *sub); 55static void tipc_subscrp_delete(struct tipc_subscription *sub);
56static void tipc_subscrb_put(struct tipc_subscriber *subscriber); 56static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
57static void tipc_subscrp_put(struct tipc_subscription *subscription);
58static void tipc_subscrp_get(struct tipc_subscription *subscription);
57 59
58/** 60/**
59 * htohl - convert value to endianness used by destination 61 * htohl - convert value to endianness used by destination
@@ -123,6 +125,7 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
123{ 125{
124 struct tipc_name_seq seq; 126 struct tipc_name_seq seq;
125 127
128 tipc_subscrp_get(sub);
126 tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq); 129 tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
127 if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper)) 130 if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
128 return; 131 return;
@@ -132,30 +135,23 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
132 135
133 tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref, 136 tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
134 node); 137 node);
138 tipc_subscrp_put(sub);
135} 139}
136 140
137static void tipc_subscrp_timeout(unsigned long data) 141static void tipc_subscrp_timeout(unsigned long data)
138{ 142{
139 struct tipc_subscription *sub = (struct tipc_subscription *)data; 143 struct tipc_subscription *sub = (struct tipc_subscription *)data;
140 struct tipc_subscriber *subscriber = sub->subscriber;
141 144
142 /* Notify subscriber of timeout */ 145 /* Notify subscriber of timeout */
143 tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, 146 tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
144 TIPC_SUBSCR_TIMEOUT, 0, 0); 147 TIPC_SUBSCR_TIMEOUT, 0, 0);
145 148
146 spin_lock_bh(&subscriber->lock); 149 tipc_subscrp_put(sub);
147 tipc_subscrp_delete(sub);
148 spin_unlock_bh(&subscriber->lock);
149
150 tipc_subscrb_put(subscriber);
151} 150}
152 151
153static void tipc_subscrb_kref_release(struct kref *kref) 152static void tipc_subscrb_kref_release(struct kref *kref)
154{ 153{
155 struct tipc_subscriber *subcriber = container_of(kref, 154 kfree(container_of(kref, struct tipc_subscriber, kref));
156 struct tipc_subscriber, kref);
157
158 kfree(subcriber);
159} 155}
160 156
161static void tipc_subscrb_put(struct tipc_subscriber *subscriber) 157static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
@@ -168,6 +164,59 @@ static void tipc_subscrb_get(struct tipc_subscriber *subscriber)
168 kref_get(&subscriber->kref); 164 kref_get(&subscriber->kref);
169} 165}
170 166
167static void tipc_subscrp_kref_release(struct kref *kref)
168{
169 struct tipc_subscription *sub = container_of(kref,
170 struct tipc_subscription,
171 kref);
172 struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
173 struct tipc_subscriber *subscriber = sub->subscriber;
174
175 spin_lock_bh(&subscriber->lock);
176 tipc_nametbl_unsubscribe(sub);
177 list_del(&sub->subscrp_list);
178 atomic_dec(&tn->subscription_count);
179 spin_unlock_bh(&subscriber->lock);
180 kfree(sub);
181 tipc_subscrb_put(subscriber);
182}
183
184static void tipc_subscrp_put(struct tipc_subscription *subscription)
185{
186 kref_put(&subscription->kref, tipc_subscrp_kref_release);
187}
188
189static void tipc_subscrp_get(struct tipc_subscription *subscription)
190{
191 kref_get(&subscription->kref);
192}
193
194/* tipc_subscrb_subscrp_delete - delete a specific subscription or all
195 * subscriptions for a given subscriber.
196 */
197static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
198 struct tipc_subscr *s)
199{
200 struct list_head *subscription_list = &subscriber->subscrp_list;
201 struct tipc_subscription *sub, *temp;
202
203 spin_lock_bh(&subscriber->lock);
204 list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) {
205 if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
206 continue;
207
208 tipc_subscrp_get(sub);
209 spin_unlock_bh(&subscriber->lock);
210 tipc_subscrp_delete(sub);
211 tipc_subscrp_put(sub);
212 spin_lock_bh(&subscriber->lock);
213
214 if (s)
215 break;
216 }
217 spin_unlock_bh(&subscriber->lock);
218}
219
171static struct tipc_subscriber *tipc_subscrb_create(int conid) 220static struct tipc_subscriber *tipc_subscrb_create(int conid)
172{ 221{
173 struct tipc_subscriber *subscriber; 222 struct tipc_subscriber *subscriber;
@@ -177,8 +226,8 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
177 pr_warn("Subscriber rejected, no memory\n"); 226 pr_warn("Subscriber rejected, no memory\n");
178 return NULL; 227 return NULL;
179 } 228 }
180 kref_init(&subscriber->kref);
181 INIT_LIST_HEAD(&subscriber->subscrp_list); 229 INIT_LIST_HEAD(&subscriber->subscrp_list);
230 kref_init(&subscriber->kref);
182 subscriber->conid = conid; 231 subscriber->conid = conid;
183 spin_lock_init(&subscriber->lock); 232 spin_lock_init(&subscriber->lock);
184 233
@@ -187,55 +236,22 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
187 236
188static void tipc_subscrb_delete(struct tipc_subscriber *subscriber) 237static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
189{ 238{
190 struct tipc_subscription *sub, *temp; 239 tipc_subscrb_subscrp_delete(subscriber, NULL);
191 u32 timeout;
192
193 spin_lock_bh(&subscriber->lock);
194 /* Destroy any existing subscriptions for subscriber */
195 list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
196 subscrp_list) {
197 timeout = htohl(sub->evt.s.timeout, sub->swap);
198 if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
199 tipc_subscrp_delete(sub);
200 tipc_subscrb_put(subscriber);
201 }
202 }
203 spin_unlock_bh(&subscriber->lock);
204
205 tipc_subscrb_put(subscriber); 240 tipc_subscrb_put(subscriber);
206} 241}
207 242
208static void tipc_subscrp_delete(struct tipc_subscription *sub) 243static void tipc_subscrp_delete(struct tipc_subscription *sub)
209{ 244{
210 struct tipc_net *tn = net_generic(sub->net, tipc_net_id); 245 u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
211 246
212 tipc_nametbl_unsubscribe(sub); 247 if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
213 list_del(&sub->subscrp_list); 248 tipc_subscrp_put(sub);
214 kfree(sub);
215 atomic_dec(&tn->subscription_count);
216} 249}
217 250
218static void tipc_subscrp_cancel(struct tipc_subscr *s, 251static void tipc_subscrp_cancel(struct tipc_subscr *s,
219 struct tipc_subscriber *subscriber) 252 struct tipc_subscriber *subscriber)
220{ 253{
221 struct tipc_subscription *sub, *temp; 254 tipc_subscrb_subscrp_delete(subscriber, s);
222 u32 timeout;
223
224 spin_lock_bh(&subscriber->lock);
225 /* Find first matching subscription, exit if not found */
226 list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
227 subscrp_list) {
228 if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
229 timeout = htohl(sub->evt.s.timeout, sub->swap);
230 if ((timeout == TIPC_WAIT_FOREVER) ||
231 del_timer(&sub->timer)) {
232 tipc_subscrp_delete(sub);
233 tipc_subscrb_put(subscriber);
234 }
235 break;
236 }
237 }
238 spin_unlock_bh(&subscriber->lock);
239} 255}
240 256
241static struct tipc_subscription *tipc_subscrp_create(struct net *net, 257static struct tipc_subscription *tipc_subscrp_create(struct net *net,
@@ -272,6 +288,7 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
272 sub->swap = swap; 288 sub->swap = swap;
273 memcpy(&sub->evt.s, s, sizeof(*s)); 289 memcpy(&sub->evt.s, s, sizeof(*s));
274 atomic_inc(&tn->subscription_count); 290 atomic_inc(&tn->subscription_count);
291 kref_init(&sub->kref);
275 return sub; 292 return sub;
276} 293}
277 294
@@ -288,17 +305,16 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
288 305
289 spin_lock_bh(&subscriber->lock); 306 spin_lock_bh(&subscriber->lock);
290 list_add(&sub->subscrp_list, &subscriber->subscrp_list); 307 list_add(&sub->subscrp_list, &subscriber->subscrp_list);
291 tipc_subscrb_get(subscriber);
292 sub->subscriber = subscriber; 308 sub->subscriber = subscriber;
293 tipc_nametbl_subscribe(sub); 309 tipc_nametbl_subscribe(sub);
310 tipc_subscrb_get(subscriber);
294 spin_unlock_bh(&subscriber->lock); 311 spin_unlock_bh(&subscriber->lock);
295 312
313 setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
296 timeout = htohl(sub->evt.s.timeout, swap); 314 timeout = htohl(sub->evt.s.timeout, swap);
297 if (timeout == TIPC_WAIT_FOREVER)
298 return;
299 315
300 setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub); 316 if (timeout != TIPC_WAIT_FOREVER)
301 mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout)); 317 mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
302} 318}
303 319
304/* Handle one termination request for the subscriber */ 320/* Handle one termination request for the subscriber */
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index be60103082c9..ffdc214c117a 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -57,6 +57,7 @@ struct tipc_subscriber;
57 * @evt: template for events generated by subscription 57 * @evt: template for events generated by subscription
58 */ 58 */
59struct tipc_subscription { 59struct tipc_subscription {
60 struct kref kref;
60 struct tipc_subscriber *subscriber; 61 struct tipc_subscriber *subscriber;
61 struct net *net; 62 struct net *net;
62 struct timer_list timer; 63 struct timer_list timer;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 127656ebe7be..cef79873b09d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -995,6 +995,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
995 unsigned int hash; 995 unsigned int hash;
996 struct unix_address *addr; 996 struct unix_address *addr;
997 struct hlist_head *list; 997 struct hlist_head *list;
998 struct path path = { NULL, NULL };
998 999
999 err = -EINVAL; 1000 err = -EINVAL;
1000 if (sunaddr->sun_family != AF_UNIX) 1001 if (sunaddr->sun_family != AF_UNIX)
@@ -1010,9 +1011,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1010 goto out; 1011 goto out;
1011 addr_len = err; 1012 addr_len = err;
1012 1013
1014 if (sun_path[0]) {
1015 umode_t mode = S_IFSOCK |
1016 (SOCK_INODE(sock)->i_mode & ~current_umask());
1017 err = unix_mknod(sun_path, mode, &path);
1018 if (err) {
1019 if (err == -EEXIST)
1020 err = -EADDRINUSE;
1021 goto out;
1022 }
1023 }
1024
1013 err = mutex_lock_interruptible(&u->bindlock); 1025 err = mutex_lock_interruptible(&u->bindlock);
1014 if (err) 1026 if (err)
1015 goto out; 1027 goto out_put;
1016 1028
1017 err = -EINVAL; 1029 err = -EINVAL;
1018 if (u->addr) 1030 if (u->addr)
@@ -1029,16 +1041,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1029 atomic_set(&addr->refcnt, 1); 1041 atomic_set(&addr->refcnt, 1);
1030 1042
1031 if (sun_path[0]) { 1043 if (sun_path[0]) {
1032 struct path path;
1033 umode_t mode = S_IFSOCK |
1034 (SOCK_INODE(sock)->i_mode & ~current_umask());
1035 err = unix_mknod(sun_path, mode, &path);
1036 if (err) {
1037 if (err == -EEXIST)
1038 err = -EADDRINUSE;
1039 unix_release_addr(addr);
1040 goto out_up;
1041 }
1042 addr->hash = UNIX_HASH_SIZE; 1044 addr->hash = UNIX_HASH_SIZE;
1043 hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1); 1045 hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
1044 spin_lock(&unix_table_lock); 1046 spin_lock(&unix_table_lock);
@@ -1065,6 +1067,9 @@ out_unlock:
1065 spin_unlock(&unix_table_lock); 1067 spin_unlock(&unix_table_lock);
1066out_up: 1068out_up:
1067 mutex_unlock(&u->bindlock); 1069 mutex_unlock(&u->bindlock);
1070out_put:
1071 if (err)
1072 path_put(&path);
1068out: 1073out:
1069 return err; 1074 return err;
1070} 1075}
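
The af_unix hunks move unix_mknod() in front of the bindlock acquisition and unwind it with path_put() on failure. Filesystem node creation can block and take VFS locks, so doing it while holding u->bindlock invited deadlock against paths that take those locks in the opposite order. A condensed sketch of the reordered bind path (error labels simplified relative to af_unix.c):

    struct path path = { NULL, NULL };
    int err = 0;

    if (sun_path[0]) {
            umode_t mode = S_IFSOCK |
                    (SOCK_INODE(sock)->i_mode & ~current_umask());

            err = unix_mknod(sun_path, mode, &path);
            if (err == -EEXIST)
                    err = -EADDRINUSE;
            if (err)
                    goto out;            /* nothing created, nothing to put */
    }

    err = mutex_lock_interruptible(&u->bindlock);
    if (err)
            goto out_put;
    /* ... hash the inode and link the address as before ... */
    mutex_unlock(&u->bindlock);
    out_put:
            if (err)
                    path_put(&path);
    out:
            return err;
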
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 3df85a751a85..aee396b9f190 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4615,6 +4615,15 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
4615 break; 4615 break;
4616 } 4616 }
4617 4617
4618 /*
4619 * Older kernel versions ignored this attribute entirely, so don't
4620 * reject attempts to update it but mark it as unused instead so the
4621 * driver won't look at the data.
4622 */
4623 if (statype != CFG80211_STA_AP_CLIENT_UNASSOC &&
4624 statype != CFG80211_STA_TDLS_PEER_SETUP)
4625 params->opmode_notif_used = false;
4626
4618 return 0; 4627 return 0;
4619} 4628}
4620EXPORT_SYMBOL(cfg80211_check_station_change); 4629EXPORT_SYMBOL(cfg80211_check_station_change);
@@ -4854,6 +4863,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
4854 params.local_pm = pm; 4863 params.local_pm = pm;
4855 } 4864 }
4856 4865
4866 if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) {
4867 params.opmode_notif_used = true;
4868 params.opmode_notif =
4869 nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]);
4870 }
4871
4857 /* Include parameters for TDLS peer (will check later) */ 4872 /* Include parameters for TDLS peer (will check later) */
4858 err = nl80211_set_station_tdls(info, &params); 4873 err = nl80211_set_station_tdls(info, &params);
4859 if (err) 4874 if (err)
@@ -5901,6 +5916,7 @@ do { \
5901 break; 5916 break;
5902 } 5917 }
5903 cfg->ht_opmode = ht_opmode; 5918 cfg->ht_opmode = ht_opmode;
5919 mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
5904 } 5920 }
5905 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, 5921 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
5906 1, 65535, mask, 5922 1, 65535, mask,
@@ -14502,13 +14518,17 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
14502 14518
14503 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { 14519 list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
14504 bool schedule_destroy_work = false; 14520 bool schedule_destroy_work = false;
14505 bool schedule_scan_stop = false;
14506 struct cfg80211_sched_scan_request *sched_scan_req = 14521 struct cfg80211_sched_scan_request *sched_scan_req =
14507 rcu_dereference(rdev->sched_scan_req); 14522 rcu_dereference(rdev->sched_scan_req);
14508 14523
14509 if (sched_scan_req && notify->portid && 14524 if (sched_scan_req && notify->portid &&
14510 sched_scan_req->owner_nlportid == notify->portid) 14525 sched_scan_req->owner_nlportid == notify->portid) {
14511 schedule_scan_stop = true; 14526 sched_scan_req->owner_nlportid = 0;
14527
14528 if (rdev->ops->sched_scan_stop &&
14529 rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
14530 schedule_work(&rdev->sched_scan_stop_wk);
14531 }
14512 14532
14513 list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) { 14533 list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) {
14514 cfg80211_mlme_unregister_socket(wdev, notify->portid); 14534 cfg80211_mlme_unregister_socket(wdev, notify->portid);
@@ -14539,12 +14559,6 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
14539 spin_unlock(&rdev->destroy_list_lock); 14559 spin_unlock(&rdev->destroy_list_lock);
14540 schedule_work(&rdev->destroy_work); 14560 schedule_work(&rdev->destroy_work);
14541 } 14561 }
14542 } else if (schedule_scan_stop) {
14543 sched_scan_req->owner_nlportid = 0;
14544
14545 if (rdev->ops->sched_scan_stop &&
14546 rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
14547 schedule_work(&rdev->sched_scan_stop_wk);
14548 } 14562 }
14549 } 14563 }
14550 14564
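
Two of the nl80211 hunks share one idiom worth calling out: an optional attribute is parsed into a value plus an explicit "used" flag, and later validation in cfg80211_check_station_change() clears the flag, rather than returning an error, for station types where older kernels silently ignored the attribute. A sketch of the parse side; the wrapper function name is illustrative:

    /* Parse an optional u8 attribute into a value + presence flag. */
    static void parse_opmode_notif(struct genl_info *info,
                                   struct station_parameters *params)
    {
            struct nlattr *attr = info->attrs[NL80211_ATTR_OPMODE_NOTIF];

            if (attr) {
                    params->opmode_notif_used = true;
                    params->opmode_notif = nla_get_u8(attr);
            }
    }
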
diff --git a/samples/Kconfig b/samples/Kconfig
index a6d2a43bbf2e..b124f62ed6cb 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -105,4 +105,11 @@ config SAMPLE_BLACKFIN_GPTIMERS
105 help 105 help
106 Build samples of blackfin gptimers sample module. 106 Build samples of blackfin gptimers sample module.
107 107
108config SAMPLE_VFIO_MDEV_MTTY
109 tristate "Build VFIO mtty example mediated device sample code -- loadable modules only"
110 depends on VFIO_MDEV_DEVICE && m
111 help
112 Build a virtual tty sample driver for use as a VFIO
113 mediated device
114
108endif # SAMPLES 115endif # SAMPLES
diff --git a/samples/Makefile b/samples/Makefile
index e17d66d77f09..86a137e451d9 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -2,4 +2,5 @@
2 2
3obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ livepatch/ \ 3obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ livepatch/ \
4 hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ \ 4 hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ \
5 configfs/ connector/ v4l/ trace_printk/ blackfin/ 5 configfs/ connector/ v4l/ trace_printk/ blackfin/ \
6 vfio-mdev/
diff --git a/samples/bpf/sock_example.h b/samples/bpf/sock_example.h
index 09f7fe7e5fd7..d8014065d479 100644
--- a/samples/bpf/sock_example.h
+++ b/samples/bpf/sock_example.h
@@ -4,7 +4,7 @@
4#include <unistd.h> 4#include <unistd.h>
5#include <string.h> 5#include <string.h>
6#include <errno.h> 6#include <errno.h>
7#include <net/ethernet.h> 7#include <linux/if_ether.h>
8#include <net/if.h> 8#include <net/if.h>
9#include <linux/if_packet.h> 9#include <linux/if_packet.h>
10#include <arpa/inet.h> 10#include <arpa/inet.h>
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c
index 92a44729dbe4..7ef2a12b25b2 100644
--- a/samples/bpf/tc_l2_redirect_kern.c
+++ b/samples/bpf/tc_l2_redirect_kern.c
@@ -4,6 +4,7 @@
4 * modify it under the terms of version 2 of the GNU General Public 4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation. 5 * License as published by the Free Software Foundation.
6 */ 6 */
7#define KBUILD_MODNAME "foo"
7#include <uapi/linux/bpf.h> 8#include <uapi/linux/bpf.h>
8#include <uapi/linux/if_ether.h> 9#include <uapi/linux/if_ether.h>
9#include <uapi/linux/if_packet.h> 10#include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/test_cgrp2_attach.c b/samples/bpf/test_cgrp2_attach.c
index 504058631ffc..4bfcaf93fcf3 100644
--- a/samples/bpf/test_cgrp2_attach.c
+++ b/samples/bpf/test_cgrp2_attach.c
@@ -104,7 +104,7 @@ static int attach_filter(int cg_fd, int type, int verdict)
104 return EXIT_FAILURE; 104 return EXIT_FAILURE;
105 } 105 }
106 106
107 ret = bpf_prog_attach(prog_fd, cg_fd, type); 107 ret = bpf_prog_attach(prog_fd, cg_fd, type, 0);
108 if (ret < 0) { 108 if (ret < 0) {
109 printf("Failed to attach prog to cgroup: '%s'\n", 109 printf("Failed to attach prog to cgroup: '%s'\n",
110 strerror(errno)); 110 strerror(errno));
diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c
index 6e69be37f87f..3049b1f26267 100644
--- a/samples/bpf/test_cgrp2_attach2.c
+++ b/samples/bpf/test_cgrp2_attach2.c
@@ -79,11 +79,12 @@ int main(int argc, char **argv)
79 if (join_cgroup(FOO)) 79 if (join_cgroup(FOO))
80 goto err; 80 goto err;
81 81
82 if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS)) { 82 if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
83 log_err("Attaching prog to /foo"); 83 log_err("Attaching prog to /foo");
84 goto err; 84 goto err;
85 } 85 }
86 86
87 printf("Attached DROP prog. This ping in cgroup /foo should fail...\n");
87 assert(system(PING_CMD) != 0); 88 assert(system(PING_CMD) != 0);
88 89
89 /* Create cgroup /foo/bar, get fd, and join it */ 90 /* Create cgroup /foo/bar, get fd, and join it */
@@ -94,24 +95,27 @@ int main(int argc, char **argv)
94 if (join_cgroup(BAR)) 95 if (join_cgroup(BAR))
95 goto err; 96 goto err;
96 97
98 printf("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n");
97 assert(system(PING_CMD) != 0); 99 assert(system(PING_CMD) != 0);
98 100
99 if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS)) { 101 if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
100 log_err("Attaching prog to /foo/bar"); 102 log_err("Attaching prog to /foo/bar");
101 goto err; 103 goto err;
102 } 104 }
103 105
106 printf("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n");
104 assert(system(PING_CMD) == 0); 107 assert(system(PING_CMD) == 0);
105 108
106
107 if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) { 109 if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
108 log_err("Detaching program from /foo/bar"); 110 log_err("Detaching program from /foo/bar");
109 goto err; 111 goto err;
110 } 112 }
111 113
114 printf("Detached PASS from /foo/bar while DROP is attached to /foo.\n"
115 "This ping in cgroup /foo/bar should fail...\n");
112 assert(system(PING_CMD) != 0); 116 assert(system(PING_CMD) != 0);
113 117
114 if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS)) { 118 if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
115 log_err("Attaching prog to /foo/bar"); 119 log_err("Attaching prog to /foo/bar");
116 goto err; 120 goto err;
117 } 121 }
@@ -121,8 +125,60 @@ int main(int argc, char **argv)
121 goto err; 125 goto err;
122 } 126 }
123 127
128 printf("Attached PASS from /foo/bar and detached DROP from /foo.\n"
129 "This ping in cgroup /foo/bar should pass...\n");
124 assert(system(PING_CMD) == 0); 130 assert(system(PING_CMD) == 0);
125 131
132 if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
133 log_err("Attaching prog to /foo/bar");
134 goto err;
135 }
136
137 if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
138 errno = 0;
139 log_err("Unexpected success attaching prog to /foo/bar");
140 goto err;
141 }
142
143 if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
144 log_err("Detaching program from /foo/bar");
145 goto err;
146 }
147
148 if (!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) {
149 errno = 0;
150 log_err("Unexpected success in double detach from /foo");
151 goto err;
152 }
153
154 if (bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
155 log_err("Attaching non-overridable prog to /foo");
156 goto err;
157 }
158
159 if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
160 errno = 0;
161 log_err("Unexpected success attaching non-overridable prog to /foo/bar");
162 goto err;
163 }
164
165 if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
166 errno = 0;
167 log_err("Unexpected success attaching overridable prog to /foo/bar");
168 goto err;
169 }
170
171 if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
172 errno = 0;
173 log_err("Unexpected success attaching overridable prog to /foo");
174 goto err;
175 }
176
177 if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
178 log_err("Attaching different non-overridable prog to /foo");
179 goto err;
180 }
181
126 goto out; 182 goto out;
127 183
128err: 184err:
@@ -132,5 +188,9 @@ out:
132 close(foo); 188 close(foo);
133 close(bar); 189 close(bar);
134 cleanup_cgroup_environment(); 190 cleanup_cgroup_environment();
191 if (!rc)
192 printf("PASS\n");
193 else
194 printf("FAIL\n");
135 return rc; 195 return rc;
136} 196}
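
The new fourth argument of bpf_prog_attach() in these samples is an "overridable" flag: attaching with 1 allows a descendant cgroup to install its own program, while 0 makes the attachment exclusive for the whole subtree, which is exactly what the extended test above exercises. A hedged usage sketch; the wrapper names are illustrative and bpf_prog_attach() here is the samples' helper taking (prog_fd, cgroup_fd, type, overridable):

    static int attach_exclusive(int prog_fd, int cg_fd)
    {
            /* 0: no cgroup below this one may override the program */
            return bpf_prog_attach(prog_fd, cg_fd,
                                   BPF_CGROUP_INET_EGRESS, 0);
    }

    static int attach_overridable(int prog_fd, int cg_fd)
    {
            /* 1: a child cgroup may attach its own program instead */
            return bpf_prog_attach(prog_fd, cg_fd,
                                   BPF_CGROUP_INET_EGRESS, 1);
    }
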
diff --git a/samples/bpf/test_cgrp2_sock.c b/samples/bpf/test_cgrp2_sock.c
index 0791b949cbe4..c3cfb23e23b5 100644
--- a/samples/bpf/test_cgrp2_sock.c
+++ b/samples/bpf/test_cgrp2_sock.c
@@ -75,7 +75,7 @@ int main(int argc, char **argv)
75 return EXIT_FAILURE; 75 return EXIT_FAILURE;
76 } 76 }
77 77
78 ret = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_CREATE); 78 ret = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_CREATE, 0);
79 if (ret < 0) { 79 if (ret < 0) {
80 printf("Failed to attach prog to cgroup: '%s'\n", 80 printf("Failed to attach prog to cgroup: '%s'\n",
81 strerror(errno)); 81 strerror(errno));
diff --git a/samples/bpf/test_cgrp2_sock2.c b/samples/bpf/test_cgrp2_sock2.c
index 455ef0d06e93..db036077b644 100644
--- a/samples/bpf/test_cgrp2_sock2.c
+++ b/samples/bpf/test_cgrp2_sock2.c
@@ -55,7 +55,7 @@ int main(int argc, char **argv)
55 } 55 }
56 56
57 ret = bpf_prog_attach(prog_fd[filter_id], cg_fd, 57 ret = bpf_prog_attach(prog_fd[filter_id], cg_fd,
58 BPF_CGROUP_INET_SOCK_CREATE); 58 BPF_CGROUP_INET_SOCK_CREATE, 0);
59 if (ret < 0) { 59 if (ret < 0) {
60 printf("Failed to attach prog to cgroup: '%s'\n", 60 printf("Failed to attach prog to cgroup: '%s'\n",
61 strerror(errno)); 61 strerror(errno));
diff --git a/samples/bpf/trace_output_user.c b/samples/bpf/trace_output_user.c
index f4fa6af22def..ccca1e348017 100644
--- a/samples/bpf/trace_output_user.c
+++ b/samples/bpf/trace_output_user.c
@@ -9,7 +9,6 @@
9#include <string.h> 9#include <string.h>
10#include <fcntl.h> 10#include <fcntl.h>
11#include <poll.h> 11#include <poll.h>
12#include <sys/ioctl.h>
13#include <linux/perf_event.h> 12#include <linux/perf_event.h>
14#include <linux/bpf.h> 13#include <linux/bpf.h>
15#include <errno.h> 14#include <errno.h>
diff --git a/samples/bpf/xdp_tx_iptunnel_kern.c b/samples/bpf/xdp_tx_iptunnel_kern.c
index 85c38ecd3a2d..0f4f6e8c8611 100644
--- a/samples/bpf/xdp_tx_iptunnel_kern.c
+++ b/samples/bpf/xdp_tx_iptunnel_kern.c
@@ -8,6 +8,7 @@
8 * encapsulating the incoming packet in an IPv4/v6 header 8 * encapsulating the incoming packet in an IPv4/v6 header
9 * and then XDP_TX it out. 9 * and then XDP_TX it out.
10 */ 10 */
11#define KBUILD_MODNAME "foo"
11#include <uapi/linux/bpf.h> 12#include <uapi/linux/bpf.h>
12#include <linux/in.h> 13#include <linux/in.h>
13#include <linux/if_ether.h> 14#include <linux/if_ether.h>
diff --git a/samples/vfio-mdev/Makefile b/samples/vfio-mdev/Makefile
index a932edbe38eb..cbbd868a50a8 100644
--- a/samples/vfio-mdev/Makefile
+++ b/samples/vfio-mdev/Makefile
@@ -1,13 +1 @@
1# 1obj-$(CONFIG_SAMPLE_VFIO_MDEV_MTTY) += mtty.o
2# Makefile for mtty.c file
3#
4KERNEL_DIR:=/lib/modules/$(shell uname -r)/build
5
6obj-m:=mtty.o
7
8modules clean modules_install:
9 $(MAKE) -C $(KERNEL_DIR) SUBDIRS=$(PWD) $@
10
11default: modules
12
13module: modules
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
index 6b633a4ea333..ca495686b9c3 100644
--- a/samples/vfio-mdev/mtty.c
+++ b/samples/vfio-mdev/mtty.c
@@ -164,7 +164,7 @@ static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
164 struct mdev_state *mds; 164 struct mdev_state *mds;
165 165
166 list_for_each_entry(mds, &mdev_devices_list, next) { 166 list_for_each_entry(mds, &mdev_devices_list, next) {
167 if (uuid_le_cmp(mds->mdev->uuid, uuid) == 0) 167 if (uuid_le_cmp(mdev_uuid(mds->mdev), uuid) == 0)
168 return mds; 168 return mds;
169 } 169 }
170 170
@@ -341,7 +341,8 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
341 pr_err("Serial port %d: Fifo level trigger\n", 341 pr_err("Serial port %d: Fifo level trigger\n",
342 index); 342 index);
343#endif 343#endif
344 mtty_trigger_interrupt(mdev_state->mdev->uuid); 344 mtty_trigger_interrupt(
345 mdev_uuid(mdev_state->mdev));
345 } 346 }
346 } else { 347 } else {
347#if defined(DEBUG_INTR) 348#if defined(DEBUG_INTR)
@@ -355,7 +356,8 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
355 */ 356 */
356 if (mdev_state->s[index].uart_reg[UART_IER] & 357 if (mdev_state->s[index].uart_reg[UART_IER] &
357 UART_IER_RLSI) 358 UART_IER_RLSI)
358 mtty_trigger_interrupt(mdev_state->mdev->uuid); 359 mtty_trigger_interrupt(
360 mdev_uuid(mdev_state->mdev));
359 } 361 }
360 mutex_unlock(&mdev_state->rxtx_lock); 362 mutex_unlock(&mdev_state->rxtx_lock);
361 break; 363 break;
@@ -374,7 +376,8 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
374 pr_err("Serial port %d: IER_THRI write\n", 376 pr_err("Serial port %d: IER_THRI write\n",
375 index); 377 index);
376#endif 378#endif
377 mtty_trigger_interrupt(mdev_state->mdev->uuid); 379 mtty_trigger_interrupt(
380 mdev_uuid(mdev_state->mdev));
378 } 381 }
379 382
380 mutex_unlock(&mdev_state->rxtx_lock); 383 mutex_unlock(&mdev_state->rxtx_lock);
@@ -445,7 +448,7 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
445#if defined(DEBUG_INTR) 448#if defined(DEBUG_INTR)
446 pr_err("Serial port %d: MCR_OUT2 write\n", index); 449 pr_err("Serial port %d: MCR_OUT2 write\n", index);
447#endif 450#endif
448 mtty_trigger_interrupt(mdev_state->mdev->uuid); 451 mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
449 } 452 }
450 453
451 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) && 454 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
@@ -453,7 +456,7 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
453#if defined(DEBUG_INTR) 456#if defined(DEBUG_INTR)
454 pr_err("Serial port %d: MCR RTS/DTR write\n", index); 457 pr_err("Serial port %d: MCR RTS/DTR write\n", index);
455#endif 458#endif
456 mtty_trigger_interrupt(mdev_state->mdev->uuid); 459 mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
457 } 460 }
458 break; 461 break;
459 462
@@ -504,7 +507,8 @@ static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
504#endif 507#endif
505 if (mdev_state->s[index].uart_reg[UART_IER] & 508 if (mdev_state->s[index].uart_reg[UART_IER] &
506 UART_IER_THRI) 509 UART_IER_THRI)
507 mtty_trigger_interrupt(mdev_state->mdev->uuid); 510 mtty_trigger_interrupt(
511 mdev_uuid(mdev_state->mdev));
508 } 512 }
509 mutex_unlock(&mdev_state->rxtx_lock); 513 mutex_unlock(&mdev_state->rxtx_lock);
510 514
@@ -734,7 +738,7 @@ int mtty_create(struct kobject *kobj, struct mdev_device *mdev)
734 738
735 for (i = 0; i < 2; i++) { 739 for (i = 0; i < 2; i++) {
736 snprintf(name, MTTY_STRING_LEN, "%s-%d", 740 snprintf(name, MTTY_STRING_LEN, "%s-%d",
737 dev_driver_string(mdev->parent->dev), i + 1); 741 dev_driver_string(mdev_parent_dev(mdev)), i + 1);
738 if (!strcmp(kobj->name, name)) { 742 if (!strcmp(kobj->name, name)) {
739 nr_ports = i + 1; 743 nr_ports = i + 1;
740 break; 744 break;
@@ -1069,7 +1073,7 @@ int mtty_get_region_info(struct mdev_device *mdev,
1069{ 1073{
1070 unsigned int size = 0; 1074 unsigned int size = 0;
1071 struct mdev_state *mdev_state; 1075 struct mdev_state *mdev_state;
1072 int bar_index; 1076 u32 bar_index;
1073 1077
1074 if (!mdev) 1078 if (!mdev)
1075 return -EINVAL; 1079 return -EINVAL;
@@ -1078,8 +1082,11 @@ int mtty_get_region_info(struct mdev_device *mdev,
1078 if (!mdev_state) 1082 if (!mdev_state)
1079 return -EINVAL; 1083 return -EINVAL;
1080 1084
1081 mutex_lock(&mdev_state->ops_lock);
1082 bar_index = region_info->index; 1085 bar_index = region_info->index;
1086 if (bar_index >= VFIO_PCI_NUM_REGIONS)
1087 return -EINVAL;
1088
1089 mutex_lock(&mdev_state->ops_lock);
1083 1090
1084 switch (bar_index) { 1091 switch (bar_index) {
1085 case VFIO_PCI_CONFIG_REGION_INDEX: 1092 case VFIO_PCI_CONFIG_REGION_INDEX:
@@ -1176,7 +1183,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
1176 1183
1177 memcpy(&mdev_state->dev_info, &info, sizeof(info)); 1184 memcpy(&mdev_state->dev_info, &info, sizeof(info));
1178 1185
1179 return copy_to_user((void __user *)arg, &info, minsz); 1186 if (copy_to_user((void __user *)arg, &info, minsz))
1187 return -EFAULT;
1188
1189 return 0;
1180 } 1190 }
1181 case VFIO_DEVICE_GET_REGION_INFO: 1191 case VFIO_DEVICE_GET_REGION_INFO:
1182 { 1192 {
@@ -1197,7 +1207,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
1197 if (ret) 1207 if (ret)
1198 return ret; 1208 return ret;
1199 1209
1200 return copy_to_user((void __user *)arg, &info, minsz); 1210 if (copy_to_user((void __user *)arg, &info, minsz))
1211 return -EFAULT;
1212
1213 return 0;
1201 } 1214 }
1202 1215
1203 case VFIO_DEVICE_GET_IRQ_INFO: 1216 case VFIO_DEVICE_GET_IRQ_INFO:
@@ -1217,10 +1230,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
1217 if (ret) 1230 if (ret)
1218 return ret; 1231 return ret;
1219 1232
1220 if (info.count == -1) 1233 if (copy_to_user((void __user *)arg, &info, minsz))
1221 return -EINVAL; 1234 return -EFAULT;
1222 1235
1223 return copy_to_user((void __user *)arg, &info, minsz); 1236 return 0;
1224 } 1237 }
1225 case VFIO_DEVICE_SET_IRQS: 1238 case VFIO_DEVICE_SET_IRQS:
1226 { 1239 {
@@ -1298,10 +1311,8 @@ static ssize_t
1298sample_mdev_dev_show(struct device *dev, struct device_attribute *attr, 1311sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
1299 char *buf) 1312 char *buf)
1300{ 1313{
1301 struct mdev_device *mdev = to_mdev_device(dev); 1314 if (mdev_from_dev(dev))
1302 1315 return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
1303 if (mdev)
1304 return sprintf(buf, "This is MDEV %s\n", dev_name(&mdev->dev));
1305 1316
1306 return sprintf(buf, "\n"); 1317 return sprintf(buf, "\n");
1307} 1318}
@@ -1402,7 +1413,7 @@ struct attribute_group *mdev_type_groups[] = {
1402 NULL, 1413 NULL,
1403}; 1414};
1404 1415
1405struct parent_ops mdev_fops = { 1416struct mdev_parent_ops mdev_fops = {
1406 .owner = THIS_MODULE, 1417 .owner = THIS_MODULE,
1407 .dev_attr_groups = mtty_dev_groups, 1418 .dev_attr_groups = mtty_dev_groups,
1408 .mdev_attr_groups = mdev_dev_groups, 1419 .mdev_attr_groups = mdev_dev_groups,
@@ -1447,6 +1458,7 @@ static int __init mtty_dev_init(void)
1447 1458
1448 if (IS_ERR(mtty_dev.vd_class)) { 1459 if (IS_ERR(mtty_dev.vd_class)) {
1449 pr_err("Error: failed to register mtty_dev class\n"); 1460 pr_err("Error: failed to register mtty_dev class\n");
1461 ret = PTR_ERR(mtty_dev.vd_class);
1450 goto failed1; 1462 goto failed1;
1451 } 1463 }
1452 1464
@@ -1458,7 +1470,8 @@ static int __init mtty_dev_init(void)
1458 if (ret) 1470 if (ret)
1459 goto failed2; 1471 goto failed2;
1460 1472
1461 if (mdev_register_device(&mtty_dev.dev, &mdev_fops) != 0) 1473 ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
1474 if (ret)
1462 goto failed3; 1475 goto failed3;
1463 1476
1464 mutex_init(&mdev_list_lock); 1477 mutex_init(&mdev_list_lock);
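
Several mtty hunks fix the same bug class: copy_to_user() returns the number of bytes it failed to copy, not a negative errno, so returning its result directly hands a positive count back to the ioctl caller as if it were a return code. The correct shape, as a stand-alone sketch (helper name illustrative):

    #include <linux/uaccess.h>

    static long copy_info_to_user(void __user *arg, const void *info,
                                  unsigned long minsz)
    {
            /* copy_to_user() returns bytes NOT copied; map to -EFAULT */
            if (copy_to_user(arg, info, minsz))
                    return -EFAULT;

            return 0;
    }
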
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index eadcd4d359d9..d883116ebaa4 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -164,6 +164,7 @@ cmd_gensymtypes_c = \
164 $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ 164 $(CPP) -D__GENKSYMS__ $(c_flags) $< | \
165 $(GENKSYMS) $(if $(1), -T $(2)) \ 165 $(GENKSYMS) $(if $(1), -T $(2)) \
166 $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ 166 $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
167 $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \
167 $(if $(KBUILD_PRESERVE),-p) \ 168 $(if $(KBUILD_PRESERVE),-p) \
168 -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) 169 -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
169 170
@@ -337,6 +338,7 @@ cmd_gensymtypes_S = \
337 $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \ 338 $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \
338 $(GENKSYMS) $(if $(1), -T $(2)) \ 339 $(GENKSYMS) $(if $(1), -T $(2)) \
339 $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ 340 $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
341 $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \
340 $(if $(KBUILD_PRESERVE),-p) \ 342 $(if $(KBUILD_PRESERVE),-p) \
341 -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) 343 -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
342 344
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 950fd2e64bb7..12262c0cc691 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -39,6 +39,9 @@
39#include "hash-map.h" 39#include "hash-map.h"
40#endif 40#endif
41 41
42#if BUILDING_GCC_VERSION >= 7000
43#include "memmodel.h"
44#endif
42#include "emit-rtl.h" 45#include "emit-rtl.h"
43#include "debug.h" 46#include "debug.h"
44#include "target.h" 47#include "target.h"
@@ -91,6 +94,9 @@
91#include "tree-ssa-alias.h" 94#include "tree-ssa-alias.h"
92#include "tree-ssa.h" 95#include "tree-ssa.h"
93#include "stringpool.h" 96#include "stringpool.h"
97#if BUILDING_GCC_VERSION >= 7000
98#include "tree-vrp.h"
99#endif
94#include "tree-ssanames.h" 100#include "tree-ssanames.h"
95#include "print-tree.h" 101#include "print-tree.h"
96#include "tree-eh.h" 102#include "tree-eh.h"
@@ -287,6 +293,22 @@ static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct c
287 return NULL; 293 return NULL;
288} 294}
289 295
296static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable)
297{
298 cgraph_node_ptr alias;
299
300 if (callback(node, data))
301 return true;
302
303 for (alias = node->same_body; alias; alias = alias->next) {
304 if (include_overwritable || cgraph_function_body_availability(alias) > AVAIL_OVERWRITABLE)
305 if (cgraph_for_node_and_aliases(alias, callback, data, include_overwritable))
306 return true;
307 }
308
309 return false;
310}
311
290#define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \ 312#define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \
291 for ((node) = cgraph_first_function_with_gimple_body(); (node); \ 313 for ((node) = cgraph_first_function_with_gimple_body(); (node); \
292 (node) = cgraph_next_function_with_gimple_body(node)) 314 (node) = cgraph_next_function_with_gimple_body(node))
@@ -399,6 +421,7 @@ typedef union gimple_statement_d gassign;
399typedef union gimple_statement_d gcall; 421typedef union gimple_statement_d gcall;
400typedef union gimple_statement_d gcond; 422typedef union gimple_statement_d gcond;
401typedef union gimple_statement_d gdebug; 423typedef union gimple_statement_d gdebug;
424typedef union gimple_statement_d ggoto;
402typedef union gimple_statement_d gphi; 425typedef union gimple_statement_d gphi;
403typedef union gimple_statement_d greturn; 426typedef union gimple_statement_d greturn;
404 427
@@ -452,6 +475,16 @@ static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
452 return stmt; 475 return stmt;
453} 476}
454 477
478static inline ggoto *as_a_ggoto(gimple stmt)
479{
480 return stmt;
481}
482
483static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
484{
485 return stmt;
486}
487
455static inline gphi *as_a_gphi(gimple stmt) 488static inline gphi *as_a_gphi(gimple stmt)
456{ 489{
457 return stmt; 490 return stmt;
@@ -496,6 +529,14 @@ static inline const greturn *as_a_const_greturn(const_gimple stmt)
496 529
497typedef struct rtx_def rtx_insn; 530typedef struct rtx_def rtx_insn;
498 531
532static inline const char *get_decl_section_name(const_tree decl)
533{
534 if (DECL_SECTION_NAME(decl) == NULL_TREE)
535 return NULL;
536
537 return TREE_STRING_POINTER(DECL_SECTION_NAME(decl));
538}
539
499static inline void set_decl_section_name(tree node, const char *value) 540static inline void set_decl_section_name(tree node, const char *value)
500{ 541{
501 if (value) 542 if (value)
@@ -511,6 +552,7 @@ typedef struct gimple_statement_base gassign;
511typedef struct gimple_statement_call gcall; 552typedef struct gimple_statement_call gcall;
512typedef struct gimple_statement_base gcond; 553typedef struct gimple_statement_base gcond;
513typedef struct gimple_statement_base gdebug; 554typedef struct gimple_statement_base gdebug;
555typedef struct gimple_statement_base ggoto;
514typedef struct gimple_statement_phi gphi; 556typedef struct gimple_statement_phi gphi;
515typedef struct gimple_statement_base greturn; 557typedef struct gimple_statement_base greturn;
516 558
@@ -564,6 +606,16 @@ static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
564 return stmt; 606 return stmt;
565} 607}
566 608
609static inline ggoto *as_a_ggoto(gimple stmt)
610{
611 return stmt;
612}
613
614static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
615{
616 return stmt;
617}
618
567static inline gphi *as_a_gphi(gimple stmt) 619static inline gphi *as_a_gphi(gimple stmt)
568{ 620{
569 return as_a<gphi>(stmt); 621 return as_a<gphi>(stmt);
@@ -611,6 +663,11 @@ inline bool is_a_helper<const gassign *>::test(const_gimple gs)
611 663
612#define INSN_DELETED_P(insn) (insn)->deleted() 664#define INSN_DELETED_P(insn) (insn)->deleted()
613 665
666static inline const char *get_decl_section_name(const_tree decl)
667{
668 return DECL_SECTION_NAME(decl);
669}
670
614/* symtab/cgraph related */ 671/* symtab/cgraph related */
615#define debug_cgraph_node(node) (node)->debug() 672#define debug_cgraph_node(node) (node)->debug()
616#define cgraph_get_node(decl) cgraph_node::get(decl) 673#define cgraph_get_node(decl) cgraph_node::get(decl)
@@ -619,6 +676,7 @@ inline bool is_a_helper<const gassign *>::test(const_gimple gs)
619#define cgraph_n_nodes symtab->cgraph_count 676#define cgraph_n_nodes symtab->cgraph_count
620#define cgraph_max_uid symtab->cgraph_max_uid 677#define cgraph_max_uid symtab->cgraph_max_uid
621#define varpool_get_node(decl) varpool_node::get(decl) 678#define varpool_get_node(decl) varpool_node::get(decl)
679#define dump_varpool_node(file, node) (node)->dump(file)
622 680
623#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \ 681#define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
624 (caller)->create_edge((callee), (call_stmt), (count), (freq)) 682 (caller)->create_edge((callee), (call_stmt), (count), (freq))
@@ -674,6 +732,11 @@ static inline cgraph_node_ptr cgraph_alias_target(cgraph_node_ptr node)
674 return node->get_alias_target(); 732 return node->get_alias_target();
675} 733}
676 734
735static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable)
736{
737 return node->call_for_symbol_thunks_and_aliases(callback, data, include_overwritable);
738}
739
677static inline struct cgraph_node_hook_list *cgraph_add_function_insertion_hook(cgraph_node_hook hook, void *data) 740static inline struct cgraph_node_hook_list *cgraph_add_function_insertion_hook(cgraph_node_hook hook, void *data)
678{ 741{
679 return symtab->add_cgraph_insertion_hook(hook, data); 742 return symtab->add_cgraph_insertion_hook(hook, data);
@@ -731,6 +794,13 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l
731 794
732template <> 795template <>
733template <> 796template <>
797inline bool is_a_helper<const ggoto *>::test(const_gimple gs)
798{
799 return gs->code == GIMPLE_GOTO;
800}
801
802template <>
803template <>
734inline bool is_a_helper<const greturn *>::test(const_gimple gs) 804inline bool is_a_helper<const greturn *>::test(const_gimple gs)
735{ 805{
736 return gs->code == GIMPLE_RETURN; 806 return gs->code == GIMPLE_RETURN;
@@ -766,6 +836,16 @@ static inline const gcall *as_a_const_gcall(const_gimple stmt)
766 return as_a<const gcall *>(stmt); 836 return as_a<const gcall *>(stmt);
767} 837}
768 838
839static inline ggoto *as_a_ggoto(gimple stmt)
840{
841 return as_a<ggoto *>(stmt);
842}
843
844static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
845{
846 return as_a<const ggoto *>(stmt);
847}
848
769static inline gphi *as_a_gphi(gimple stmt) 849static inline gphi *as_a_gphi(gimple stmt)
770{ 850{
771 return as_a<gphi *>(stmt); 851 return as_a<gphi *>(stmt);
@@ -828,4 +908,9 @@ static inline void debug_gimple_stmt(const_gimple s)
828#define debug_gimple_stmt(s) debug_gimple_stmt(CONST_CAST_GIMPLE(s)) 908#define debug_gimple_stmt(s) debug_gimple_stmt(CONST_CAST_GIMPLE(s))
829#endif 909#endif
830 910
911#if BUILDING_GCC_VERSION >= 7000
912#define get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep, keep_aligning) \
913 get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep)
914#endif
915
831#endif 916#endif
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
index 12541126575b..8ff203ad4809 100644
--- a/scripts/gcc-plugins/latent_entropy_plugin.c
+++ b/scripts/gcc-plugins/latent_entropy_plugin.c
@@ -328,9 +328,9 @@ static enum tree_code get_op(tree *rhs)
328 op = LROTATE_EXPR; 328 op = LROTATE_EXPR;
329 /* 329 /*
330 * This code limits the value of random_const to 330 * This code limits the value of random_const to
331 * the size of a wide int for the rotation 331 * the size of a long for the rotation
332 */ 332 */
333 random_const &= HOST_BITS_PER_WIDE_INT - 1; 333 random_const %= TYPE_PRECISION(long_unsigned_type_node);
334 break; 334 break;
335 } 335 }
336 336
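
The latent_entropy fix changes how the rotate count is bounded: masking with HOST_BITS_PER_WIDE_INT - 1 limited it by the width of the build host's wide-int, while the LROTATE_EXPR being built operates on long_unsigned_type_node, so the count must be reduced modulo that type's precision instead. The same rule in plain C, as a self-contained analogue:

    #include <stdint.h>

    /* Rotate left with the count reduced modulo the operand width;
     * the r == 0 special case avoids the undefined x >> 64. */
    static inline uint64_t rotl64(uint64_t x, unsigned int r)
    {
            r %= 64;
            return r ? (x << r) | (x >> (64 - r)) : x;
    }
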
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index 06121ce524a7..c9235d8340f1 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -44,7 +44,7 @@ char *cur_filename, *source_file;
44int in_source_file; 44int in_source_file;
45 45
46static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types, 46static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types,
47 flag_preserve, flag_warnings; 47 flag_preserve, flag_warnings, flag_rel_crcs;
48static const char *mod_prefix = ""; 48static const char *mod_prefix = "";
49 49
50static int errors; 50static int errors;
@@ -693,7 +693,10 @@ void export_symbol(const char *name)
693 fputs(">\n", debugfile); 693 fputs(">\n", debugfile);
694 694
695 /* Used as a linker script. */ 695 /* Used as a linker script. */
696 printf("%s__crc_%s = 0x%08lx ;\n", mod_prefix, name, crc); 696 printf(!flag_rel_crcs ? "%s__crc_%s = 0x%08lx;\n" :
697 "SECTIONS { .rodata : ALIGN(4) { "
698 "%s__crc_%s = .; LONG(0x%08lx); } }\n",
699 mod_prefix, name, crc);
697 } 700 }
698} 701}
699 702
@@ -730,7 +733,7 @@ void error_with_pos(const char *fmt, ...)
730 733
731static void genksyms_usage(void) 734static void genksyms_usage(void)
732{ 735{
733 fputs("Usage:\n" "genksyms [-adDTwqhV] > /path/to/.tmp_obj.ver\n" "\n" 736 fputs("Usage:\n" "genksyms [-adDTwqhVR] > /path/to/.tmp_obj.ver\n" "\n"
734#ifdef __GNU_LIBRARY__ 737#ifdef __GNU_LIBRARY__
735 " -s, --symbol-prefix Select symbol prefix\n" 738 " -s, --symbol-prefix Select symbol prefix\n"
736 " -d, --debug Increment the debug level (repeatable)\n" 739 " -d, --debug Increment the debug level (repeatable)\n"
@@ -742,6 +745,7 @@ static void genksyms_usage(void)
742 " -q, --quiet Disable warnings (default)\n" 745 " -q, --quiet Disable warnings (default)\n"
743 " -h, --help Print this message\n" 746 " -h, --help Print this message\n"
744 " -V, --version Print the release version\n" 747 " -V, --version Print the release version\n"
748 " -R, --relative-crc Emit section relative symbol CRCs\n"
745#else /* __GNU_LIBRARY__ */ 749#else /* __GNU_LIBRARY__ */
746 " -s Select symbol prefix\n" 750 " -s Select symbol prefix\n"
747 " -d Increment the debug level (repeatable)\n" 751 " -d Increment the debug level (repeatable)\n"
@@ -753,6 +757,7 @@ static void genksyms_usage(void)
753 " -q Disable warnings (default)\n" 757 " -q Disable warnings (default)\n"
754 " -h Print this message\n" 758 " -h Print this message\n"
755 " -V Print the release version\n" 759 " -V Print the release version\n"
760 " -R Emit section relative symbol CRCs\n"
756#endif /* __GNU_LIBRARY__ */ 761#endif /* __GNU_LIBRARY__ */
757 , stderr); 762 , stderr);
758} 763}
@@ -774,13 +779,14 @@ int main(int argc, char **argv)
774 {"preserve", 0, 0, 'p'}, 779 {"preserve", 0, 0, 'p'},
775 {"version", 0, 0, 'V'}, 780 {"version", 0, 0, 'V'},
776 {"help", 0, 0, 'h'}, 781 {"help", 0, 0, 'h'},
782 {"relative-crc", 0, 0, 'R'},
777 {0, 0, 0, 0} 783 {0, 0, 0, 0}
778 }; 784 };
779 785
780 while ((o = getopt_long(argc, argv, "s:dwqVDr:T:ph", 786 while ((o = getopt_long(argc, argv, "s:dwqVDr:T:phR",
781 &long_opts[0], NULL)) != EOF) 787 &long_opts[0], NULL)) != EOF)
782#else /* __GNU_LIBRARY__ */ 788#else /* __GNU_LIBRARY__ */
783 while ((o = getopt(argc, argv, "s:dwqVDr:T:ph")) != EOF) 789 while ((o = getopt(argc, argv, "s:dwqVDr:T:phR")) != EOF)
784#endif /* __GNU_LIBRARY__ */ 790#endif /* __GNU_LIBRARY__ */
785 switch (o) { 791 switch (o) {
786 case 's': 792 case 's':
@@ -823,6 +829,9 @@ int main(int argc, char **argv)
823 case 'h': 829 case 'h':
824 genksyms_usage(); 830 genksyms_usage();
825 return 0; 831 return 0;
832 case 'R':
833 flag_rel_crcs = 1;
834 break;
826 default: 835 default:
827 genksyms_usage(); 836 genksyms_usage();
828 return 1; 837 return 1;
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 299b92ca1ae0..5d554419170b 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -219,6 +219,10 @@ static int symbol_valid(struct sym_entry *s)
219 "_SDA2_BASE_", /* ppc */ 219 "_SDA2_BASE_", /* ppc */
220 NULL }; 220 NULL };
221 221
222 static char *special_prefixes[] = {
223 "__crc_", /* modversions */
224 NULL };
225
222 static char *special_suffixes[] = { 226 static char *special_suffixes[] = {
223 "_veneer", /* arm */ 227 "_veneer", /* arm */
224 "_from_arm", /* arm */ 228 "_from_arm", /* arm */
@@ -259,6 +263,14 @@ static int symbol_valid(struct sym_entry *s)
259 if (strcmp(sym_name, special_symbols[i]) == 0) 263 if (strcmp(sym_name, special_symbols[i]) == 0)
260 return 0; 264 return 0;
261 265
266 for (i = 0; special_prefixes[i]; i++) {
267 int l = strlen(special_prefixes[i]);
268
269 if (l <= strlen(sym_name) &&
270 strncmp(sym_name, special_prefixes[i], l) == 0)
271 return 0;
272 }
273
262 for (i = 0; special_suffixes[i]; i++) { 274 for (i = 0; special_suffixes[i]; i++) {
263 int l = strlen(sym_name) - strlen(special_suffixes[i]); 275 int l = strlen(sym_name) - strlen(special_suffixes[i]);
264 276
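
The kallsyms filter matters because, with CONFIG_MODULE_REL_CRCS, the __crc_<symbol> names become section-relative markers in .rodata (see the genksyms linker-script change above) rather than absolute pseudo-symbols, and they would otherwise pollute the symbol table. The added prefix check, extracted as a self-contained helper:

    #include <string.h>

    /* Return nonzero if sym_name starts with any listed prefix. */
    static int has_special_prefix(const char *sym_name)
    {
            static const char *const prefixes[] = { "__crc_", NULL };
            int i;

            for (i = 0; prefixes[i]; i++)
                    if (!strncmp(sym_name, prefixes[i],
                                 strlen(prefixes[i])))
                            return 1;

            return 0;
    }
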
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 29c89a6bad3d..4dedd0d3d3a7 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -621,6 +621,16 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
621 if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) { 621 if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
622 is_crc = true; 622 is_crc = true;
623 crc = (unsigned int) sym->st_value; 623 crc = (unsigned int) sym->st_value;
624 if (sym->st_shndx != SHN_UNDEF && sym->st_shndx != SHN_ABS) {
625 unsigned int *crcp;
626
627 /* symbol points to the CRC in the ELF object */
628 crcp = (void *)info->hdr + sym->st_value +
629 info->sechdrs[sym->st_shndx].sh_offset -
630 (info->hdr->e_type != ET_REL ?
631 info->sechdrs[sym->st_shndx].sh_addr : 0);
632 crc = *crcp;
633 }
624 sym_update_crc(symname + strlen(CRC_PFX), mod, crc, 634 sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
625 export); 635 export);
626 } 636 }
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index c7c6619431d5..d98550abe16d 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -5887,7 +5887,7 @@ static int selinux_setprocattr(struct task_struct *p,
5887 return error; 5887 return error;
5888 5888
5889 /* Obtain a SID for the context, if one was specified. */ 5889 /* Obtain a SID for the context, if one was specified. */
5890 if (size && str[1] && str[1] != '\n') { 5890 if (size && str[0] && str[0] != '\n') {
5891 if (str[size-1] == '\n') { 5891 if (str[size-1] == '\n') {
5892 str[size-1] = 0; 5892 str[size-1] = 0;
5893 size--; 5893 size--;
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index c850345c43b5..dfa5156f3585 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -419,7 +419,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
419{ 419{
420 unsigned long flags; 420 unsigned long flags;
421 struct snd_seq_event_cell *ptr; 421 struct snd_seq_event_cell *ptr;
422 int max_count = 5 * HZ;
423 422
424 if (snd_BUG_ON(!pool)) 423 if (snd_BUG_ON(!pool))
425 return -EINVAL; 424 return -EINVAL;
@@ -432,14 +431,8 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
432 if (waitqueue_active(&pool->output_sleep)) 431 if (waitqueue_active(&pool->output_sleep))
433 wake_up(&pool->output_sleep); 432 wake_up(&pool->output_sleep);
434 433
435 while (atomic_read(&pool->counter) > 0) { 434 while (atomic_read(&pool->counter) > 0)
436 if (max_count == 0) {
437 pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
438 break;
439 }
440 schedule_timeout_uninterruptible(1); 435 schedule_timeout_uninterruptible(1);
441 max_count--;
442 }
443 436
444 /* release all resources */ 437 /* release all resources */
445 spin_lock_irqsave(&pool->lock, flags); 438 spin_lock_irqsave(&pool->lock, flags);
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 0bec02e89d51..450c5187eecb 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -181,6 +181,8 @@ void __exit snd_seq_queues_delete(void)
181 } 181 }
182} 182}
183 183
184static void queue_use(struct snd_seq_queue *queue, int client, int use);
185
184/* allocate a new queue - 186/* allocate a new queue -
185 * return queue index value or negative value for error 187 * return queue index value or negative value for error
186 */ 188 */
@@ -192,11 +194,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
192 if (q == NULL) 194 if (q == NULL)
193 return -ENOMEM; 195 return -ENOMEM;
194 q->info_flags = info_flags; 196 q->info_flags = info_flags;
197 queue_use(q, client, 1);
195 if (queue_list_add(q) < 0) { 198 if (queue_list_add(q) < 0) {
196 queue_delete(q); 199 queue_delete(q);
197 return -ENOMEM; 200 return -ENOMEM;
198 } 201 }
199 snd_seq_queue_use(q->queue, client, 1); /* use this queue */
200 return q->queue; 202 return q->queue;
201} 203}
202 204
@@ -502,19 +504,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
502 return result; 504 return result;
503} 505}
504 506
505 507/* use or unuse this queue */
506/* use or unuse this queue - 508static void queue_use(struct snd_seq_queue *queue, int client, int use)
507 * if it is the first client, starts the timer.
508 * if it is not longer used by any clients, stop the timer.
509 */
510int snd_seq_queue_use(int queueid, int client, int use)
511{ 509{
512 struct snd_seq_queue *queue;
513
514 queue = queueptr(queueid);
515 if (queue == NULL)
516 return -EINVAL;
517 mutex_lock(&queue->timer_mutex);
518 if (use) { 510 if (use) {
519 if (!test_and_set_bit(client, queue->clients_bitmap)) 511 if (!test_and_set_bit(client, queue->clients_bitmap))
520 queue->clients++; 512 queue->clients++;
@@ -529,6 +521,21 @@ int snd_seq_queue_use(int queueid, int client, int use)
529 } else { 521 } else {
530 snd_seq_timer_close(queue); 522 snd_seq_timer_close(queue);
531 } 523 }
524}
525
526/* use or unuse this queue -
527 * if it is the first client, starts the timer.
528 * if it is not longer used by any clients, stop the timer.
529 */
530int snd_seq_queue_use(int queueid, int client, int use)
531{
532 struct snd_seq_queue *queue;
533
534 queue = queueptr(queueid);
535 if (queue == NULL)
536 return -EINVAL;
537 mutex_lock(&queue->timer_mutex);
538 queue_use(queue, client, use);
532 mutex_unlock(&queue->timer_mutex); 539 mutex_unlock(&queue->timer_mutex);
533 queuefree(queue); 540 queuefree(queue);
534 return 0; 541 return 0;
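
The seq_queue refactor splits the lock-taking snd_seq_queue_use() from a bare queue_use() so the allocator can mark the creating client as a user before queue_list_add() makes the queue globally visible; once an object sits on a shared list, any initialization still pending is a race window. The ordering, condensed from the hunk above:

    /* Ordering sketch: fully initialize, then publish. Names follow
     * the hunk above; error handling condensed. */
    q = queue_new(owner, locked);
    if (!q)
            return -ENOMEM;
    q->info_flags = info_flags;
    queue_use(q, client, 1);        /* still private: no lock needed */
    if (queue_list_add(q) < 0) {    /* publication is the last step */
            queue_delete(q);
            return -ENOMEM;
    }
    return q->queue;
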
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index ee47924aef0d..827161bc269c 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -117,7 +117,7 @@ destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream)
117 conn = &efw->in_conn; 117 conn = &efw->in_conn;
118 118
119 amdtp_stream_destroy(stream); 119 amdtp_stream_destroy(stream);
120 cmp_connection_destroy(&efw->out_conn); 120 cmp_connection_destroy(conn);
121} 121}
122 122
123static int 123static int
diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
index 4ad3bd7fd445..f1657a4e0621 100644
--- a/sound/firewire/tascam/tascam-stream.c
+++ b/sound/firewire/tascam/tascam-stream.c
@@ -343,7 +343,7 @@ int snd_tscm_stream_init_duplex(struct snd_tscm *tscm)
343 if (err < 0) 343 if (err < 0)
344 amdtp_stream_destroy(&tscm->rx_stream); 344 amdtp_stream_destroy(&tscm->rx_stream);
345 345
346 return 0; 346 return err;
347} 347}
348 348
349/* At bus reset, streaming is stopped and some registers are clear. */ 349/* At bus reset, streaming is stopped and some registers are clear. */
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index cf9bc042fe96..3fc201c3b95a 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3639,6 +3639,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi),
3639HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi), 3639HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
3640HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi), 3640HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
3641HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi), 3641HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
3642HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi),
3642HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi), 3643HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
3643HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi), 3644HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
3644HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), 3645HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9448daff9d8b..7d660ee1d5e8 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2230,6 +2230,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2230 SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC), 2230 SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
2231 SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), 2231 SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
2232 SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS), 2232 SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
2233 SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
2233 SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), 2234 SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
2234 SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), 2235 SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
2235 SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), 2236 SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
@@ -6983,6 +6984,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
6983 SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16), 6984 SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
6984 SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51), 6985 SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
6985 SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51), 6986 SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
6987 SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8),
6986 SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16), 6988 SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
6987 SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP), 6989 SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
6988 SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT), 6990 SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index efe3a44658d5..4576f987a4a5 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -561,9 +561,9 @@ static void nau8825_xtalk_prepare(struct nau8825 *nau8825)
561 nau8825_xtalk_backup(nau8825); 561 nau8825_xtalk_backup(nau8825);
562 /* Config IIS as master to output signal by codec */ 562 /* Config IIS as master to output signal by codec */
563 regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2, 563 regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
564 NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK | 564 NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
565 NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_MASTER | 565 NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_MASTER |
566 (0x2 << NAU8825_I2S_DRV_SFT) | 0x1); 566 (0x2 << NAU8825_I2S_LRC_DIV_SFT) | 0x1);
567 /* Ramp up headphone volume to 0dB to get better performance and 567 /* Ramp up headphone volume to 0dB to get better performance and
568 * avoid pop noise in headphone. 568 * avoid pop noise in headphone.
569 */ 569 */
@@ -657,7 +657,7 @@ static void nau8825_xtalk_clean(struct nau8825 *nau8825)
657 NAU8825_IRQ_RMS_EN, NAU8825_IRQ_RMS_EN); 657 NAU8825_IRQ_RMS_EN, NAU8825_IRQ_RMS_EN);
658 /* Recover default value for IIS */ 658 /* Recover default value for IIS */
659 regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2, 659 regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
660 NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK | 660 NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
661 NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_SLAVE); 661 NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_SLAVE);
662 /* Restore value of specific register for cross talk */ 662 /* Restore value of specific register for cross talk */
663 nau8825_xtalk_restore(nau8825); 663 nau8825_xtalk_restore(nau8825);
@@ -2006,7 +2006,8 @@ static void nau8825_fll_apply(struct nau8825 *nau8825,
2006 NAU8825_FLL_INTEGER_MASK, fll_param->fll_int); 2006 NAU8825_FLL_INTEGER_MASK, fll_param->fll_int);
2007 /* FLL pre-scaler */ 2007 /* FLL pre-scaler */
2008 regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL4, 2008 regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL4,
2009 NAU8825_FLL_REF_DIV_MASK, fll_param->clk_ref_div); 2009 NAU8825_FLL_REF_DIV_MASK,
2010 fll_param->clk_ref_div << NAU8825_FLL_REF_DIV_SFT);
2010 /* select divided VCO input */ 2011 /* select divided VCO input */
2011 regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5, 2012 regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
2012 NAU8825_FLL_CLK_SW_MASK, NAU8825_FLL_CLK_SW_REF); 2013 NAU8825_FLL_CLK_SW_MASK, NAU8825_FLL_CLK_SW_REF);
diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h
index 5d1704e73241..514fd13c2f46 100644
--- a/sound/soc/codecs/nau8825.h
+++ b/sound/soc/codecs/nau8825.h
@@ -137,7 +137,8 @@
137#define NAU8825_FLL_CLK_SRC_FS (0x3 << NAU8825_FLL_CLK_SRC_SFT) 137#define NAU8825_FLL_CLK_SRC_FS (0x3 << NAU8825_FLL_CLK_SRC_SFT)
138 138
139/* FLL4 (0x07) */ 139/* FLL4 (0x07) */
140#define NAU8825_FLL_REF_DIV_MASK (0x3 << 10) 140#define NAU8825_FLL_REF_DIV_SFT 10
141#define NAU8825_FLL_REF_DIV_MASK (0x3 << NAU8825_FLL_REF_DIV_SFT)
141 142
142/* FLL5 (0x08) */ 143/* FLL5 (0x08) */
143#define NAU8825_FLL_PDB_DAC_EN (0x1 << 15) 144#define NAU8825_FLL_PDB_DAC_EN (0x1 << 15)
@@ -247,8 +248,8 @@
247 248
248/* I2S_PCM_CTRL2 (0x1d) */ 249/* I2S_PCM_CTRL2 (0x1d) */
249#define NAU8825_I2S_TRISTATE (1 << 15) /* 0 - normal mode, 1 - Hi-Z output */ 250#define NAU8825_I2S_TRISTATE (1 << 15) /* 0 - normal mode, 1 - Hi-Z output */
250#define NAU8825_I2S_DRV_SFT 12 251#define NAU8825_I2S_LRC_DIV_SFT 12
251#define NAU8825_I2S_DRV_MASK (0x3 << NAU8825_I2S_DRV_SFT) 252#define NAU8825_I2S_LRC_DIV_MASK (0x3 << NAU8825_I2S_LRC_DIV_SFT)
252#define NAU8825_I2S_MS_SFT 3 253#define NAU8825_I2S_MS_SFT 3
253#define NAU8825_I2S_MS_MASK (1 << NAU8825_I2S_MS_SFT) 254#define NAU8825_I2S_MS_MASK (1 << NAU8825_I2S_MS_SFT)
254#define NAU8825_I2S_MS_MASTER (1 << NAU8825_I2S_MS_SFT) 255#define NAU8825_I2S_MS_MASTER (1 << NAU8825_I2S_MS_SFT)
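
The FLL4 fix above is the classic regmap field bug: regmap_update_bits() applies the mask, but the new value must already be shifted into the field, which is why the header now defines NAU8825_FLL_REF_DIV_SFT alongside the mask. A small sketch of the discipline (macro and function names illustrative):

    #include <linux/regmap.h>

    #define FLL_REF_DIV_SFT   10
    #define FLL_REF_DIV_MASK  (0x3 << FLL_REF_DIV_SFT)

    static int set_fll_ref_div(struct regmap *map, unsigned int reg,
                               unsigned int div)
    {
            /* pre-shift the value into the field before masking */
            return regmap_update_bits(map, reg, FLL_REF_DIV_MASK,
                                      div << FLL_REF_DIV_SFT);
    }
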
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 10c2a564a715..1ac96ef9ee20 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3833,6 +3833,9 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
3833 } 3833 }
3834 } 3834 }
3835 3835
3836 regmap_update_bits(rt5645->regmap, RT5645_ADDA_CLK1,
3837 RT5645_I2S_PD1_MASK, RT5645_I2S_PD1_2);
3838
3836 if (rt5645->pdata.jd_invert) { 3839 if (rt5645->pdata.jd_invert) {
3837 regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2, 3840 regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
3838 RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV); 3841 RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 8877b74b0510..bb94d50052d7 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -126,6 +126,16 @@ static const struct reg_default aic3x_reg[] = {
126 { 108, 0x00 }, { 109, 0x00 }, 126 { 108, 0x00 }, { 109, 0x00 },
127}; 127};
128 128
129static bool aic3x_volatile_reg(struct device *dev, unsigned int reg)
130{
131 switch (reg) {
132 case AIC3X_RESET:
133 return true;
134 default:
135 return false;
136 }
137}
138
129static const struct regmap_config aic3x_regmap = { 139static const struct regmap_config aic3x_regmap = {
130 .reg_bits = 8, 140 .reg_bits = 8,
131 .val_bits = 8, 141 .val_bits = 8,
@@ -133,6 +143,9 @@ static const struct regmap_config aic3x_regmap = {
133 .max_register = DAC_ICC_ADJ, 143 .max_register = DAC_ICC_ADJ,
134 .reg_defaults = aic3x_reg, 144 .reg_defaults = aic3x_reg,
135 .num_reg_defaults = ARRAY_SIZE(aic3x_reg), 145 .num_reg_defaults = ARRAY_SIZE(aic3x_reg),
146
147 .volatile_reg = aic3x_volatile_reg,
148
136 .cache_type = REGCACHE_RBTREE, 149 .cache_type = REGCACHE_RBTREE,
137}; 150};
138 151
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 593b7d1aed46..d72ccef9e238 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1551,7 +1551,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
 	const struct wmfw_region *region;
 	const struct wm_adsp_region *mem;
 	const char *region_name;
-	char *file, *text;
+	char *file, *text = NULL;
 	struct wm_adsp_buf *buf;
 	unsigned int reg;
 	int regions = 0;
@@ -1700,10 +1700,21 @@ static int wm_adsp_load(struct wm_adsp *dsp)
 			 regions, le32_to_cpu(region->len), offset,
 			 region_name);
 
+		if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
+		    firmware->size) {
+			adsp_err(dsp,
+				 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+				 file, regions, region_name,
+				 le32_to_cpu(region->len), firmware->size);
+			ret = -EINVAL;
+			goto out_fw;
+		}
+
 		if (text) {
 			memcpy(text, region->data, le32_to_cpu(region->len));
 			adsp_info(dsp, "%s: %s\n", file, text);
 			kfree(text);
+			text = NULL;
 		}
 
 		if (reg) {
@@ -1748,6 +1759,7 @@ out_fw:
 	regmap_async_complete(regmap);
 	wm_adsp_buf_free(&buf_list);
 	release_firmware(firmware);
+	kfree(text);
 out:
 	kfree(file);
 
@@ -2233,6 +2245,17 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
 		}
 
 		if (reg) {
+			if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
+			    firmware->size) {
+				adsp_err(dsp,
+					 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+					 file, blocks, region_name,
+					 le32_to_cpu(blk->len),
+					 firmware->size);
+				ret = -EINVAL;
+				goto out_fw;
+			}
+
 			buf = wm_adsp_buf_alloc(blk->data,
 						le32_to_cpu(blk->len),
 						&buf_list);
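
Note: both wm_adsp hunks add the same guard, shown here as a generic,
hypothetical sketch (struct blob_hdr and blob_fits() are illustrative, not
part of wm_adsp): never trust a length field read from a firmware image until
the whole region, header included, is known to fit inside the file.

    struct blob_hdr { __le32 len; u8 data[]; };

    /* true only if header plus payload lie inside the firmware image */
    static bool blob_fits(const struct firmware *fw, size_t pos,
    		      const struct blob_hdr *hdr)
    {
    	return pos + sizeof(*hdr) + le32_to_cpu(hdr->len) <= fw->size;
    }
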
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index 2998954a1c74..bdf8398cbc81 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -681,22 +681,19 @@ static int dw_i2s_probe(struct platform_device *pdev)
 	}
 
 	if (!pdata) {
-		ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
-		if (ret == -EPROBE_DEFER) {
-			dev_err(&pdev->dev,
-				"failed to register PCM, deferring probe\n");
-			return ret;
-		} else if (ret) {
-			dev_err(&pdev->dev,
-				"Could not register DMA PCM: %d\n"
-				"falling back to PIO mode\n", ret);
-			ret = dw_pcm_register(pdev);
-			if (ret) {
-				dev_err(&pdev->dev,
-					"Could not register PIO PCM: %d\n",
+		if (irq >= 0) {
+			ret = dw_pcm_register(pdev);
+			dev->use_pio = true;
+		} else {
+			ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL,
+					0);
+			dev->use_pio = false;
+		}
+
+		if (ret) {
+			dev_err(&pdev->dev, "could not register pcm: %d\n",
 				ret);
 			goto err_clk_disable;
-		}
 		}
 	}
 
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 50349437d961..fde08660b63b 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -224,6 +224,12 @@ struct fsl_ssi_soc_data {
  * @dbg_stats: Debugging statistics
  *
  * @soc: SoC specific data
+ *
+ * @fifo_watermark: the FIFO watermark setting. Notifies DMA when
+ *             there are @fifo_watermark or fewer words in TX fifo or
+ *             @fifo_watermark or more empty words in RX fifo.
+ * @dma_maxburst: max number of words to transfer in one go. So far,
+ *             this is always the same as fifo_watermark.
  */
 struct fsl_ssi_private {
 	struct regmap *regs;
@@ -263,6 +269,9 @@ struct fsl_ssi_private {
 
 	const struct fsl_ssi_soc_data *soc;
 	struct device *dev;
+
+	u32 fifo_watermark;
+	u32 dma_maxburst;
 };
 
 /*
@@ -1051,21 +1060,7 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
 	regmap_write(regs, CCSR_SSI_SRCR, srcr);
 	regmap_write(regs, CCSR_SSI_SCR, scr);
 
-	/*
-	 * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't
-	 * use FIFO 1. We program the transmit water to signal a DMA transfer
-	 * if there are only two (or fewer) elements left in the FIFO. Two
-	 * elements equals one frame (left channel, right channel). This value,
-	 * however, depends on the depth of the transmit buffer.
-	 *
-	 * We set the watermark on the same level as the DMA burstsize. For
-	 * fiq it is probably better to use the biggest possible watermark
-	 * size.
-	 */
-	if (ssi_private->use_dma)
-		wm = ssi_private->fifo_depth - 2;
-	else
-		wm = ssi_private->fifo_depth;
+	wm = ssi_private->fifo_watermark;
 
 	regmap_write(regs, CCSR_SSI_SFCSR,
 		     CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) |
@@ -1373,12 +1368,8 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
 		dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
 			PTR_ERR(ssi_private->baudclk));
 
-	/*
-	 * We have burstsize be "fifo_depth - 2" to match the SSI
-	 * watermark setting in fsl_ssi_startup().
-	 */
-	ssi_private->dma_params_tx.maxburst = ssi_private->fifo_depth - 2;
-	ssi_private->dma_params_rx.maxburst = ssi_private->fifo_depth - 2;
+	ssi_private->dma_params_tx.maxburst = ssi_private->dma_maxburst;
+	ssi_private->dma_params_rx.maxburst = ssi_private->dma_maxburst;
 	ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
 	ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
 
@@ -1543,6 +1534,47 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 		/* Older 8610 DTs didn't have the fifo-depth property */
 		ssi_private->fifo_depth = 8;
 
+	/*
+	 * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't
+	 * use FIFO 1 but set the watermark appropriately nonetheless.
+	 * We program the transmit water to signal a DMA transfer
+	 * if there are N elements left in the FIFO. For chips with 15-deep
+	 * FIFOs, set watermark to 8. This allows the SSI to operate at a
+	 * high data rate without channel slipping. Behavior is unchanged
+	 * for the older chips with a fifo depth of only 8. A value of 4
+	 * might be appropriate for the older chips, but is left at
+	 * fifo_depth-2 until somebody has a chance to test.
+	 *
+	 * We set the watermark on the same level as the DMA burstsize. For
+	 * fiq it is probably better to use the biggest possible watermark
+	 * size.
+	 */
+	switch (ssi_private->fifo_depth) {
+	case 15:
+		/*
+		 * 2 samples is not enough when running at high data
+		 * rates (like 48kHz @ 16 bits/channel, 16 channels)
+		 * 8 seems to split things evenly and leave enough time
+		 * for the DMA to fill the FIFO before it's over/under
+		 * run.
+		 */
+		ssi_private->fifo_watermark = 8;
+		ssi_private->dma_maxburst = 8;
+		break;
+	case 8:
+	default:
+		/*
+		 * maintain old behavior for older chips.
+		 * Keeping it the same because I don't have an older
+		 * board to test with.
+		 * I suspect this could be changed to be something to
+		 * leave some more space in the fifo.
+		 */
+		ssi_private->fifo_watermark = ssi_private->fifo_depth - 2;
+		ssi_private->dma_maxburst = ssi_private->fifo_depth - 2;
+		break;
+	}
+
 	dev_set_drvdata(&pdev->dev, ssi_private);
 
 	if (ssi_private->soc->imx) {
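
Note: the new @fifo_watermark doc comment says the SSI raises its DMA request
once the TX FIFO holds that many words or fewer. A minimal sketch of that
condition, with values from the switch above (illustrative helper, not driver
code):

    /* 15-deep FIFO: request fires when 8 or fewer words remain */
    static bool tx_dma_request(u32 fifo_level, u32 fifo_watermark)
    {
    	return fifo_level <= fifo_watermark;
    }
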
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 507a86a5eafe..8d2fb2d6f532 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -142,7 +142,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
 	 * for Jack detection and button press
 	 */
 	ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_RCCLK,
-				     0,
+				     48000 * 512,
 				     SND_SOC_CLOCK_IN);
 	if (!ret) {
 		if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && priv->mclk)
@@ -825,10 +825,20 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
 	if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && (is_valleyview())) {
 		priv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
 		if (IS_ERR(priv->mclk)) {
+			ret_val = PTR_ERR(priv->mclk);
+
 			dev_err(&pdev->dev,
-				"Failed to get MCLK from pmc_plt_clk_3: %ld\n",
-				PTR_ERR(priv->mclk));
-			return PTR_ERR(priv->mclk);
+				"Failed to get MCLK from pmc_plt_clk_3: %d\n",
+				ret_val);
+
+			/*
+			 * Fall back to bit clock usage for -ENOENT (clock not
+			 * available likely due to missing dependencies), bail
+			 * for all other errors, including -EPROBE_DEFER
+			 */
+			if (ret_val != -ENOENT)
+				return ret_val;
+			byt_rt5640_quirk &= ~BYT_RT5640_MCLK_EN;
 		}
 	}
 
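
Note: the error handling above distinguishes a genuinely absent optional
clock from every other failure. A condensed sketch of that policy
(handle_mclk_err() is a hypothetical helper):

    static int handle_mclk_err(int err, unsigned long *quirks)
    {
    	if (err != -ENOENT)
    		return err;		/* real error or -EPROBE_DEFER: abort */
    	*quirks &= ~BYT_RT5640_MCLK_EN;	/* clock absent: fall back to BCLK */
    	return 0;
    }
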
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 84b5101e6ca6..6c6b63a6b338 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -180,6 +180,9 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
 	snd_pcm_set_sync(substream);
 
 	mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+	if (!mconfig)
+		return -EINVAL;
+
 	skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
 
 	return 0;
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
index 8fc3178bc79c..b30bd384c8d3 100644
--- a/sound/soc/intel/skylake/skl-sst.c
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -515,6 +515,9 @@ EXPORT_SYMBOL_GPL(skl_sst_init_fw);
 
 void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
 {
+
+	if (ctx->dsp->fw)
+		release_firmware(ctx->dsp->fw);
 	skl_clear_module_table(ctx->dsp);
 	skl_freeup_uuid_list(ctx);
 	skl_ipc_free(&ctx->ipc);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 4bd68de76130..99b5b0835c1e 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -1030,10 +1030,8 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
 		return -ENOMEM;
 
 	ret = snd_ctl_add(card, kctrl);
-	if (ret < 0) {
-		snd_ctl_free_one(kctrl);
+	if (ret < 0)
 		return ret;
-	}
 
 	cfg->update = update;
 	cfg->card = card;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index f1901bb1466e..baa1afa41e3d 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1748,6 +1748,7 @@ static int soc_bind_aux_dev(struct snd_soc_card *card, int num)
 
 	component->init = aux_dev->init;
 	component->auxiliary = 1;
+	list_add(&component->card_aux_list, &card->aux_comp_list);
 
 	return 0;
 
@@ -1758,16 +1759,14 @@ err_defer:
 
 static int soc_probe_aux_devices(struct snd_soc_card *card)
 {
-	struct snd_soc_component *comp;
+	struct snd_soc_component *comp, *tmp;
 	int order;
 	int ret;
 
 	for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
 		order++) {
-		list_for_each_entry(comp, &card->component_dev_list, card_list) {
-			if (!comp->auxiliary)
-				continue;
-
+		list_for_each_entry_safe(comp, tmp, &card->aux_comp_list,
+					 card_aux_list) {
 			if (comp->driver->probe_order == order) {
 				ret = soc_probe_component(card, comp);
 				if (ret < 0) {
@@ -1776,6 +1775,7 @@ static int soc_probe_aux_devices(struct snd_soc_card *card)
 						comp->name, ret);
 					return ret;
 				}
+				list_del(&comp->card_aux_list);
 			}
 		}
 	}
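
Note: the switch to list_for_each_entry_safe() above matters because the loop
body now calls list_del() on the current node; the _safe variant caches the
next pointer in 'tmp' before the node is unlinked. A minimal sketch of the
pattern (reap_probed() is illustrative):

    static void reap_probed(struct snd_soc_card *card)
    {
    	struct snd_soc_component *comp, *tmp;

    	list_for_each_entry_safe(comp, tmp, &card->aux_comp_list,
    				 card_aux_list) {
    		list_del(&comp->card_aux_list);	/* safe: next was cached */
    	}
    }
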
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index e7a1eaa2772f..6aba14009c92 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2184,9 +2184,11 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
 		break;
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
+		break;
 	}
 
 out:
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 65670b2b408c..fbfb1fab88d5 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -514,13 +514,12 @@ static void remove_widget(struct snd_soc_component *comp,
 				== SND_SOC_TPLG_TYPE_MIXER)
 				kfree(kcontrol->tlv.p);
 
-			snd_ctl_remove(card, kcontrol);
-
 			/* Private value is used as struct soc_mixer_control
 			 * for volume mixers or soc_bytes_ext for bytes
 			 * controls.
 			 */
 			kfree((void *)kcontrol->private_value);
+			snd_ctl_remove(card, kcontrol);
 		}
 		kfree(w->kcontrol_news);
 	}
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 15d1d5c63c3c..c90607ebe155 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -384,6 +384,9 @@ static void snd_complete_urb(struct urb *urb)
 	if (unlikely(atomic_read(&ep->chip->shutdown)))
 		goto exit_clear;
 
+	if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
+		goto exit_clear;
+
 	if (usb_pipeout(ep->pipe)) {
 		retire_outbound_urb(ep, ctx);
 		/* can be stopped during retire callback */
@@ -534,6 +537,11 @@ static int wait_clear_urbs(struct snd_usb_endpoint *ep)
 			alive, ep->ep_num);
 	clear_bit(EP_FLAG_STOPPING, &ep->flags);
 
+	ep->data_subs = NULL;
+	ep->sync_slave = NULL;
+	ep->retire_data_urb = NULL;
+	ep->prepare_data_urb = NULL;
+
 	return 0;
 }
 
@@ -912,9 +920,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
 /**
  * snd_usb_endpoint_start: start an snd_usb_endpoint
  *
  * @ep: the endpoint to start
- * @can_sleep: flag indicating whether the operation is executed in
- *             non-atomic context
 *
 * A call to this function will increment the use count of the endpoint.
 * In case it is not already running, the URBs for this endpoint will be
@@ -924,7 +930,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
 *
 * Returns an error if the URB submission failed, 0 in all other cases.
 */
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep)
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
 {
 	int err;
 	unsigned int i;
@@ -938,8 +944,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep)
 
 	/* just to be sure */
 	deactivate_urbs(ep, false);
-	if (can_sleep)
-		wait_clear_urbs(ep);
 
 	ep->active_mask = 0;
 	ep->unlink_mask = 0;
@@ -1020,10 +1024,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
 
 	if (--ep->use_count == 0) {
 		deactivate_urbs(ep, false);
-		ep->data_subs = NULL;
-		ep->sync_slave = NULL;
-		ep->retire_data_urb = NULL;
-		ep->prepare_data_urb = NULL;
 		set_bit(EP_FLAG_STOPPING, &ep->flags);
 	}
 }
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index 6428392d8f62..584f295d7c77 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -18,7 +18,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
 				struct audioformat *fmt,
 				struct snd_usb_endpoint *sync_ep);
 
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep);
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
 int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 90009c0b3a92..ab3c280a23d1 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -754,8 +754,9 @@ int line6_probe(struct usb_interface *interface,
 		goto error;
 	}
 
+	line6_get_interval(line6);
+
 	if (properties->capabilities & LINE6_CAP_CONTROL) {
-		line6_get_interval(line6);
 		ret = line6_init_cap_control(line6);
 		if (ret < 0)
 			goto error;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 34c6d4f2c0b6..9aa5b1855481 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -218,7 +218,7 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
 	}
 }
 
-static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep)
+static int start_endpoints(struct snd_usb_substream *subs)
 {
 	int err;
 
@@ -231,7 +231,7 @@ static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep)
 		dev_dbg(&subs->dev->dev, "Starting data EP @%p\n", ep);
 
 		ep->data_subs = subs;
-		err = snd_usb_endpoint_start(ep, can_sleep);
+		err = snd_usb_endpoint_start(ep);
 		if (err < 0) {
 			clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags);
 			return err;
@@ -260,7 +260,7 @@ static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep)
 		dev_dbg(&subs->dev->dev, "Starting sync EP @%p\n", ep);
 
 		ep->sync_slave = subs->data_endpoint;
-		err = snd_usb_endpoint_start(ep, can_sleep);
+		err = snd_usb_endpoint_start(ep);
 		if (err < 0) {
 			clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags);
 			return err;
@@ -850,7 +850,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
 	/* for playback, submit the URBs now; otherwise, the first hwptr_done
 	 * updates for all URBs would happen at the same time when starting */
 	if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
-		ret = start_endpoints(subs, true);
+		ret = start_endpoints(subs);
 
  unlock:
 	snd_usb_unlock_shutdown(subs->stream->chip);
@@ -1666,7 +1666,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
 
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
-		err = start_endpoints(subs, false);
+		err = start_endpoints(subs);
 		if (err < 0)
 			return err;
 
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index b3fd2382fdd9..eb4b9f7a571e 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1135,6 +1135,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
 	case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
 	case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+	case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */
 	case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
 	case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 0eb0e87dbe9f..d2b0ac799d03 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -116,6 +116,12 @@ enum bpf_attach_type {
 
 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
 
+/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
+ * to the given target_fd cgroup the descendent cgroup will be able to
+ * override effective bpf program that was inherited from this cgroup
+ */
+#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
+
 #define BPF_PSEUDO_MAP_FD	1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
@@ -171,6 +177,7 @@ union bpf_attr {
 		__u32		target_fd;	/* container object to attach to */
 		__u32		attach_bpf_fd;	/* eBPF program to attach */
 		__u32		attach_type;
+		__u32		attach_flags;
 	};
 } __attribute__((aligned(8)));
 
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 3ddb58a36d3c..ae752fa4eaa7 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -168,7 +168,8 @@ int bpf_obj_get(const char *pathname)
 	return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
 }
 
-int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
+int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
+		    unsigned int flags)
 {
 	union bpf_attr attr;
 
@@ -176,6 +177,7 @@ int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
 	attr.target_fd	   = target_fd;
 	attr.attach_bpf_fd = prog_fd;
 	attr.attach_type   = type;
+	attr.attach_flags  = flags;
 
 	return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
 }
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index a2f9853dd882..4ac6c4b84100 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -41,7 +41,8 @@ int bpf_map_delete_elem(int fd, void *key);
 int bpf_map_get_next_key(int fd, void *key, void *next_key);
 int bpf_obj_pin(int fd, const char *pathname);
 int bpf_obj_get(const char *pathname);
-int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type);
+int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
+		    unsigned int flags);
 int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
 
 
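
Note: a hedged usage sketch of the extended libbpf call, assuming the caller
already holds a cgroup fd and a loaded program fd (attach_ingress() is
illustrative). BPF_F_ALLOW_OVERRIDE lets a descendant cgroup attach its own
overriding program; passing 0 keeps the old exclusive behavior.

    int attach_ingress(int cgroup_fd, int prog_fd)
    {
    	return bpf_prog_attach(prog_fd, cgroup_fd,
    			       BPF_CGROUP_INET_INGRESS,
    			       BPF_F_ALLOW_OVERRIDE);
    }
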
diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c
index 3284bb14ae78..8aad81151d50 100644
--- a/tools/lib/subcmd/parse-options.c
+++ b/tools/lib/subcmd/parse-options.c
@@ -213,6 +213,9 @@ static int get_value(struct parse_opt_ctx_t *p,
 	else
 		err = get_arg(p, opt, flags, (const char **)opt->value);
 
+	if (opt->set)
+		*(bool *)opt->set = true;
+
 	/* PARSE_OPT_NOEMPTY: Allow NULL but disallow empty string. */
 	if (opt->flags & PARSE_OPT_NOEMPTY) {
 		const char *val = *(const char **)opt->value;
diff --git a/tools/lib/subcmd/parse-options.h b/tools/lib/subcmd/parse-options.h
index 8866ac438b34..11c3be3bcce7 100644
--- a/tools/lib/subcmd/parse-options.h
+++ b/tools/lib/subcmd/parse-options.h
@@ -137,6 +137,11 @@ struct option {
 	{ .type = OPTION_STRING, .short_name = (s), .long_name = (l), \
 	  .value = check_vtype(v, const char **), (a), .help = (h), \
 	  .flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d) }
+#define OPT_STRING_OPTARG_SET(s, l, v, os, a, h, d) \
+	{ .type = OPTION_STRING, .short_name = (s), .long_name = (l), \
+	  .value = check_vtype(v, const char **), (a), .help = (h), \
+	  .flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d), \
+	  .set = check_vtype(os, bool *)}
 #define OPT_STRING_NOEMPTY(s, l, v, a, h)   { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h), .flags = PARSE_OPT_NOEMPTY}
 #define OPT_DATE(s, l, v, h) \
 	{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c
index f1ce60065258..ec30c2fcbac0 100644
--- a/tools/lib/traceevent/plugin_sched_switch.c
+++ b/tools/lib/traceevent/plugin_sched_switch.c
@@ -111,7 +111,7 @@ static int sched_switch_handler(struct trace_seq *s,
 		trace_seq_printf(s, "%lld ", val);
 
 	if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0)
-		trace_seq_printf(s, "[%lld] ", val);
+		trace_seq_printf(s, "[%d] ", (int) val);
 
 	if (pevent_get_field_val(s, event, "prev_state", record, &val, 0) == 0)
 		write_state(s, val);
@@ -129,7 +129,7 @@ static int sched_switch_handler(struct trace_seq *s,
 		trace_seq_printf(s, "%lld", val);
 
 	if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0)
-		trace_seq_printf(s, " [%lld]", val);
+		trace_seq_printf(s, " [%d]", (int) val);
 
 	return 0;
 }
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 5e0dea2cdc01..039636ffb6c8 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 		*type = INSN_RETURN;
 		break;
 
-	case 0xc5: /* iret */
 	case 0xca: /* retf */
 	case 0xcb: /* retf */
+	case 0xcf: /* iret */
 		*type = INSN_CONTEXT_SWITCH;
 		break;
 
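
Note: per the one-byte opcode map in the Intel SDM, 0xc5 decodes as LDS (or a
VEX prefix), not iret; the real iret opcode is 0xcf. A comment-table sketch of
the relevant entries:

    /*
     *	0xca	retf imm16
     *	0xcb	retf
     *	0xcf	iret
     * (0xc5 = LDS / VEX prefix; it was wrongly labelled iret before)
     */
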
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 27fc3617c6a4..5054d9147f0f 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -430,6 +430,10 @@ that gets then processed, possibly via a perf script, to decide if that
 particular perf.data snapshot should be kept or not.
 
 Implies --timestamp-filename, --no-buildid and --no-buildid-cache.
+The reason for the latter two is to reduce the data file switching
+overhead. You can still switch them on with:
+
+  --switch-output --no-no-buildid --no-no-buildid-cache
 
 --dry-run::
 Parse options then exit. --dry-run can be used to detect errors in cmdline
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 8fc24824705e..8bb16aa9d661 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -704,9 +704,9 @@ install-tests: all install-gtk
 	$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
 	$(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
 
-install-bin: install-tools install-tests
+install-bin: install-tools install-tests install-traceevent-plugins
 
-install: install-bin try-install-man install-traceevent-plugins
+install: install-bin try-install-man
 
 install-python_ext:
 	$(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 9ff0db4e2d0c..933aeec46f4a 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -1199,7 +1199,7 @@ static int ui_init(void)
 		BUG_ON(1);
 	}
 
-	perf_hpp__register_sort_field(fmt);
+	perf_hpp__prepend_sort_field(fmt);
 	return 0;
 }
 
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 35a02f8e5a4a..915869e00d86 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -655,7 +655,6 @@ static const struct {
 	{ "__GFP_RECLAIM",		"R" },
 	{ "__GFP_DIRECT_RECLAIM",	"DR" },
 	{ "__GFP_KSWAPD_RECLAIM",	"KR" },
-	{ "__GFP_OTHER_NODE",		"ON" },
 };
 
 static size_t max_gfp_len;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 74d6a035133a..4ec10e9427d9 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1405,7 +1405,7 @@ static bool dry_run;
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
-struct option __record_options[] = {
+static struct option __record_options[] = {
 	OPT_CALLBACK('e', "event", &record.evlist, "event",
 		     "event selector. use 'perf list' to list available events",
 		     parse_events_option),
@@ -1636,7 +1636,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
 	 * overhead. Still generate buildid if they are required
 	 * explicitly using
 	 *
-	 *  perf record --signal-trigger --no-no-buildid \
+	 *  perf record --switch-output --no-no-buildid \
 	 *              --no-no-buildid-cache
 	 *
 	 * Following code equals to:
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index d53e706a6f17..5b134b0d1ff3 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -209,6 +209,7 @@ struct perf_sched {
 	u64		skipped_samples;
 	const char	*time_str;
 	struct perf_time_interval ptime;
+	struct perf_time_interval hist_time;
 };
 
 /* per thread run time data */
@@ -2460,6 +2461,11 @@ static int timehist_sched_change_event(struct perf_tool *tool,
 		timehist_print_sample(sched, sample, &al, thread, t);
 
 out:
+	if (sched->hist_time.start == 0 && t >= ptime->start)
+		sched->hist_time.start = t;
+	if (ptime->end == 0 || t <= ptime->end)
+		sched->hist_time.end = t;
+
 	if (tr) {
 		/* time of this sched_switch event becomes last time task seen */
 		tr->last_time = sample->time;
@@ -2624,6 +2630,7 @@ static void timehist_print_summary(struct perf_sched *sched,
 	struct thread *t;
 	struct thread_runtime *r;
 	int i;
+	u64 hist_time = sched->hist_time.end - sched->hist_time.start;
 
 	memset(&totals, 0, sizeof(totals));
 
@@ -2665,7 +2672,7 @@ static void timehist_print_summary(struct perf_sched *sched,
 			totals.sched_count += r->run_stats.n;
 			printf("    CPU %2d idle for ", i);
 			print_sched_time(r->total_run_time, 6);
-			printf("  msec\n");
+			printf("  msec  (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
 		} else
 			printf("    CPU %2d idle entire time window\n", i);
 	}
@@ -2701,12 +2708,16 @@ static void timehist_print_summary(struct perf_sched *sched,
 
 	printf("\n"
 	       "    Total number of unique tasks: %" PRIu64 "\n"
-	       "Total number of context switches: %" PRIu64 "\n"
-	       "           Total run time (msec): ",
+	       "Total number of context switches: %" PRIu64 "\n",
 	       totals.task_count, totals.sched_count);
 
+	printf("           Total run time (msec): ");
 	print_sched_time(totals.total_run_time, 2);
 	printf("\n");
+
+	printf("    Total scheduling time (msec): ");
+	print_sched_time(hist_time, 2);
+	printf(" (x %d)\n", sched->max_cpu);
 }
 
 typedef int (*sched_handler)(struct perf_tool *tool,
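
Note: the new "(%6.2f%%)" column divides per-CPU idle time by hist_time, the
span between the first and last samples inside the requested window. A sketch
of the arithmetic, with a zero-window guard added here for safety (the patch
itself divides directly):

    static double idle_share(u64 idle_ns, u64 hist_ns)
    {
    	/* e.g. 2500 / 10000 -> 25.00% */
    	return hist_ns ? 100.0 * idle_ns / hist_ns : 0.0;
    }
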
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 37388397b5bc..18cfcdc90356 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
 	list_add_tail(&format->sort_list, &list->sorts);
 }
 
+void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
+				       struct perf_hpp_fmt *format)
+{
+	list_add(&format->sort_list, &list->sorts);
+}
+
 void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
 {
 	list_del(&format->list);
@@ -560,6 +566,10 @@ void perf_hpp__setup_output_field(struct perf_hpp_list *list)
 	perf_hpp_list__for_each_sort_list(list, fmt) {
 		struct perf_hpp_fmt *pos;
 
+		/* skip sort-only fields ("sort_compute" in perf diff) */
+		if (!fmt->entry && !fmt->color)
+			continue;
+
 		perf_hpp_list__for_each_format(list, pos) {
 			if (fmt_equal(fmt, pos))
 				goto next;
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 42922512c1c6..8b610dd9e2f6 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -437,7 +437,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
 		}
 		call->ip = cursor_node->ip;
 		call->ms.sym = cursor_node->sym;
-		call->ms.map = cursor_node->map;
+		call->ms.map = map__get(cursor_node->map);
 
 		if (cursor_node->branch) {
 			call->branch_count = 1;
@@ -477,6 +477,7 @@ add_child(struct callchain_node *parent,
 
 		list_for_each_entry_safe(call, tmp, &new->val, list) {
 			list_del(&call->list);
+			map__zput(call->ms.map);
 			free(call);
 		}
 		free(new);
@@ -761,6 +762,7 @@ merge_chain_branch(struct callchain_cursor *cursor,
 					list->ms.map, list->ms.sym,
 					false, NULL, 0, 0);
 		list_del(&list->list);
+		map__zput(list->ms.map);
 		free(list);
 	}
 
@@ -811,7 +813,8 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
 	}
 
 	node->ip = ip;
-	node->map = map;
+	map__zput(node->map);
+	node->map = map__get(map);
 	node->sym = sym;
 	node->branch = branch;
 	node->nr_loop_iter = nr_loop_iter;
@@ -1142,11 +1145,13 @@ static void free_callchain_node(struct callchain_node *node)
 
 	list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
 		list_del(&list->list);
+		map__zput(list->ms.map);
 		free(list);
 	}
 
 	list_for_each_entry_safe(list, tmp, &node->val, list) {
 		list_del(&list->list);
+		map__zput(list->ms.map);
 		free(list);
 	}
 
@@ -1210,6 +1215,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
 			goto out;
 		*new = *chain;
 		new->has_children = false;
+		map__get(new->ms.map);
 		list_add_tail(&new->list, &head);
 	}
 	parent = parent->parent;
@@ -1230,6 +1236,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
 out:
 	list_for_each_entry_safe(chain, new, &head, list) {
 		list_del(&chain->list);
+		map__zput(chain->ms.map);
 		free(chain);
 	}
 	return -ENOMEM;
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 35c8e379530f..4f4b60f1558a 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -5,6 +5,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include "event.h"
+#include "map.h"
 #include "symbol.h"
 
 #define HELP_PAD "\t\t\t\t"
@@ -184,8 +185,13 @@ int callchain_merge(struct callchain_cursor *cursor,
 */
 static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
 {
+	struct callchain_cursor_node *node;
+
 	cursor->nr = 0;
 	cursor->last = &cursor->first;
+
+	for (node = cursor->first; node != NULL; node = node->next)
+		map__zput(node->map);
 }
 
 int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
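
Note: the callchain hunks all enforce one ownership rule: any map pointer
stored in a node owns a reference. map__get() is NULL-safe and map__zput()
drops the reference and clears the pointer, so teardown can stay
unconditional. A condensed sketch (store_map() is illustrative):

    static void store_map(struct callchain_cursor_node *node, struct map *map)
    {
    	map__zput(node->map);		/* drop any ref held so far */
    	node->map = map__get(map);	/* take our own (NULL-safe) */
    }
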
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 6770a9645609..7d1b7d33e644 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1,6 +1,7 @@
 #include "util.h"
 #include "build-id.h"
 #include "hist.h"
+#include "map.h"
 #include "session.h"
 #include "sort.h"
 #include "evlist.h"
@@ -1019,6 +1020,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
 			 int max_stack_depth, void *arg)
 {
 	int err, err2;
+	struct map *alm = NULL;
+
+	if (al && al->map)
+		alm = map__get(al->map);
 
 	err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
 					iter->evsel, al, max_stack_depth);
@@ -1058,6 +1063,8 @@ out:
 	if (!err)
 		err = err2;
 
+	map__put(alm);
+
 	return err;
 }
 
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index d4b6514eeef5..28c216e3d5b7 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -283,6 +283,8 @@ void perf_hpp_list__column_register(struct perf_hpp_list *list,
 				    struct perf_hpp_fmt *format);
 void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
 					struct perf_hpp_fmt *format);
+void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
+				       struct perf_hpp_fmt *format);
 
 static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
 {
@@ -294,6 +296,11 @@ static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
 	perf_hpp_list__register_sort_field(&perf_hpp_list, format);
 }
 
+static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
+{
+	perf_hpp_list__prepend_sort_field(&perf_hpp_list, format);
+}
+
 #define perf_hpp_list__for_each_format(_list, format) \
 	list_for_each_entry(format, &(_list)->fields, list)
 
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index d281ae2b54e8..6a6f44dd594b 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -163,7 +163,7 @@ static struct map *kernel_get_module_map(const char *module)
163 163
164 /* A file path -- this is an offline module */ 164 /* A file path -- this is an offline module */
165 if (module && strchr(module, '/')) 165 if (module && strchr(module, '/'))
166 return machine__findnew_module_map(host_machine, 0, module); 166 return dso__new_map(module);
167 167
168 if (!module) 168 if (!module)
169 module = "kernel"; 169 module = "kernel";
@@ -173,6 +173,7 @@ static struct map *kernel_get_module_map(const char *module)
173 if (strncmp(pos->dso->short_name + 1, module, 173 if (strncmp(pos->dso->short_name + 1, module,
174 pos->dso->short_name_len - 2) == 0 && 174 pos->dso->short_name_len - 2) == 0 &&
175 module[pos->dso->short_name_len - 2] == '\0') { 175 module[pos->dso->short_name_len - 2] == '\0') {
176 map__get(pos);
176 return pos; 177 return pos;
177 } 178 }
178 } 179 }
@@ -188,15 +189,6 @@ struct map *get_target_map(const char *target, bool user)
188 return kernel_get_module_map(target); 189 return kernel_get_module_map(target);
189} 190}
190 191
191static void put_target_map(struct map *map, bool user)
192{
193 if (map && user) {
194 /* Only the user map needs to be released */
195 map__put(map);
196 }
197}
198
199
200static int convert_exec_to_group(const char *exec, char **result) 192static int convert_exec_to_group(const char *exec, char **result)
201{ 193{
202 char *ptr1, *ptr2, *exec_copy; 194 char *ptr1, *ptr2, *exec_copy;
@@ -268,21 +260,6 @@ static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
268} 260}
269 261
270/* 262/*
271 * NOTE:
272 * '.gnu.linkonce.this_module' section of kernel module elf directly
273 * maps to 'struct module' from linux/module.h. This section contains
274 * actual module name which will be used by kernel after loading it.
275 * But, we cannot use 'struct module' here since linux/module.h is not
276 * exposed to user-space. Offset of 'name' has remained same from long
277 * time, so hardcoding it here.
278 */
279#ifdef __LP64__
280#define MOD_NAME_OFFSET 24
281#else
282#define MOD_NAME_OFFSET 12
283#endif
284
285/*
286 * @module can be module name of module file path. In case of path, 263 * @module can be module name of module file path. In case of path,
287 * inspect elf and find out what is actual module name. 264 * inspect elf and find out what is actual module name.
288 * Caller has to free mod_name after using it. 265 * Caller has to free mod_name after using it.
@@ -296,6 +273,7 @@ static char *find_module_name(const char *module)
296 Elf_Data *data; 273 Elf_Data *data;
297 Elf_Scn *sec; 274 Elf_Scn *sec;
298 char *mod_name = NULL; 275 char *mod_name = NULL;
276 int name_offset;
299 277
300 fd = open(module, O_RDONLY); 278 fd = open(module, O_RDONLY);
301 if (fd < 0) 279 if (fd < 0)
@@ -317,7 +295,21 @@ static char *find_module_name(const char *module)
317 if (!data || !data->d_buf) 295 if (!data || !data->d_buf)
318 goto ret_err; 296 goto ret_err;
319 297
320 mod_name = strdup((char *)data->d_buf + MOD_NAME_OFFSET); 298 /*
299 * NOTE:
300 * '.gnu.linkonce.this_module' section of kernel module elf directly
301 * maps to 'struct module' from linux/module.h. This section contains
302 * actual module name which will be used by kernel after loading it.
303 * But, we cannot use 'struct module' here since linux/module.h is not
304 * exposed to user-space. Offset of 'name' has remained same from long
305 * time, so hardcoding it here.
306 */
307 if (ehdr.e_ident[EI_CLASS] == ELFCLASS32)
308 name_offset = 12;
309 else /* expect ELFCLASS64 by default */
310 name_offset = 24;
311
312 mod_name = strdup((char *)data->d_buf + name_offset);
321 313
322ret_err: 314ret_err:
323 elf_end(elf); 315 elf_end(elf);
@@ -412,7 +404,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
412 } 404 }
413 405
414out: 406out:
415 put_target_map(map, uprobes); 407 map__put(map);
416 return ret; 408 return ret;
417 409
418} 410}
@@ -618,6 +610,67 @@ error:
618 return ret ? : -ENOENT; 610 return ret ? : -ENOENT;
619} 611}
620 612
613/* Adjust symbol name and address */
614static int post_process_probe_trace_point(struct probe_trace_point *tp,
615 struct map *map, unsigned long offs)
616{
617 struct symbol *sym;
618 u64 addr = tp->address + tp->offset - offs;
619
620 sym = map__find_symbol(map, addr);
621 if (!sym)
622 return -ENOENT;
623
624 if (strcmp(sym->name, tp->symbol)) {
625 /* If we have no realname, use symbol for it */
626 if (!tp->realname)
627 tp->realname = tp->symbol;
628 else
629 free(tp->symbol);
630 tp->symbol = strdup(sym->name);
631 if (!tp->symbol)
632 return -ENOMEM;
633 }
634 tp->offset = addr - sym->start;
635 tp->address -= offs;
636
637 return 0;
638}
639
640/*
641 * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes functions
642 * and generate new symbols with suffixes such as .constprop.N or .isra.N
643 * etc. Since those symbols are not recorded in DWARF, we have to find
644 * correct generated symbols from offline ELF binary.
645 * For online kernel or uprobes we don't need this because those are
646 * rebased on _text, or already a section relative address.
647 */
648static int
649post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
650 int ntevs, const char *pathname)
651{
652 struct map *map;
653 unsigned long stext = 0;
654 int i, ret = 0;
655
656 /* Prepare a map for offline binary */
657 map = dso__new_map(pathname);
658 if (!map || get_text_start_address(pathname, &stext) < 0) {
659 pr_warning("Failed to get ELF symbols for %s\n", pathname);
660 return -EINVAL;
661 }
662
663 for (i = 0; i < ntevs; i++) {
664 ret = post_process_probe_trace_point(&tevs[i].point,
665 map, stext);
666 if (ret < 0)
667 break;
668 }
669 map__put(map);
670
671 return ret;
672}
673
621static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs, 674static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
622 int ntevs, const char *exec) 675 int ntevs, const char *exec)
623{ 676{
@@ -645,18 +698,31 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
645 return ret; 698 return ret;
646} 699}
647 700
648static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, 701static int
649 int ntevs, const char *module) 702post_process_module_probe_trace_events(struct probe_trace_event *tevs,
703 int ntevs, const char *module,
704 struct debuginfo *dinfo)
650{ 705{
706 Dwarf_Addr text_offs = 0;
651 int i, ret = 0; 707 int i, ret = 0;
652 char *mod_name = NULL; 708 char *mod_name = NULL;
709 struct map *map;
653 710
654 if (!module) 711 if (!module)
655 return 0; 712 return 0;
656 713
657 mod_name = find_module_name(module); 714 map = get_target_map(module, false);
715 if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) {
716 pr_warning("Failed to get ELF symbols for %s\n", module);
717 return -EINVAL;
718 }
658 719
720 mod_name = find_module_name(module);
659 for (i = 0; i < ntevs; i++) { 721 for (i = 0; i < ntevs; i++) {
722 ret = post_process_probe_trace_point(&tevs[i].point,
723 map, (unsigned long)text_offs);
724 if (ret < 0)
725 break;
660 tevs[i].point.module = 726 tevs[i].point.module =
661 strdup(mod_name ? mod_name : module); 727 strdup(mod_name ? mod_name : module);
662 if (!tevs[i].point.module) { 728 if (!tevs[i].point.module) {
@@ -666,6 +732,8 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
666 } 732 }
667 733
668 free(mod_name); 734 free(mod_name);
735 map__put(map);
736
669 return ret; 737 return ret;
670} 738}
671 739
@@ -679,7 +747,8 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
679 747
680 /* Skip post process if the target is an offline kernel */ 748 /* Skip post process if the target is an offline kernel */
681 if (symbol_conf.ignore_vmlinux_buildid) 749 if (symbol_conf.ignore_vmlinux_buildid)
682 return 0; 750 return post_process_offline_probe_trace_events(tevs, ntevs,
751 symbol_conf.vmlinux_name);
683 752
684 reloc_sym = kernel_get_ref_reloc_sym(); 753 reloc_sym = kernel_get_ref_reloc_sym();
685 if (!reloc_sym) { 754 if (!reloc_sym) {
@@ -722,7 +791,7 @@ arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unuse
722static int post_process_probe_trace_events(struct perf_probe_event *pev, 791static int post_process_probe_trace_events(struct perf_probe_event *pev,
723 struct probe_trace_event *tevs, 792 struct probe_trace_event *tevs,
724 int ntevs, const char *module, 793 int ntevs, const char *module,
725 bool uprobe) 794 bool uprobe, struct debuginfo *dinfo)
726{ 795{
727 int ret; 796 int ret;
728 797
@@ -730,7 +799,8 @@ static int post_process_probe_trace_events(struct perf_probe_event *pev,
730 ret = add_exec_to_probe_trace_events(tevs, ntevs, module); 799 ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
731 else if (module) 800 else if (module)
732 /* Currently ref_reloc_sym based probe is not for drivers */ 801 /* Currently ref_reloc_sym based probe is not for drivers */
733 ret = add_module_to_probe_trace_events(tevs, ntevs, module); 802 ret = post_process_module_probe_trace_events(tevs, ntevs,
803 module, dinfo);
734 else 804 else
735 ret = post_process_kernel_probe_trace_events(tevs, ntevs); 805 ret = post_process_kernel_probe_trace_events(tevs, ntevs);
736 806
@@ -774,30 +844,27 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
774 } 844 }
775 } 845 }
776 846
777 debuginfo__delete(dinfo);
778
779 if (ntevs > 0) { /* Succeeded to find trace events */ 847 if (ntevs > 0) { /* Succeeded to find trace events */
780 pr_debug("Found %d probe_trace_events.\n", ntevs); 848 pr_debug("Found %d probe_trace_events.\n", ntevs);
781 ret = post_process_probe_trace_events(pev, *tevs, ntevs, 849 ret = post_process_probe_trace_events(pev, *tevs, ntevs,
782 pev->target, pev->uprobes); 850 pev->target, pev->uprobes, dinfo);
783 if (ret < 0 || ret == ntevs) { 851 if (ret < 0 || ret == ntevs) {
852 pr_debug("Post processing failed or all events are skipped. (%d)\n", ret);
784 clear_probe_trace_events(*tevs, ntevs); 853 clear_probe_trace_events(*tevs, ntevs);
785 zfree(tevs); 854 zfree(tevs);
855 ntevs = 0;
786 } 856 }
787 if (ret != ntevs)
788 return ret < 0 ? ret : ntevs;
789 ntevs = 0;
790 /* Fall through */
791 } 857 }
792 858
859 debuginfo__delete(dinfo);
860
793 if (ntevs == 0) { /* No error but failed to find probe point. */ 861 if (ntevs == 0) { /* No error but failed to find probe point. */
794 pr_warning("Probe point '%s' not found.\n", 862 pr_warning("Probe point '%s' not found.\n",
795 synthesize_perf_probe_point(&pev->point)); 863 synthesize_perf_probe_point(&pev->point));
796 return -ENOENT; 864 return -ENOENT;
797 } 865 } else if (ntevs < 0) {
798 /* Error path : ntevs < 0 */ 866 /* Error path : ntevs < 0 */
799 pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs); 867 pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
800 if (ntevs < 0) {
801 if (ntevs == -EBADF) 868 if (ntevs == -EBADF)
802 pr_warning("Warning: No dwarf info found in the vmlinux - " 869 pr_warning("Warning: No dwarf info found in the vmlinux - "
803 "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n"); 870 "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
@@ -2869,7 +2936,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
2869 } 2936 }
2870 2937
2871out: 2938out:
2872 put_target_map(map, pev->uprobes); 2939 map__put(map);
2873 free(syms); 2940 free(syms);
2874 return ret; 2941 return ret;
2875 2942
@@ -3362,10 +3429,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
3362 return ret; 3429 return ret;
3363 3430
3364 /* Get a symbol map */ 3431 /* Get a symbol map */
3365 if (user) 3432 map = get_target_map(target, user);
3366 map = dso__new_map(target);
3367 else
3368 map = kernel_get_module_map(target);
3369 if (!map) { 3433 if (!map) {
3370 pr_err("Failed to get a map for %s\n", (target) ? : "kernel"); 3434 pr_err("Failed to get a map for %s\n", (target) ? : "kernel");
3371 return -EINVAL; 3435 return -EINVAL;
@@ -3397,9 +3461,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
3397 } 3461 }
3398 3462
3399end: 3463end:
3400 if (user) { 3464 map__put(map);
3401 map__put(map);
3402 }
3403 exit_probe_symbol_maps(); 3465 exit_probe_symbol_maps();
3404 3466
3405 return ret; 3467 return ret;
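Editor's note: both hunks above replace open-coded map lookup/release with a single helper pair, so every path now ends in a plain map__put(). The helper's body is not shown in this patch excerpt; a minimal reconstruction from the two calls visible in the removed lines (hypothetical sketch, not the patch's exact code) would be:

	/* Sketch: one helper replaces the open-coded user/kernel branch. */
	static struct map *get_target_map(const char *target, bool user)
	{
		/* Init maps of the given executable or kernel */
		if (user)
			return dso__new_map(target);	/* user binary */
		return kernel_get_module_map(target);	/* kernel or module */
	}

Because both branches return a reference-counted map, the caller can drop the old "if (user)" guard and release unconditionally with map__put(), as the second hunk does.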
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index df4debe564da..0d9d6e0803b8 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1501,7 +1501,8 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
1501} 1501}
1502 1502
1503/* For the kernel module, we need a special code to get a DIE */ 1503/* For the kernel module, we need a special code to get a DIE */
1504static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs) 1504int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
1505 bool adjust_offset)
1505{ 1506{
1506 int n, i; 1507 int n, i;
1507 Elf32_Word shndx; 1508 Elf32_Word shndx;
@@ -1530,6 +1531,8 @@ static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
1530 if (!shdr) 1531 if (!shdr)
1531 return -ENOENT; 1532 return -ENOENT;
1532 *offs = shdr->sh_addr; 1533 *offs = shdr->sh_addr;
1534 if (adjust_offset)
1535 *offs -= shdr->sh_offset;
1533 } 1536 }
1534 } 1537 }
1535 return 0; 1538 return 0;
@@ -1543,16 +1546,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
1543 Dwarf_Addr _addr = 0, baseaddr = 0; 1546 Dwarf_Addr _addr = 0, baseaddr = 0;
1544 const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp; 1547 const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
1545 int baseline = 0, lineno = 0, ret = 0; 1548 int baseline = 0, lineno = 0, ret = 0;
1546 bool reloc = false;
1547 1549
1548retry: 1550 /* We always need to relocate the address for aranges */
1551 if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0)
1552 addr += baseaddr;
1549 /* Find cu die */ 1553 /* Find cu die */
1550 if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { 1554 if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
1551 if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
1552 addr += baseaddr;
1553 reloc = true;
1554 goto retry;
1555 }
1556 pr_warning("Failed to find debug information for address %lx\n", 1555 pr_warning("Failed to find debug information for address %lx\n",
1557 addr); 1556 addr);
1558 ret = -EINVAL; 1557 ret = -EINVAL;
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index f1d8558f498e..2956c5198652 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -46,6 +46,9 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
46int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, 46int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
47 struct perf_probe_point *ppt); 47 struct perf_probe_point *ppt);
48 48
49int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
50 bool adjust_offset);
51
49/* Find a line range */ 52/* Find a line range */
50int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr); 53int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr);
51 54
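Editor's note: exporting debuginfo__get_text_offset() with an adjust_offset flag is what lets the new post_process_module_probe_trace_events() (named in the probe-event.c hunk above) translate addresses for ET_REL module objects, and it is also why debuginfo__delete() moved after post-processing: dinfo must stay alive until then. The body of that post-processing helper is not shown here, so the following caller sketch is an assumption about its shape, not the patch's code:

	/* Hypothetical caller sketch: rebase probe addresses for a module. */
	static int adjust_module_addrs(struct probe_trace_event *tevs, int ntevs,
				       struct debuginfo *dinfo)
	{
		Dwarf_Addr text_offs = 0;
		int i;

		/* adjust_offset == true: subtract sh_offset so the result is
		 * relative to the file start, not the mapped section. */
		if (debuginfo__get_text_offset(dinfo, &text_offs, true) < 0)
			return -ENOENT;

		for (i = 0; i < ntevs; i++)
			tevs[i].point.address += text_offs;	/* assumption */
		return 0;
	}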
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 99400b0e8f2a..adbc6c02c3aa 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -537,6 +537,12 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
537 break; 537 break;
538 } else { 538 } else {
539 int n = namesz + descsz; 539 int n = namesz + descsz;
540
541 if (n > (int)sizeof(bf)) {
542 n = sizeof(bf);
543 pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
544 __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
545 }
540 if (read(fd, bf, n) != n) 546 if (read(fd, bf, n) != n)
541 break; 547 break;
542 } 548 }
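Editor's note: the added guard caps the combined name+descriptor size at the scratch buffer before calling read(2), since both sizes come straight from the ELF note header on disk. The same guard, condensed as a standalone pattern (bf, namesz, descsz and the surrounding loop belong to the existing function):

	/* Never read more than the buffer holds; truncate, don't overflow. */
	size_t want = namesz + descsz;		/* file-controlled sizes */
	if (want > sizeof(bf)) {
		want = sizeof(bf);
		pr_debug("truncating oversized ELF note\n");
	}
	if (read(fd, bf, want) != (ssize_t)want)
		break;				/* short read: bail out */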
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 71b05891a6a1..831022b12848 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -90,7 +90,7 @@ ifdef INSTALL_PATH
90 done; 90 done;
91 91
92 @# Ask all targets to emit their test scripts 92 @# Ask all targets to emit their test scripts
93 echo "#!/bin/bash" > $(ALL_SCRIPT) 93 echo "#!/bin/sh" > $(ALL_SCRIPT)
94 echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT) 94 echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT)
95 echo "ROOT=\$$PWD" >> $(ALL_SCRIPT) 95 echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)
96 96
diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh
index 92e627adf354..6d58cca8e235 100755
--- a/tools/testing/selftests/bpf/test_kmod.sh
+++ b/tools/testing/selftests/bpf/test_kmod.sh
@@ -1,4 +1,4 @@
1#!/bin/bash 1#!/bin/sh
2 2
3SRC_TREE=../../../../ 3SRC_TREE=../../../../
4 4
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index b13fed534d76..9f7bd1915c21 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -67,21 +67,23 @@ static int map_equal(int lru_map, int expected)
67 return map_subset(lru_map, expected) && map_subset(expected, lru_map); 67 return map_subset(lru_map, expected) && map_subset(expected, lru_map);
68} 68}
69 69
70static int sched_next_online(int pid, int next_to_try) 70static int sched_next_online(int pid, int *next_to_try)
71{ 71{
72 cpu_set_t cpuset; 72 cpu_set_t cpuset;
73 int next = *next_to_try;
74 int ret = -1;
73 75
74 if (next_to_try == nr_cpus) 76 while (next < nr_cpus) {
75 return -1;
76
77 while (next_to_try < nr_cpus) {
78 CPU_ZERO(&cpuset); 77 CPU_ZERO(&cpuset);
79 CPU_SET(next_to_try++, &cpuset); 78 CPU_SET(next++, &cpuset);
80 if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) 79 if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
80 ret = 0;
81 break; 81 break;
82 }
82 } 83 }
83 84
84 return next_to_try; 85 *next_to_try = next;
86 return ret;
85} 87}
86 88
87/* Size of the LRU map is 2 89/* Size of the LRU map is 2
@@ -96,11 +98,12 @@ static void test_lru_sanity0(int map_type, int map_flags)
96{ 98{
97 unsigned long long key, value[nr_cpus]; 99 unsigned long long key, value[nr_cpus];
98 int lru_map_fd, expected_map_fd; 100 int lru_map_fd, expected_map_fd;
101 int next_cpu = 0;
99 102
100 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, 103 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
101 map_flags); 104 map_flags);
102 105
103 assert(sched_next_online(0, 0) != -1); 106 assert(sched_next_online(0, &next_cpu) != -1);
104 107
105 if (map_flags & BPF_F_NO_COMMON_LRU) 108 if (map_flags & BPF_F_NO_COMMON_LRU)
106 lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus); 109 lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
@@ -183,6 +186,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
183 int lru_map_fd, expected_map_fd; 186 int lru_map_fd, expected_map_fd;
184 unsigned int batch_size; 187 unsigned int batch_size;
185 unsigned int map_size; 188 unsigned int map_size;
189 int next_cpu = 0;
186 190
187 if (map_flags & BPF_F_NO_COMMON_LRU) 191 if (map_flags & BPF_F_NO_COMMON_LRU)
188 /* The percpu lru list (i.e. each cpu has its own LRU 192 /* The percpu lru list (i.e. each cpu has its own LRU
@@ -196,7 +200,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
196 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, 200 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
197 map_flags); 201 map_flags);
198 202
199 assert(sched_next_online(0, 0) != -1); 203 assert(sched_next_online(0, &next_cpu) != -1);
200 204
201 batch_size = tgt_free / 2; 205 batch_size = tgt_free / 2;
202 assert(batch_size * 2 == tgt_free); 206 assert(batch_size * 2 == tgt_free);
@@ -262,6 +266,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
262 int lru_map_fd, expected_map_fd; 266 int lru_map_fd, expected_map_fd;
263 unsigned int batch_size; 267 unsigned int batch_size;
264 unsigned int map_size; 268 unsigned int map_size;
269 int next_cpu = 0;
265 270
266 if (map_flags & BPF_F_NO_COMMON_LRU) 271 if (map_flags & BPF_F_NO_COMMON_LRU)
267 /* The percpu lru list (i.e. each cpu has its own LRU 272 /* The percpu lru list (i.e. each cpu has its own LRU
@@ -275,7 +280,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
275 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, 280 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
276 map_flags); 281 map_flags);
277 282
278 assert(sched_next_online(0, 0) != -1); 283 assert(sched_next_online(0, &next_cpu) != -1);
279 284
280 batch_size = tgt_free / 2; 285 batch_size = tgt_free / 2;
281 assert(batch_size * 2 == tgt_free); 286 assert(batch_size * 2 == tgt_free);
@@ -370,11 +375,12 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
370 int lru_map_fd, expected_map_fd; 375 int lru_map_fd, expected_map_fd;
371 unsigned int batch_size; 376 unsigned int batch_size;
372 unsigned int map_size; 377 unsigned int map_size;
378 int next_cpu = 0;
373 379
374 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, 380 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
375 map_flags); 381 map_flags);
376 382
377 assert(sched_next_online(0, 0) != -1); 383 assert(sched_next_online(0, &next_cpu) != -1);
378 384
379 batch_size = tgt_free / 2; 385 batch_size = tgt_free / 2;
380 assert(batch_size * 2 == tgt_free); 386 assert(batch_size * 2 == tgt_free);
@@ -430,11 +436,12 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
430 int lru_map_fd, expected_map_fd; 436 int lru_map_fd, expected_map_fd;
431 unsigned long long key, value[nr_cpus]; 437 unsigned long long key, value[nr_cpus];
432 unsigned long long end_key; 438 unsigned long long end_key;
439 int next_cpu = 0;
433 440
434 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, 441 printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
435 map_flags); 442 map_flags);
436 443
437 assert(sched_next_online(0, 0) != -1); 444 assert(sched_next_online(0, &next_cpu) != -1);
438 445
439 if (map_flags & BPF_F_NO_COMMON_LRU) 446 if (map_flags & BPF_F_NO_COMMON_LRU)
440 lru_map_fd = create_map(map_type, map_flags, 447 lru_map_fd = create_map(map_type, map_flags,
@@ -502,9 +509,8 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
502static void test_lru_sanity5(int map_type, int map_flags) 509static void test_lru_sanity5(int map_type, int map_flags)
503{ 510{
504 unsigned long long key, value[nr_cpus]; 511 unsigned long long key, value[nr_cpus];
505 int next_sched_cpu = 0; 512 int next_cpu = 0;
506 int map_fd; 513 int map_fd;
507 int i;
508 514
509 if (map_flags & BPF_F_NO_COMMON_LRU) 515 if (map_flags & BPF_F_NO_COMMON_LRU)
510 return; 516 return;
@@ -519,27 +525,20 @@ static void test_lru_sanity5(int map_type, int map_flags)
519 key = 0; 525 key = 0;
520 assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST)); 526 assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
521 527
522 for (i = 0; i < nr_cpus; i++) { 528 while (sched_next_online(0, &next_cpu) != -1) {
523 pid_t pid; 529 pid_t pid;
524 530
525 pid = fork(); 531 pid = fork();
526 if (pid == 0) { 532 if (pid == 0) {
527 next_sched_cpu = sched_next_online(0, next_sched_cpu); 533 do_test_lru_sanity5(key, map_fd);
528 if (next_sched_cpu != -1)
529 do_test_lru_sanity5(key, map_fd);
530 exit(0); 534 exit(0);
531 } else if (pid == -1) { 535 } else if (pid == -1) {
532 printf("couldn't spawn #%d process\n", i); 536 printf("couldn't spawn process to test key:%llu\n",
537 key);
533 exit(1); 538 exit(1);
534 } else { 539 } else {
535 int status; 540 int status;
536 541
537 /* It is mostly redundant and just allow the parent
538 * process to update next_shced_cpu for the next child
539 * process
540 */
541 next_sched_cpu = sched_next_online(pid, next_sched_cpu);
542
543 assert(waitpid(pid, &status, 0) == pid); 542 assert(waitpid(pid, &status, 0) == pid);
544 assert(status == 0); 543 assert(status == 0);
545 key++; 544 key++;
@@ -547,6 +546,8 @@ static void test_lru_sanity5(int map_type, int map_flags)
547 } 546 }
548 547
549 close(map_fd); 548 close(map_fd);
549 /* At least one key should be tested */
550 assert(key > 0);
550 551
551 printf("Pass\n"); 552 printf("Pass\n");
552} 553}
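Editor's note: after this refactor sched_next_online() reports success or failure in its return value and advances the caller's cursor through *next_to_try, so the sanity5 loop can simply fork one child per online CPU until the cursor runs out. A self-contained restatement of the new calling convention (nr_cpus stands in for the test's runtime global):

	#define _GNU_SOURCE
	#include <sched.h>

	static int nr_cpus = 8;		/* stand-in for the test's global */

	/* Pin @pid to the next schedulable CPU at or after *next_to_try. */
	static int pin_next_online(int pid, int *next_to_try)
	{
		cpu_set_t cpuset;
		int next = *next_to_try;
		int ret = -1;

		while (next < nr_cpus) {
			CPU_ZERO(&cpuset);
			CPU_SET(next++, &cpuset);
			if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
				ret = 0;	/* landed on an online CPU */
				break;
			}
		}
		*next_to_try = next;	/* cursor advances even on failure */
		return ret;
	}

Usage mirrors the new test loop: initialize int next_cpu = 0 once, then "while (pin_next_online(0, &next_cpu) != -1)" fork-and-test, and finally assert that at least one key was exercised.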
diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
index c09a682df56a..16058bbea7a8 100755
--- a/tools/testing/selftests/net/run_netsocktests
+++ b/tools/testing/selftests/net/run_netsocktests
@@ -1,4 +1,4 @@
1#!/bin/bash 1#!/bin/sh
2 2
3echo "--------------------" 3echo "--------------------"
4echo "running socket test" 4echo "running socket test"
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
index c22860ab9733..30e1ac62e8cb 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
@@ -66,7 +66,7 @@ int pmc56_overflow(void)
66 66
67 FAIL_IF(ebb_event_enable(&event)); 67 FAIL_IF(ebb_event_enable(&event));
68 68
69 mtspr(SPRN_PMC1, pmc_sample_period(sample_period)); 69 mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
70 mtspr(SPRN_PMC5, 0); 70 mtspr(SPRN_PMC5, 0);
71 mtspr(SPRN_PMC6, 0); 71 mtspr(SPRN_PMC6, 0);
72 72
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index bdd58c78902e..df9e0a0cdf29 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -1367,7 +1367,7 @@ void run_tests_once(void)
1367 tracing_off(); 1367 tracing_off();
1368 close_test_fds(); 1368 close_test_fds();
1369 1369
1370 printf("test %2d PASSED (itertation %d)\n", test_nr, iteration_nr); 1370 printf("test %2d PASSED (iteration %d)\n", test_nr, iteration_nr);
1371 dprintf1("======================\n\n"); 1371 dprintf1("======================\n\n");
1372 } 1372 }
1373 iteration_nr++; 1373 iteration_nr++;
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 34e63cc4c572..14142faf040b 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -26,6 +26,16 @@ static inline void wait_cycles(unsigned long long cycles)
26#define VMEXIT_CYCLES 500 26#define VMEXIT_CYCLES 500
27#define VMENTRY_CYCLES 500 27#define VMENTRY_CYCLES 500
28 28
29#elif defined(__s390x__)
30static inline void wait_cycles(unsigned long long cycles)
31{
32 asm volatile("0: brctg %0,0b" : : "d" (cycles));
33}
34
35/* tweak me */
36#define VMEXIT_CYCLES 200
37#define VMENTRY_CYCLES 200
38
29#else 39#else
30static inline void wait_cycles(unsigned long long cycles) 40static inline void wait_cycles(unsigned long long cycles)
31{ 41{
@@ -81,6 +91,8 @@ extern unsigned ring_size;
81/* Is there a portable way to do this? */ 91/* Is there a portable way to do this? */
82#if defined(__x86_64__) || defined(__i386__) 92#if defined(__x86_64__) || defined(__i386__)
83#define cpu_relax() asm ("rep; nop" ::: "memory") 93#define cpu_relax() asm ("rep; nop" ::: "memory")
94#elif defined(__s390x__)
95#define cpu_relax() barrier()
84#else 96#else
85#define cpu_relax() assert(0) 97#define cpu_relax() assert(0)
86#endif 98#endif
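Editor's note: the new s390x branch busy-waits with brctg (branch relative on count), a single instruction that decrements the count register and branches back while it is nonzero, and it defines cpu_relax() as a plain compiler barrier since this harness has no pause-style hint to issue on s390. What the one-instruction loop computes, expressed in C (illustrative only):

	static inline void wait_cycles_c(unsigned long long cycles)
	{
		do {
			/* brctg: decrement, branch back while nonzero */
		} while (--cycles);
	}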
diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh
index 2e69ca812b4c..29b0d3920bfc 100755
--- a/tools/virtio/ringtest/run-on-all.sh
+++ b/tools/virtio/ringtest/run-on-all.sh
@@ -1,12 +1,13 @@
1#!/bin/sh 1#!/bin/sh
2 2
3CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#')
3#use last CPU for host. Why not the first? 4#use last CPU for host. Why not the first?
4#many devices tend to use cpu0 by default so 5#many devices tend to use cpu0 by default so
5#it tends to be busier 6#it tends to be busier
6HOST_AFFINITY=$(lscpu -p=cpu | tail -1) 7HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1)
7 8
8#run command on all cpus 9#run command on all cpus
9for cpu in $(seq 0 $HOST_AFFINITY) 10for cpu in $CPUS_ONLINE
10do 11do
11 #Don't run guest and host on same CPU 12 #Don't run guest and host on same CPU
12 #It actually works ok if using signalling 13 #It actually works ok if using signalling
diff --git a/usr/Makefile b/usr/Makefile
index 17a513268325..0b87e71c00fc 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -5,8 +5,10 @@
5klibcdirs:; 5klibcdirs:;
6PHONY += klibcdirs 6PHONY += klibcdirs
7 7
8suffix_y = $(CONFIG_INITRAMFS_COMPRESSION) 8suffix_y = $(subst $\",,$(CONFIG_INITRAMFS_COMPRESSION))
9AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/initramfs_data.cpio$(suffix_y)" 9datafile_y = initramfs_data.cpio$(suffix_y)
10AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
11
10 12
11# Generate builtin.o based on initramfs_data.o 13# Generate builtin.o based on initramfs_data.o
12obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o 14obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
@@ -14,7 +16,7 @@ obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
14# initramfs_data.o contains the compressed initramfs_data.cpio image. 16# initramfs_data.o contains the compressed initramfs_data.cpio image.
15# The image is included using .incbin, a dependency which is not 17# The image is included using .incbin, a dependency which is not
16# tracked automatically. 18# tracked automatically.
17$(obj)/initramfs_data.o: $(obj)/initramfs_data.cpio$(suffix_y) FORCE 19$(obj)/initramfs_data.o: $(obj)/$(datafile_y) FORCE
18 20
19##### 21#####
20# Generate the initramfs cpio archive 22# Generate the initramfs cpio archive
@@ -38,10 +40,8 @@ endif
38quiet_cmd_initfs = GEN $@ 40quiet_cmd_initfs = GEN $@
39 cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input) 41 cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
40 42
41targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 \ 43targets := $(datafile_y)
42 initramfs_data.cpio.lzma initramfs_data.cpio.xz \ 44
43 initramfs_data.cpio.lzo initramfs_data.cpio.lz4 \
44 initramfs_data.cpio
45# do not try to update files included in initramfs 45# do not try to update files included in initramfs
46$(deps_initramfs): ; 46$(deps_initramfs): ;
47 47
@@ -51,6 +51,6 @@ $(deps_initramfs): klibcdirs
51# 2) There are changes in which files are included (added or deleted) 51# 2) There are changes in which files are included (added or deleted)
52# 3) If gen_init_cpio are newer than initramfs_data.cpio 52# 3) If gen_init_cpio are newer than initramfs_data.cpio
53# 4) arguments to gen_initramfs.sh changes 53# 4) arguments to gen_initramfs.sh changes
54$(obj)/initramfs_data.cpio$(suffix_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs 54$(obj)/$(datafile_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
55 $(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.d 55 $(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.d
56 $(call if_changed,initfs) 56 $(call if_changed,initfs)
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index a2dbbccbb6a3..6a084cd57b88 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -24,6 +24,7 @@
24 24
25#include <clocksource/arm_arch_timer.h> 25#include <clocksource/arm_arch_timer.h>
26#include <asm/arch_timer.h> 26#include <asm/arch_timer.h>
27#include <asm/kvm_hyp.h>
27 28
28#include <kvm/arm_vgic.h> 29#include <kvm/arm_vgic.h>
29#include <kvm/arm_arch_timer.h> 30#include <kvm/arm_arch_timer.h>
@@ -89,9 +90,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
89 struct kvm_vcpu *vcpu; 90 struct kvm_vcpu *vcpu;
90 91
91 vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired); 92 vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
92 vcpu->arch.timer_cpu.armed = false;
93
94 WARN_ON(!kvm_timer_should_fire(vcpu));
95 93
96 /* 94 /*
97 * If the vcpu is blocked we want to wake it up so that it will see 95 * If the vcpu is blocked we want to wake it up so that it will see
@@ -512,3 +510,25 @@ void kvm_timer_init(struct kvm *kvm)
512{ 510{
513 kvm->arch.timer.cntvoff = kvm_phys_timer_read(); 511 kvm->arch.timer.cntvoff = kvm_phys_timer_read();
514} 512}
513
514/*
515 * On VHE system, we only need to configure trap on physical timer and counter
516 * accesses in EL0 and EL1 once, not for every world switch.
517 * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
518 * and this makes those bits have no effect for the host kernel execution.
519 */
520void kvm_timer_init_vhe(void)
521{
522 /* When HCR_EL2.E2H ==1, EL1PCEN and EL1PCTEN are shifted by 10 */
523 u32 cnthctl_shift = 10;
524 u64 val;
525
526 /*
527 * Disallow physical timer access for the guest.
528 * Physical counter access is allowed.
529 */
530 val = read_sysreg(cnthctl_el2);
531 val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
532 val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
533 write_sysreg(val, cnthctl_el2);
534}
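Editor's note on the shift arithmetic in kvm_timer_init_vhe(): with HCR_EL2.E2H == 1, the EL1PCTEN and EL1PCEN fields of CNTHCTL_EL2 sit at bits 10 and 11 instead of bits 0 and 1, hence cnthctl_shift = 10. Worked out with the usual non-VHE bit definitions (shown here as an assumption about the arch_timer header):

	#define CNTHCTL_EL1PCTEN	(1 << 0)  /* physical counter access */
	#define CNTHCTL_EL1PCEN		(1 << 1)  /* physical timer access */

	u64 val = 0;			/* stand-in for read_sysreg(cnthctl_el2) */
	val &= ~(CNTHCTL_EL1PCEN  << 10); /* clear bit 11: trap guest timer */
	val |=  (CNTHCTL_EL1PCTEN << 10); /* set bit 10: allow counter reads */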
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index 798866a8d875..63e28dd18bb0 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -35,10 +35,16 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
35 /* Disable the virtual timer */ 35 /* Disable the virtual timer */
36 write_sysreg_el0(0, cntv_ctl); 36 write_sysreg_el0(0, cntv_ctl);
37 37
38 /* Allow physical timer/counter access for the host */ 38 /*
39 val = read_sysreg(cnthctl_el2); 39 * We don't need to do this for VHE since the host kernel runs in EL2
40 val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; 40 * with HCR_EL2.TGE ==1, which makes those bits have no impact.
41 write_sysreg(val, cnthctl_el2); 41 */
42 if (!has_vhe()) {
43 /* Allow physical timer/counter access for the host */
44 val = read_sysreg(cnthctl_el2);
45 val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
46 write_sysreg(val, cnthctl_el2);
47 }
42 48
43 /* Clear cntvoff for the host */ 49 /* Clear cntvoff for the host */
44 write_sysreg(0, cntvoff_el2); 50 write_sysreg(0, cntvoff_el2);
@@ -50,14 +56,17 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
50 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 56 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
51 u64 val; 57 u64 val;
52 58
53 /* 59 /* Those bits are already configured at boot on VHE-system */
54 * Disallow physical timer access for the guest 60 if (!has_vhe()) {
55 * Physical counter access is allowed 61 /*
56 */ 62 * Disallow physical timer access for the guest
57 val = read_sysreg(cnthctl_el2); 63 * Physical counter access is allowed
58 val &= ~CNTHCTL_EL1PCEN; 64 */
59 val |= CNTHCTL_EL1PCTEN; 65 val = read_sysreg(cnthctl_el2);
60 write_sysreg(val, cnthctl_el2); 66 val &= ~CNTHCTL_EL1PCEN;
67 val |= CNTHCTL_EL1PCTEN;
68 write_sysreg(val, cnthctl_el2);
69 }
61 70
62 if (timer->enabled) { 71 if (timer->enabled) {
63 write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2); 72 write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 5114391b7e5a..c737ea0a310a 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -268,15 +268,11 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
268{ 268{
269 struct vgic_dist *dist = &kvm->arch.vgic; 269 struct vgic_dist *dist = &kvm->arch.vgic;
270 270
271 mutex_lock(&kvm->lock);
272
273 dist->ready = false; 271 dist->ready = false;
274 dist->initialized = false; 272 dist->initialized = false;
275 273
276 kfree(dist->spis); 274 kfree(dist->spis);
277 dist->nr_spis = 0; 275 dist->nr_spis = 0;
278
279 mutex_unlock(&kvm->lock);
280} 276}
281 277
282void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) 278void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -286,7 +282,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
286 INIT_LIST_HEAD(&vgic_cpu->ap_list_head); 282 INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
287} 283}
288 284
289void kvm_vgic_destroy(struct kvm *kvm) 285/* To be called with kvm->lock held */
286static void __kvm_vgic_destroy(struct kvm *kvm)
290{ 287{
291 struct kvm_vcpu *vcpu; 288 struct kvm_vcpu *vcpu;
292 int i; 289 int i;
@@ -297,6 +294,13 @@ void kvm_vgic_destroy(struct kvm *kvm)
297 kvm_vgic_vcpu_destroy(vcpu); 294 kvm_vgic_vcpu_destroy(vcpu);
298} 295}
299 296
297void kvm_vgic_destroy(struct kvm *kvm)
298{
299 mutex_lock(&kvm->lock);
300 __kvm_vgic_destroy(kvm);
301 mutex_unlock(&kvm->lock);
302}
303
300/** 304/**
301 * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest 305 * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
302 * is a GICv2. A GICv3 must be explicitly initialized by the guest using the 306 * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
@@ -348,6 +352,10 @@ int kvm_vgic_map_resources(struct kvm *kvm)
348 ret = vgic_v2_map_resources(kvm); 352 ret = vgic_v2_map_resources(kvm);
349 else 353 else
350 ret = vgic_v3_map_resources(kvm); 354 ret = vgic_v3_map_resources(kvm);
355
356 if (ret)
357 __kvm_vgic_destroy(kvm);
358
351out: 359out:
352 mutex_unlock(&kvm->lock); 360 mutex_unlock(&kvm->lock);
353 return ret; 361 return ret;
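Editor's note: this reshuffle is the classic locked/unlocked split. The public kvm_vgic_destroy() now takes kvm->lock itself, while kvm_vgic_map_resources(), which already holds the lock, calls __kvm_vgic_destroy() directly on failure; that is what lets the destroy-on-error calls move out of vgic_v2/v3_map_resources() below without deadlocking on a recursive mutex_lock(). A generic sketch of the pattern (do_setup() is hypothetical):

	struct obj {
		struct mutex lock;
		/* ... resources ... */
	};

	static void __teardown(struct obj *o)	/* caller holds o->lock */
	{
		/* ... free resources, reset flags ... */
	}

	void teardown(struct obj *o)		/* public entry point */
	{
		mutex_lock(&o->lock);
		__teardown(o);
		mutex_unlock(&o->lock);
	}

	int setup(struct obj *o)
	{
		int ret;

		mutex_lock(&o->lock);
		ret = do_setup(o);
		if (ret)
			__teardown(o);	/* lock already held: no re-lock */
		mutex_unlock(&o->lock);
		return ret;
	}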
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 9bab86757fa4..834137e7b83f 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -293,8 +293,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
293 dist->ready = true; 293 dist->ready = true;
294 294
295out: 295out:
296 if (ret)
297 kvm_vgic_destroy(kvm);
298 return ret; 296 return ret;
299} 297}
300 298
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 5c9f9745e6ca..e6b03fd8c374 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -302,8 +302,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
302 dist->ready = true; 302 dist->ready = true;
303 303
304out: 304out:
305 if (ret)
306 kvm_vgic_destroy(kvm);
307 return ret; 305 return ret;
308} 306}
309 307
diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c
index 52abac4bb6a2..6d2fcd6fcb25 100644
--- a/virt/lib/irqbypass.c
+++ b/virt/lib/irqbypass.c
@@ -195,7 +195,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
195 mutex_lock(&lock); 195 mutex_lock(&lock);
196 196
197 list_for_each_entry(tmp, &consumers, node) { 197 list_for_each_entry(tmp, &consumers, node) {
198 if (tmp->token == consumer->token) { 198 if (tmp->token == consumer->token || tmp == consumer) {
199 mutex_unlock(&lock); 199 mutex_unlock(&lock);
200 module_put(THIS_MODULE); 200 module_put(THIS_MODULE);
201 return -EBUSY; 201 return -EBUSY;
@@ -245,7 +245,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
245 mutex_lock(&lock); 245 mutex_lock(&lock);
246 246
247 list_for_each_entry(tmp, &consumers, node) { 247 list_for_each_entry(tmp, &consumers, node) {
248 if (tmp->token != consumer->token) 248 if (tmp != consumer)
249 continue; 249 continue;
250 250
251 list_for_each_entry(producer, &producers, node) { 251 list_for_each_entry(producer, &producers, node) {