author    Linus Walleij <linus.walleij@linaro.org>  2015-12-21 03:36:21 -0500
committer Linus Walleij <linus.walleij@linaro.org>  2015-12-21 03:36:21 -0500
commit    0529357f102b96f68bc199f858d1c3b07f4b674c (patch)
tree      eaa047952a768099e00a5245afa0d84799df6990
parent    a9f1a3e4c1c7dc82711bc22dc52c7b0d6912ed56 (diff)
parent    4ef7675344d687a0ef5b0d7c0cee12da005870c0 (diff)
Merge tag 'v4.4-rc6' into devel
Linux 4.4-rc6
-rw-r--r--Documentation/IPMI.txt7
-rw-r--r--Documentation/arm/keystone/Overview.txt18
-rw-r--r--Documentation/block/null_blk.txt3
-rw-r--r--Documentation/devicetree/bindings/dma/ti-edma.txt10
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt4
-rw-r--r--Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/partition.txt7
-rw-r--r--Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt6
-rw-r--r--Documentation/devicetree/bindings/thermal/rockchip-thermal.txt4
-rw-r--r--Documentation/i2c/busses/i2c-i8011
-rw-r--r--Documentation/kernel-parameters.txt3
-rw-r--r--Documentation/networking/e100.txt14
-rw-r--r--MAINTAINERS78
-rw-r--r--Makefile2
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/boot/dts/axs10x_mb.dtsi1
-rw-r--r--arch/arc/boot/dts/nsim_hs.dts3
-rw-r--r--arch/arc/configs/axs101_defconfig2
-rw-r--r--arch/arc/configs/axs103_defconfig2
-rw-r--r--arch/arc/configs/axs103_smp_defconfig2
-rw-r--r--arch/arc/configs/nsim_hs_defconfig2
-rw-r--r--arch/arc/configs/nsim_hs_smp_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig2
-rw-r--r--arch/arc/configs/vdk_hs38_defconfig2
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig2
-rw-r--r--arch/arc/include/asm/irqflags-arcv2.h3
-rw-r--r--arch/arc/include/asm/irqflags-compact.h2
-rw-r--r--arch/arc/include/asm/mach_desc.h4
-rw-r--r--arch/arc/include/asm/smp.h4
-rw-r--r--arch/arc/include/asm/unwind.h4
-rw-r--r--arch/arc/kernel/ctx_sw.c2
-rw-r--r--arch/arc/kernel/ctx_sw_asm.S3
-rw-r--r--arch/arc/kernel/intc-arcv2.c15
-rw-r--r--arch/arc/kernel/irq.c33
-rw-r--r--arch/arc/kernel/mcip.c2
-rw-r--r--arch/arc/kernel/perf_event.c32
-rw-r--r--arch/arc/kernel/process.c9
-rw-r--r--arch/arc/kernel/setup.c1
-rw-r--r--arch/arc/kernel/smp.c8
-rw-r--r--arch/arc/kernel/unwind.c90
-rw-r--r--arch/arc/mm/init.c4
-rw-r--r--arch/arc/mm/tlb.c4
-rw-r--r--arch/arm/Kconfig4
-rw-r--r--arch/arm/boot/dts/am4372.dtsi4
-rw-r--r--arch/arm/boot/dts/am43xx-clocks.dtsi8
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts1
-rw-r--r--arch/arm/boot/dts/animeo_ip.dts6
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi1
-rw-r--r--arch/arm/boot/dts/at91-foxg20.dts2
-rw-r--r--arch/arm/boot/dts/at91-kizbox.dts13
-rw-r--r--arch/arm/boot/dts/at91-kizbox2.dts6
-rw-r--r--arch/arm/boot/dts/at91-kizboxmini.dts4
-rw-r--r--arch/arm/boot/dts/at91-qil_a9260.dts2
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_xplained.dts116
-rw-r--r--arch/arm/boot/dts/at91-sama5d3_xplained.dts2
-rw-r--r--arch/arm/boot/dts/at91-sama5d4_xplained.dts12
-rw-r--r--arch/arm/boot/dts/at91-sama5d4ek.dts12
-rw-r--r--arch/arm/boot/dts/at91rm9200ek.dts9
-rw-r--r--arch/arm/boot/dts/at91sam9261ek.dts19
-rw-r--r--arch/arm/boot/dts/at91sam9263ek.dts13
-rw-r--r--arch/arm/boot/dts/at91sam9g20ek_common.dtsi13
-rw-r--r--arch/arm/boot/dts/at91sam9m10g45ek.dts13
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts11
-rw-r--r--arch/arm/boot/dts/at91sam9rlek.dts13
-rw-r--r--arch/arm/boot/dts/at91sam9x5cm.dtsi11
-rw-r--r--arch/arm/boot/dts/berlin2q.dtsi8
-rw-r--r--arch/arm/boot/dts/dm816x.dtsi8
-rw-r--r--arch/arm/boot/dts/dra7.dtsi4
-rw-r--r--arch/arm/boot/dts/imx27.dtsi16
-rw-r--r--arch/arm/boot/dts/k2l-netcp.dtsi2
-rw-r--r--arch/arm/boot/dts/kirkwood-ts219.dtsi2
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-minnie.dts4
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi10
-rw-r--r--arch/arm/boot/dts/sama5d35ek.dts2
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi2
-rw-r--r--arch/arm/boot/dts/usb_a9260_common.dtsi2
-rw-r--r--arch/arm/boot/dts/usb_a9263.dts2
-rw-r--r--arch/arm/boot/dts/vf610-colibri.dtsi5
-rw-r--r--arch/arm/boot/dts/vf610.dtsi2
-rw-r--r--arch/arm/boot/dts/vfxxx.dtsi14
-rw-r--r--arch/arm/configs/at91_dt_defconfig1
-rw-r--r--arch/arm/configs/sama5_defconfig1
-rw-r--r--arch/arm/include/asm/arch_gicv3.h1
-rw-r--r--arch/arm/include/asm/irq.h5
-rw-r--r--arch/arm/include/asm/kvm_emulate.h12
-rw-r--r--arch/arm/include/asm/uaccess.h4
-rw-r--r--arch/arm/include/uapi/asm/unistd.h1
-rw-r--r--arch/arm/kernel/bios32.c19
-rw-r--r--arch/arm/kernel/calls.S1
-rw-r--r--arch/arm/kernel/process.c33
-rw-r--r--arch/arm/kernel/swp_emulate.c6
-rw-r--r--arch/arm/kvm/arm.c7
-rw-r--r--arch/arm/kvm/mmio.c5
-rw-r--r--arch/arm/kvm/mmu.c15
-rw-r--r--arch/arm/kvm/psci.c20
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c29
-rw-r--r--arch/arm/mach-at91/Kconfig6
-rw-r--r--arch/arm/mach-at91/pm.c7
-rw-r--r--arch/arm/mach-dove/include/mach/entry-macro.S4
-rw-r--r--arch/arm/mach-exynos/pmu.c6
-rw-r--r--arch/arm/mach-imx/gpc.c1
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/io.h12
-rw-r--r--arch/arm/mach-omap2/Kconfig2
-rw-r--r--arch/arm/mach-omap2/omap-smp.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c66
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c56
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_81xx_data.c3
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c29
-rw-r--r--arch/arm/mach-omap2/pm34xx.c4
-rw-r--r--arch/arm/mach-orion5x/include/mach/entry-macro.S2
-rw-r--r--arch/arm/mach-pxa/ezx.c5
-rw-r--r--arch/arm/mach-pxa/palm27x.c2
-rw-r--r--arch/arm/mach-pxa/palmtc.c2
-rw-r--r--arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c2
-rw-r--r--arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c2
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7793.c2
-rw-r--r--arch/arm/mach-zx/Kconfig2
-rw-r--r--arch/arm/mm/context.c38
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/init.c92
-rw-r--r--arch/arm/mm/proc-v7.S4
-rw-r--r--arch/arm/net/bpf_jit_32.c2
-rw-r--r--arch/arm64/Kconfig23
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi5
-rw-r--r--arch/arm64/crypto/aes-ce-cipher.c2
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h1
-rw-r--r--arch/arm64/include/asm/barrier.h16
-rw-r--r--arch/arm64/include/asm/compat.h3
-rw-r--r--arch/arm64/include/asm/cpufeature.h25
-rw-r--r--arch/arm64/include/asm/dma-mapping.h13
-rw-r--r--arch/arm64/include/asm/hw_breakpoint.h6
-rw-r--r--arch/arm64/include/asm/irq.h5
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h18
-rw-r--r--arch/arm64/include/asm/mmu_context.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h13
-rw-r--r--arch/arm64/kernel/cpu_errata.c9
-rw-r--r--arch/arm64/kernel/cpufeature.c37
-rw-r--r--arch/arm64/kernel/cpuinfo.c5
-rw-r--r--arch/arm64/kernel/efi.c45
-rw-r--r--arch/arm64/kernel/suspend.c10
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S5
-rw-r--r--arch/arm64/kvm/handle_exit.c2
-rw-r--r--arch/arm64/kvm/hyp.S14
-rw-r--r--arch/arm64/kvm/inject_fault.c2
-rw-r--r--arch/arm64/kvm/sys_regs.c123
-rw-r--r--arch/arm64/kvm/sys_regs.h8
-rw-r--r--arch/arm64/kvm/sys_regs_generic_v8.c4
-rw-r--r--arch/arm64/mm/context.c38
-rw-r--r--arch/arm64/mm/dma-mapping.c35
-rw-r--r--arch/arm64/mm/fault.c28
-rw-r--r--arch/arm64/mm/mmu.c91
-rw-r--r--arch/arm64/net/bpf_jit_comp.c85
-rw-r--r--arch/blackfin/kernel/perf_event.c2
-rw-r--r--arch/ia64/include/asm/unistd.h2
-rw-r--r--arch/ia64/include/uapi/asm/unistd.h1
-rw-r--r--arch/ia64/kernel/entry.S1
-rw-r--r--arch/m68k/coldfire/m54xx.c2
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h1
-rw-r--r--arch/m68k/kernel/setup_no.c9
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/m68k/mm/motorola.c2
-rw-r--r--arch/m68k/sun3/config.c4
-rw-r--r--arch/microblaze/kernel/dma.c3
-rw-r--r--arch/mips/ath79/setup.c7
-rw-r--r--arch/mips/boot/dts/qca/ar9132.dtsi2
-rw-r--r--arch/mips/include/asm/page.h3
-rw-r--r--arch/mips/kvm/emulate.c2
-rw-r--r--arch/mips/kvm/locore.S16
-rw-r--r--arch/mips/kvm/mips.c5
-rw-r--r--arch/mips/mm/dma-default.c2
-rw-r--r--arch/mips/pci/pci-rt2880.c4
-rw-r--r--arch/mips/pmcs-msp71xx/msp_setup.c4
-rw-r--r--arch/mips/sni/reset.c6
-rw-r--r--arch/mn10300/Kconfig4
-rw-r--r--arch/nios2/mm/cacheflush.c24
-rw-r--r--arch/parisc/Kconfig3
-rw-r--r--arch/parisc/include/asm/hugetlb.h85
-rw-r--r--arch/parisc/include/asm/page.h13
-rw-r--r--arch/parisc/include/asm/pgalloc.h2
-rw-r--r--arch/parisc/include/asm/pgtable.h27
-rw-r--r--arch/parisc/include/asm/processor.h27
-rw-r--r--arch/parisc/include/uapi/asm/mman.h10
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/asm-offsets.c8
-rw-r--r--arch/parisc/kernel/entry.S56
-rw-r--r--arch/parisc/kernel/head.S4
-rw-r--r--arch/parisc/kernel/pci.c18
-rw-r--r--arch/parisc/kernel/setup.c14
-rw-r--r--arch/parisc/kernel/syscall.S4
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/parisc/kernel/traps.c35
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S9
-rw-r--r--arch/parisc/mm/Makefile1
-rw-r--r--arch/parisc/mm/hugetlbpage.c161
-rw-r--r--arch/parisc/mm/init.c40
-rw-r--r--arch/powerpc/boot/dts/sbc8641d.dts8
-rw-r--r--arch/powerpc/include/asm/reg.h1
-rw-r--r--arch/powerpc/include/asm/systbl.h25
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h13
-rw-r--r--arch/powerpc/kernel/eeh_driver.c14
-rw-r--r--arch/powerpc/kernel/process.c18
-rw-r--r--arch/powerpc/kernel/signal_32.c14
-rw-r--r--arch/powerpc/kernel/signal_64.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-irqchip.c64
-rw-r--r--arch/powerpc/platforms/powernv/opal.c2
-rw-r--r--arch/s390/include/asm/cio.h1
-rw-r--r--arch/s390/include/asm/elf.h13
-rw-r--r--arch/s390/include/asm/ipl.h3
-rw-r--r--arch/s390/include/asm/pci_dma.h4
-rw-r--r--arch/s390/include/asm/trace/diag.h6
-rw-r--r--arch/s390/include/uapi/asm/unistd.h19
-rw-r--r--arch/s390/kernel/compat_wrapper.c1
-rw-r--r--arch/s390/kernel/diag.c4
-rw-r--r--arch/s390/kernel/head.S95
-rw-r--r--arch/s390/kernel/ipl.c65
-rw-r--r--arch/s390/kernel/process.c6
-rw-r--r--arch/s390/kernel/sclp.c2
-rw-r--r--arch/s390/kernel/setup.c3
-rw-r--r--arch/s390/kernel/syscalls.S1
-rw-r--r--arch/s390/kernel/trace.c6
-rw-r--r--arch/s390/kvm/interrupt.c7
-rw-r--r--arch/s390/kvm/kvm-s390.c6
-rw-r--r--arch/s390/kvm/priv.c2
-rw-r--r--arch/s390/kvm/sigp.c8
-rw-r--r--arch/s390/mm/init.c30
-rw-r--r--arch/s390/mm/mmap.c60
-rw-r--r--arch/s390/pci/pci_dma.c84
-rw-r--r--arch/sh/include/uapi/asm/unistd_64.h2
-rw-r--r--arch/sh/kernel/perf_event.c2
-rw-r--r--arch/sparc/kernel/perf_event.c2
-rw-r--r--arch/tile/kernel/perf_event.c2
-rw-r--r--arch/um/Makefile2
-rw-r--r--arch/um/drivers/net_user.c10
-rw-r--r--arch/um/kernel/signal.c2
-rw-r--r--arch/x86/boot/boot.h1
-rw-r--r--arch/x86/boot/video-mode.c2
-rw-r--r--arch/x86/boot/video.c2
-rw-r--r--arch/x86/entry/entry_64.S19
-rw-r--r--arch/x86/include/asm/msr-index.h3
-rw-r--r--arch/x86/include/asm/page_types.h16
-rw-r--r--arch/x86/include/asm/pgtable_types.h14
-rw-r--r--arch/x86/include/asm/x86_init.h1
-rw-r--r--arch/x86/kernel/cpu/common.c3
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event.h5
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_cqm.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_lbr.c4
-rw-r--r--arch/x86/kernel/fpu/signal.c11
-rw-r--r--arch/x86/kernel/fpu/xstate.c1
-rw-r--r--arch/x86/kernel/irq_work.c2
-rw-r--r--arch/x86/kernel/mcount_64.S6
-rw-r--r--arch/x86/kernel/pmem.c12
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/signal.c17
-rw-r--r--arch/x86/kernel/smpboot.c9
-rw-r--r--arch/x86/kvm/vmx.c5
-rw-r--r--arch/x86/kvm/x86.c61
-rw-r--r--arch/x86/mm/dump_pagetables.c2
-rw-r--r--arch/x86/mm/mpx.c53
-rw-r--r--arch/x86/pci/bus_numa.c13
-rw-r--r--arch/x86/um/signal.c18
-rw-r--r--arch/x86/xen/mmu.c9
-rw-r--r--arch/x86/xen/suspend.c20
-rw-r--r--block/blk-cgroup.c6
-rw-r--r--block/blk-core.c33
-rw-r--r--block/blk-merge.c35
-rw-r--r--block/blk-mq.c14
-rw-r--r--block/blk-settings.c36
-rw-r--r--block/blk-sysfs.c3
-rw-r--r--block/blk-timeout.c8
-rw-r--r--block/blk.h2
-rw-r--r--block/noop-iosched.c10
-rw-r--r--block/partition-generic.c2
-rw-r--r--block/partitions/mac.c10
-rw-r--r--crypto/ablkcipher.c2
-rw-r--r--crypto/algif_aead.c4
-rw-r--r--crypto/algif_skcipher.c6
-rw-r--r--crypto/blkcipher.c2
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig4
-rw-r--r--drivers/acpi/cppc_acpi.c2
-rw-r--r--drivers/acpi/ec.c2
-rw-r--r--drivers/acpi/nfit.c67
-rw-r--r--drivers/acpi/nfit.h3
-rw-r--r--drivers/acpi/pci_root.c7
-rw-r--r--drivers/acpi/sbshc.c48
-rw-r--r--drivers/ata/ahci.c22
-rw-r--r--drivers/ata/ahci_mvebu.c5
-rw-r--r--drivers/ata/libahci.c9
-rw-r--r--drivers/ata/libata-eh.c8
-rw-r--r--drivers/ata/sata_fsl.c3
-rw-r--r--drivers/ata/sata_sil.c3
-rw-r--r--drivers/base/memory.c4
-rw-r--r--drivers/base/power/domain.c36
-rw-r--r--drivers/base/power/domain_governor.c3
-rw-r--r--drivers/base/power/wakeirq.c6
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c6
-rw-r--r--drivers/block/null_blk.c317
-rw-r--r--drivers/block/rbd.c1
-rw-r--r--drivers/block/xen-blkback/blkback.c15
-rw-r--r--drivers/block/xen-blkback/common.h8
-rw-r--r--drivers/bus/omap-ocp2scp.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c90
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c8
-rw-r--r--drivers/clk/clk-gpio.c33
-rw-r--r--drivers/clk/clk-qoriq.c4
-rw-r--r--drivers/clk/clk-scpi.c1
-rw-r--r--drivers/clk/imx/clk-pllv1.c14
-rw-r--r--drivers/clk/imx/clk-pllv2.c9
-rw-r--r--drivers/clk/imx/clk-vf610.c8
-rw-r--r--drivers/clk/mmp/clk-mmp2.c1
-rw-r--r--drivers/clk/mmp/clk-pxa168.c1
-rw-r--r--drivers/clk/mmp/clk-pxa910.c1
-rw-r--r--drivers/clk/sunxi/clk-a10-pll2.c23
-rw-r--r--drivers/clk/ti/clk-816x.c2
-rw-r--r--drivers/clk/ti/clkt_dpll.c4
-rw-r--r--drivers/clk/ti/divider.c16
-rw-r--r--drivers/clk/ti/fapll.c4
-rw-r--r--drivers/clk/ti/mux.c15
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/fsl_ftm_timer.c4
-rw-r--r--drivers/clocksource/mmio.c2
-rw-r--r--drivers/cpufreq/Kconfig.arm5
-rw-r--r--drivers/cpufreq/Kconfig.x861
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c3
-rw-r--r--drivers/cpufreq/cpufreq.c21
-rw-r--r--drivers/cpufreq/intel_pstate.c322
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c2
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c2
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c3
-rw-r--r--drivers/crypto/qat/qat_common/adf_ctl_drv.c2
-rw-r--r--drivers/crypto/talitos.c2
-rw-r--r--drivers/dma/at_hdmac.c20
-rw-r--r--drivers/dma/at_hdmac_regs.h6
-rw-r--r--drivers/dma/at_xdmac.c29
-rw-r--r--drivers/dma/bcm2835-dma.c78
-rw-r--r--drivers/dma/edma.c57
-rw-r--r--drivers/dma/imx-sdma.c2
-rw-r--r--drivers/dma/mic_x100_dma.c15
-rw-r--r--drivers/dma/sh/usb-dmac.c11
-rw-r--r--drivers/fpga/fpga-mgr.c13
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c7
-rw-r--r--drivers/gpio/gpio-ath79.c2
-rw-r--r--drivers/gpio/gpio-generic.c4
-rw-r--r--drivers/gpio/gpio-omap.c2
-rw-r--r--drivers/gpio/gpio-palmas.c2
-rw-r--r--drivers/gpio/gpio-syscon.c6
-rw-r--r--drivers/gpio/gpio-tegra.c105
-rw-r--r--drivers/gpio/gpiolib.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c185
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c172
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c302
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c24
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h24
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c151
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h11
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c23
-rw-r--r--drivers/gpu/drm/drm_atomic.c61
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c29
-rw-r--r--drivers/gpu/drm/drm_drv.c5
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c51
-rw-r--r--drivers/gpu/drm/drm_fops.c84
-rw-r--r--drivers/gpu/drm/drm_irq.c54
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c3
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c20
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence.c36
-rw-r--r--drivers/gpu/drm/i915/i915_params.c5
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c31
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c75
-rw-r--r--drivers/gpu/drm/i915/intel_display.c94
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c51
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h4
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c12
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c6
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c15
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c34
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c7
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h3
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c1
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c63
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c9
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h344
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h344
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h344
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h308
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h474
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c5
-rw-r--r--drivers/gpu/drm/radeon/cik.c11
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c5
-rw-r--r--drivers/gpu/drm/radeon/r100.c12
-rw-r--r--drivers/gpu/drm/radeon/r600.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c106
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c50
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c100
-rw-r--r--drivers/gpu/drm/radeon/rs600.c2
-rw-r--r--drivers/gpu/drm/radeon/rs690.c10
-rw-r--r--drivers/gpu/drm/radeon/rv730_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c4
-rw-r--r--drivers/gpu/drm/radeon/si.c5
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c43
-rw-r--r--drivers/gpu/drm/ttm/ttm_lock.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c18
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c64
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c69
-rw-r--r--drivers/gpu/vga/vgaarb.c6
-rw-r--r--drivers/hid/hid-ids.h6
-rw-r--r--drivers/hid/hid-lg.c5
-rw-r--r--drivers/hid/usbhid/hid-quirks.c10
-rw-r--r--drivers/hid/wacom_wac.c5
-rw-r--r--drivers/hwmon/Kconfig3
-rw-r--r--drivers/hwmon/applesmc.c2
-rw-r--r--drivers/hwmon/scpi-hwmon.c21
-rw-r--r--drivers/hwmon/tmp102.c16
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-davinci.c11
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c6
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h1
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c16
-rw-r--r--drivers/i2c/busses/i2c-i801.c6
-rw-r--r--drivers/i2c/busses/i2c-imx.c5
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c27
-rw-r--r--drivers/i2c/busses/i2c-rcar.c4
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c2
-rw-r--r--drivers/i2c/busses/i2c-st.c2
-rw-r--r--drivers/i2c/busses/i2c-xiic.c4
-rw-r--r--drivers/i2c/i2c-core.c2
-rw-r--r--drivers/iio/adc/ad7793.c2
-rw-r--r--drivers/iio/adc/qcom-spmi-vadc.c4
-rw-r--r--drivers/iio/adc/vf610_adc.c22
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c1
-rw-r--r--drivers/iio/dac/ad5064.c91
-rw-r--r--drivers/iio/humidity/si7020.c8
-rw-r--r--drivers/iio/industrialio-buffer.c2
-rw-r--r--drivers/iio/industrialio-core.c2
-rw-r--r--drivers/iio/light/apds9960.c1
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c6
-rw-r--r--drivers/infiniband/core/cma.c5
-rw-r--r--drivers/infiniband/core/mad.c5
-rw-r--r--drivers/infiniband/core/sa_query.c32
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c27
-rw-r--r--drivers/infiniband/core/verbs.c43
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c19
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c11
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c14
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c13
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c48
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h5
-rw-r--r--drivers/input/joystick/db9.c1
-rw-r--r--drivers/input/joystick/gamecon.c1
-rw-r--r--drivers/input/joystick/turbografx.c1
-rw-r--r--drivers/input/joystick/walkera0701.c1
-rw-r--r--drivers/input/misc/arizona-haptics.c3
-rw-r--r--drivers/input/mouse/elan_i2c_core.c3
-rw-r--r--drivers/input/serio/parkbd.c1
-rw-r--r--drivers/input/tablet/aiptek.c9
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c34
-rw-r--r--drivers/input/touchscreen/elants_i2c.c21
-rw-r--r--drivers/iommu/amd_iommu_v2.c20
-rw-r--r--drivers/iommu/intel-iommu.c4
-rw-r--r--drivers/iommu/intel-svm.c20
-rw-r--r--drivers/iommu/iommu.c2
-rw-r--r--drivers/iommu/s390-iommu.c23
-rw-r--r--drivers/irqchip/irq-gic-common.c13
-rw-r--r--drivers/irqchip/irq-gic.c38
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c5
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c23
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c7
-rw-r--r--drivers/isdn/hisax/config.c2
-rw-r--r--drivers/isdn/hisax/hfc_pci.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c2
-rw-r--r--drivers/isdn/hisax/q931.c6
-rw-r--r--drivers/lightnvm/Kconfig1
-rw-r--r--drivers/lightnvm/core.c158
-rw-r--r--drivers/lightnvm/gennvm.c105
-rw-r--r--drivers/lightnvm/gennvm.h2
-rw-r--r--drivers/lightnvm/rrpc.c57
-rw-r--r--drivers/md/dm-crypt.c22
-rw-r--r--drivers/md/dm-mpath.c30
-rw-r--r--drivers/md/dm-thin-metadata.c34
-rw-r--r--drivers/md/dm-thin.c6
-rw-r--r--drivers/md/dm.c7
-rw-r--r--drivers/md/md.c22
-rw-r--r--drivers/md/md.h8
-rw-r--r--drivers/md/persistent-data/dm-btree.c101
-rw-r--r--drivers/md/persistent-data/dm-btree.h14
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c32
-rw-r--r--drivers/md/raid10.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c4
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c3
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c4
-rw-r--r--drivers/media/pci/cx88/cx88-mpeg.c3
-rw-r--r--drivers/media/pci/cx88/cx88-video.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c4
-rw-r--r--drivers/media/pci/tw68/tw68-core.c4
-rw-r--r--drivers/media/usb/airspy/airspy.c2
-rw-r--r--drivers/media/usb/hackrf/hackrf.c13
-rw-r--r--drivers/misc/cxl/native.c2
-rw-r--r--drivers/mmc/card/block.c11
-rw-r--r--drivers/mmc/core/mmc.c93
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/mtk-sd.c2
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mtd/nand/jz4740_nand.c1
-rw-r--r--drivers/mtd/nand/nand_base.c2
-rw-r--r--drivers/mtd/ofpart.c12
-rw-r--r--drivers/net/can/bfin_can.c2
-rw-r--r--drivers/net/can/c_can/c_can.c7
-rw-r--r--drivers/net/can/cc770/cc770.c2
-rw-r--r--drivers/net/can/flexcan.c4
-rw-r--r--drivers/net/can/janz-ican3.c1
-rw-r--r--drivers/net/can/m_can/m_can.c7
-rw-r--r--drivers/net/can/pch_can.c3
-rw-r--r--drivers/net/can/rcar_can.c11
-rw-r--r--drivers/net/can/sja1000/sja1000.c4
-rw-r--r--drivers/net/can/sun4i_can.c1
-rw-r--r--drivers/net/can/ti_hecc.c7
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/esd_usb2.c1
-rw-r--r--drivers/net/can/usb/kvaser_usb.c5
-rw-r--r--drivers/net/can/usb/usb_8dev.c4
-rw-r--r--drivers/net/can/xilinx_can.c9
-rw-r--r--drivers/net/dsa/mv88e6060.c114
-rw-r--r--drivers/net/dsa/mv88e6060.h111
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c5
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c69
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h4
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/reg.h1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c7
-rw-r--r--drivers/net/ethernet/aurora/Kconfig21
-rw-r--r--drivers/net/ethernet/aurora/Makefile1
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c1552
-rw-r--r--drivers/net/ethernet/aurora/nb8800.h316
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c90
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c9
-rw-r--r--drivers/net/ethernet/cadence/macb.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.h5
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h5
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c22
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c16
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c14
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c28
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c9
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c2
-rw-r--r--drivers/net/ethernet/dlink/Kconfig5
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c55
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h15
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c19
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c30
-rw-r--r--drivers/net/ethernet/freescale/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c14
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c49
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h11
-rw-r--r--drivers/net/ethernet/icplus/Kconfig13
-rw-r--r--drivers/net/ethernet/icplus/Makefile5
-rw-r--r--drivers/net/ethernet/icplus/ipg.c2300
-rw-r--r--drivers/net/ethernet/icplus/ipg.h748
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c11
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c3
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c33
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c76
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c53
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c56
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c63
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c5
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169.c6
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c19
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c55
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h25
-rw-r--r--drivers/net/ethernet/sfc/ef10.c24
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/efx.h5
-rw-r--r--drivers/net/ethernet/sfc/farch.c2
-rw-r--r--drivers/net/ethernet/sfc/txc43128_phy.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c28
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c3
-rw-r--r--drivers/net/ethernet/via/via-velocity.c24
-rw-r--r--drivers/net/fjes/fjes_hw.c2
-rw-r--r--drivers/net/geneve.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c14
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/phy/at803x.c4
-rw-r--r--drivers/net/phy/broadcom.c2
-rw-r--r--drivers/net/phy/marvell.c16
-rw-r--r--drivers/net/phy/mdio-mux.c7
-rw-r--r--drivers/net/phy/micrel.c13
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/phy/vitesse.c16
-rw-r--r--drivers/net/ppp/pppoe.c14
-rw-r--r--drivers/net/ppp/pptp.c6
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/cdc_ether.c5
-rw-r--r--drivers/net/usb/cdc_mbim.c26
-rw-r--r--drivers/net/usb/cdc_ncm.c18
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c21
-rw-r--r--drivers/net/virtio_net.c34
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c78
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vrf.c11
-rw-r--r--drivers/net/vxlan.c75
-rw-r--r--drivers/net/wan/hdlc_fr.c10
-rw-r--r--drivers/net/wan/x25_asy.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c49
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h17
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c11
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c88
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c2
-rw-r--r--drivers/net/xen-netback/netback.c34
-rw-r--r--drivers/nvme/host/Makefile3
-rw-r--r--drivers/nvme/host/lightnvm.c187
-rw-r--r--drivers/nvme/host/nvme.h14
-rw-r--r--drivers/nvme/host/pci.c51
-rw-r--r--drivers/of/address.c5
-rw-r--r--drivers/of/fdt.c7
-rw-r--r--drivers/of/irq.c3
-rw-r--r--drivers/of/of_reserved_mem.c8
-rw-r--r--drivers/parisc/iommu-helpers.h15
-rw-r--r--drivers/pci/host/pcie-altera.c23
-rw-r--r--drivers/pci/host/pcie-designware.c1
-rw-r--r--drivers/pci/host/pcie-hisi.c4
-rw-r--r--drivers/pci/msi.c4
-rw-r--r--drivers/pci/pci-driver.c16
-rw-r--r--drivers/pci/pci-sysfs.c5
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/probe.c4
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/phy-bcm-cygnus-pcie.c16
-rw-r--r--drivers/phy/phy-berlin-sata.c20
-rw-r--r--drivers/phy/phy-brcmstb-sata.c17
-rw-r--r--drivers/phy/phy-core.c21
-rw-r--r--drivers/phy/phy-miphy28lp.c16
-rw-r--r--drivers/phy/phy-miphy365x.c16
-rw-r--r--drivers/phy/phy-mt65xx-usb3.c20
-rw-r--r--drivers/phy/phy-rockchip-usb.c17
-rw-r--r--drivers/pinctrl/Kconfig4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c13
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c8
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c41
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h3
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c11
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c6
-rw-r--r--drivers/powercap/intel_rapl.c7
-rw-r--r--drivers/remoteproc/remoteproc_core.c2
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c2
-rw-r--r--drivers/rtc/rtc-da9063.c19
-rw-r--r--drivers/rtc/rtc-ds1307.c44
-rw-r--r--drivers/rtc/rtc-rk808.c48
-rw-r--r--drivers/s390/cio/chsc.c37
-rw-r--r--drivers/s390/cio/chsc.h15
-rw-r--r--drivers/s390/cio/cio.c14
-rw-r--r--drivers/s390/cio/css.c5
-rw-r--r--drivers/s390/crypto/Makefile7
-rw-r--r--drivers/s390/crypto/ap_bus.c6
-rw-r--r--drivers/s390/crypto/zcrypt_api.c10
-rw-r--r--drivers/s390/crypto/zcrypt_api.h1
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c1
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c3
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/advansys.c2
-rw-r--r--drivers/scsi/hosts.c11
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/mpt3sas/Kconfig9
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c3
-rw-r--r--drivers/scsi/mvsas/mv_init.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c3
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c2
-rw-r--r--drivers/scsi/scsi_debug.c9
-rw-r--r--drivers/scsi/scsi_pm.c20
-rw-r--r--drivers/scsi/scsi_scan.c9
-rw-r--r--drivers/scsi/scsi_sysfs.c22
-rw-r--r--drivers/scsi/sd.c69
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/ses.c30
-rw-r--r--drivers/scsi/st.c5
-rw-r--r--drivers/sh/pm_runtime.c2
-rw-r--r--drivers/soc/mediatek/Kconfig1
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c8
-rw-r--r--drivers/spi/spi-bcm63xx.c4
-rw-r--r--drivers/spi/spi-fsl-dspi.c12
-rw-r--r--drivers/spi/spi-mt65xx.c26
-rw-r--r--drivers/spi/spi-pl022.c28
-rw-r--r--drivers/spi/spi.c4
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/staging/android/ion/ion_chunk_heap.c4
-rw-r--r--drivers/staging/iio/Kconfig3
-rw-r--r--drivers/staging/iio/adc/lpc32xx_adc.c4
-rw-r--r--drivers/staging/iio/iio_simple_dummy_events.c2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h1
-rw-r--r--drivers/staging/lustre/lustre/libcfs/module.c17
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c20
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c48
-rw-r--r--drivers/target/iscsi/iscsi_target.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c10
-rw-r--r--drivers/target/target_core_sbc.c17
-rw-r--r--drivers/target/target_core_stat.c2
-rw-r--r--drivers/target/target_core_tmr.c7
-rw-r--r--drivers/target/target_core_transport.c26
-rw-r--r--drivers/target/target_core_user.c4
-rw-r--r--drivers/thermal/Kconfig2
-rw-r--r--drivers/thermal/imx_thermal.c56
-rw-r--r--drivers/thermal/of-thermal.c2
-rw-r--r--drivers/thermal/power_allocator.c24
-rw-r--r--drivers/thermal/rcar_thermal.c49
-rw-r--r--drivers/thermal/rockchip_thermal.c328
-rw-r--r--drivers/tty/n_tty.c24
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c1
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c8
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c2
-rw-r--r--drivers/tty/serial/earlycon.c2
-rw-r--r--drivers/tty/serial/etraxfs-uart.c2
-rw-r--r--drivers/tty/serial/sh-sci.c2
-rw-r--r--drivers/tty/tty_audit.c2
-rw-r--r--drivers/tty/tty_buffer.c2
-rw-r--r--drivers/tty/tty_io.c4
-rw-r--r--drivers/tty/tty_ioctl.c4
-rw-r--r--drivers/tty/tty_ldisc.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c142
-rw-r--r--drivers/usb/chipidea/debug.c2
-rw-r--r--drivers/usb/chipidea/udc.c17
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c10
-rw-r--r--drivers/usb/class/cdc-acm.c5
-rw-r--r--drivers/usb/class/usblp.c2
-rw-r--r--drivers/usb/core/Kconfig3
-rw-r--r--drivers/usb/core/config.c3
-rw-r--r--drivers/usb/core/hub.c44
-rw-r--r--drivers/usb/core/port.c4
-rw-r--r--drivers/usb/core/quirks.c9
-rw-r--r--drivers/usb/dwc2/hcd.c9
-rw-r--r--drivers/usb/dwc2/platform.c84
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c25
-rw-r--r--drivers/usb/gadget/function/f_fs.c6
-rw-r--r--drivers/usb/gadget/function/f_loopback.c2
-rw-r--r--drivers/usb/gadget/function/f_midi.c3
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c3
-rw-r--r--drivers/usb/host/ohci-at91.c11
-rw-r--r--drivers/usb/host/whci/qset.c4
-rw-r--r--drivers/usb/host/xhci-hub.c62
-rw-r--r--drivers/usb/host/xhci-pci.c8
-rw-r--r--drivers/usb/host/xhci-ring.c35
-rw-r--r--drivers/usb/host/xhci.c18
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/musb_core.c20
-rw-r--r--drivers/usb/musb/musb_host.c22
-rw-r--r--drivers/usb/phy/Kconfig4
-rw-r--r--drivers/usb/phy/phy-msm-usb.c6
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c12
-rw-r--r--drivers/usb/phy/phy-omap-otg.c2
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c11
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ipaq.c3
-rw-r--r--drivers/usb/serial/option.c11
-rw-r--r--drivers/usb/serial/qcserial.c94
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.h4
-rw-r--r--drivers/usb/serial/usb-serial-simple.c1
-rw-r--r--drivers/usb/storage/uas.c4
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/usb/storage/unusual_uas.h2
-rw-r--r--drivers/vfio/Kconfig15
-rw-r--r--drivers/vfio/pci/vfio_pci.c10
-rw-r--r--drivers/vfio/platform/vfio_platform.c1
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c5
-rw-r--r--drivers/vfio/vfio.c188
-rw-r--r--drivers/vhost/vhost.c8
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c13
-rw-r--r--drivers/video/fbdev/omap2/dss/venc.c12
-rw-r--r--drivers/virtio/virtio.c1
-rw-r--r--drivers/virtio/virtio_ring.c48
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/watchdog/mtk_wdt.c1
-rw-r--r--drivers/watchdog/omap_wdt.c2
-rw-r--r--drivers/watchdog/pnx4008_wdt.c8
-rw-r--r--drivers/watchdog/tegra_wdt.c4
-rw-r--r--drivers/watchdog/w83977f_wdt.c2
-rw-r--r--drivers/xen/events/events_base.c5
-rw-r--r--drivers/xen/events/events_fifo.c23
-rw-r--r--drivers/xen/evtchn.c123
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/xen-pciback/pciback.h1
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c75
-rw-r--r--drivers/xen/xen-pciback/xenbus.c4
-rw-r--r--drivers/xen/xen-scsiback.c2
-rw-r--r--fs/9p/vfs_inode.c4
-rw-r--r--fs/Kconfig6
-rw-r--r--fs/block_dev.c27
-rw-r--r--fs/btrfs/backref.c2
-rw-r--r--fs/btrfs/ctree.h4
-rw-r--r--fs/btrfs/extent-tree.c133
-rw-r--r--fs/btrfs/file.c28
-rw-r--r--fs/btrfs/free-space-cache.c10
-rw-r--r--fs/btrfs/inode.c24
-rw-r--r--fs/btrfs/qgroup.c5
-rw-r--r--fs/btrfs/scrub.c62
-rw-r--r--fs/btrfs/tests/free-space-tests.c4
-rw-r--r--fs/btrfs/transaction.c33
-rw-r--r--fs/btrfs/transaction.h6
-rw-r--r--fs/btrfs/volumes.c16
-rw-r--r--fs/btrfs/volumes.h2
-rw-r--r--fs/cachefiles/rdwr.c2
-rw-r--r--fs/cifs/inode.c6
-rw-r--r--fs/configfs/dir.c110
-rw-r--r--fs/dax.c4
-rw-r--r--fs/direct-io.c11
-rw-r--r--fs/dlm/lowcomms.c4
-rw-r--r--fs/exofs/inode.c5
-rw-r--r--fs/ext2/super.c2
-rw-r--r--fs/ext4/crypto.c2
-rw-r--r--fs/ext4/ext4.h51
-rw-r--r--fs/ext4/super.c6
-rw-r--r--fs/ext4/symlink.c2
-rw-r--r--fs/ext4/sysfs.c2
-rw-r--r--fs/fat/dir.c16
-rw-r--r--fs/fuse/cuse.c2
-rw-r--r--fs/fuse/file.c2
-rw-r--r--fs/hugetlbfs/inode.c65
-rw-r--r--fs/jbd2/transaction.c12
-rw-r--r--fs/namei.c1
-rw-r--r--fs/ncpfs/ioctl.c2
-rw-r--r--fs/nfs/inode.c17
-rw-r--r--fs/nfs/internal.h2
-rw-r--r--fs/nfs/nfs42proc.c3
-rw-r--r--fs/nfs/nfs4client.c2
-rw-r--r--fs/nfs/nfs4file.c59
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/nfs/nfs4xdr.c1
-rw-r--r--fs/nfs/objlayout/objio_osd.c5
-rw-r--r--fs/nfs/pagelist.c2
-rw-r--r--fs/nfs/pnfs.c60
-rw-r--r--fs/ocfs2/namei.c2
-rw-r--r--fs/overlayfs/copy_up.c23
-rw-r--r--fs/overlayfs/inode.c19
-rw-r--r--fs/overlayfs/overlayfs.h3
-rw-r--r--fs/proc/base.c1
-rw-r--r--fs/splice.c8
-rw-r--r--fs/sysv/inode.c11
-rw-r--r--include/asm-generic/tlb.h2
-rw-r--r--include/drm/drmP.h10
-rw-r--r--include/drm/drm_atomic.h3
-rw-r--r--include/kvm/arm_vgic.h2
-rw-r--r--include/linux/acpi.h4
-rw-r--r--include/linux/bitops.h2
-rw-r--r--include/linux/blkdev.h5
-rw-r--r--include/linux/bpf.h5
-rw-r--r--include/linux/cgroup-defs.h13
-rw-r--r--include/linux/cgroup.h47
-rw-r--r--include/linux/configfs.h10
-rw-r--r--include/linux/cpufreq.h1
-rw-r--r--include/linux/dns_resolver.h2
-rw-r--r--include/linux/enclosure.h4
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/ipv6.h2
-rw-r--r--include/linux/irqchip/arm-gic-v3.h1
-rw-r--r--include/linux/jump_label.h2
-rw-r--r--include/linux/kmemleak.h2
-rw-r--r--include/linux/kref.h33
-rw-r--r--include/linux/kvm_host.h11
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/lightnvm.h197
-rw-r--r--include/linux/lockdep.h2
-rw-r--r--include/linux/marvell_phy.h1
-rw-r--r--include/linux/mlx4/device.h11
-rw-r--r--include/linux/mlx5/mlx5_ifc.h24
-rw-r--r--include/linux/mmdebug.h1
-rw-r--r--include/linux/net.h13
-rw-r--r--include/linux/netdevice.h35
-rw-r--r--include/linux/netfilter/ipset/ip_set.h2
-rw-r--r--include/linux/netfilter/nfnetlink.h2
-rw-r--r--include/linux/netfilter_ingress.h13
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/of_dma.h2
-rw-r--r--include/linux/of_irq.h19
-rw-r--r--include/linux/pci.h9
-rw-r--r--include/linux/perf_event.h6
-rw-r--r--include/linux/platform_data/edma.h2
-rw-r--r--include/linux/proportions.h2
-rw-r--r--include/linux/qed/common_hsi.h2
-rw-r--r--include/linux/qed/qed_chain.h3
-rw-r--r--include/linux/rhashtable.h18
-rw-r--r--include/linux/scpi_protocol.h2
-rw-r--r--include/linux/signal.h1
-rw-r--r--include/linux/slab.h45
-rw-r--r--include/linux/stop_machine.h6
-rw-r--r--include/linux/syscalls.h2
-rw-r--r--include/linux/thermal.h3
-rw-r--r--include/linux/tty.h6
-rw-r--r--include/linux/types.h2
-rw-r--r--include/linux/uprobes.h2
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/vfio.h3
-rw-r--r--include/linux/wait.h10
-rw-r--r--include/net/af_unix.h1
-rw-r--r--include/net/dst.h33
-rw-r--r--include/net/inet_sock.h27
-rw-r--r--include/net/inetpeer.h1
-rw-r--r--include/net/ip6_fib.h3
-rw-r--r--include/net/ip6_route.h17
-rw-r--r--include/net/ip6_tunnel.h3
-rw-r--r--include/net/ip_tunnels.h3
-rw-r--r--include/net/ipv6.h22
-rw-r--r--include/net/mac80211.h6
-rw-r--r--include/net/ndisc.h3
-rw-r--r--include/net/netfilter/nf_tables.h16
-rw-r--r--include/net/sch_generic.h3
-rw-r--r--include/net/sctp/structs.h17
-rw-r--r--include/net/sock.h64
-rw-r--r--include/net/switchdev.h2
-rw-r--r--include/net/vxlan.h2
-rw-r--r--include/net/xfrm.h25
-rw-r--r--include/rdma/ib_mad.h2
-rw-r--r--include/rdma/ib_verbs.h1
-rw-r--r--include/scsi/scsi_host.h3
-rw-r--r--include/sound/hda_register.h3
-rw-r--r--include/sound/soc-dapm.h1
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/nfs.h11
-rw-r--r--include/uapi/linux/openvswitch.h2
-rw-r--r--include/uapi/linux/vfio.h7
-rw-r--r--include/video/imx-ipu-v3.h1
-rw-r--r--include/xen/interface/io/ring.h14
-rw-r--r--init/Kconfig7
-rw-r--r--kernel/bpf/arraymap.c10
-rw-r--r--kernel/bpf/hashtab.c34
-rw-r--r--kernel/bpf/inode.c6
-rw-r--r--kernel/bpf/syscall.c40
-rw-r--r--kernel/bpf/verifier.c3
-rw-r--r--kernel/cgroup.c99
-rw-r--r--kernel/cgroup_freezer.c23
-rw-r--r--kernel/cgroup_pids.c77
-rw-r--r--kernel/cpuset.c33
-rw-r--r--kernel/events/callchain.c2
-rw-r--r--kernel/events/core.c90
-rw-r--r--kernel/events/ring_buffer.c2
-rw-r--r--kernel/events/uprobes.c2
-rw-r--r--kernel/fork.c9
-rw-r--r--kernel/irq_work.c2
-rw-r--r--kernel/jump_label.c2
-rw-r--r--kernel/livepatch/core.c6
-rw-r--r--kernel/locking/lockdep.c2
-rw-r--r--kernel/locking/lockdep_proc.c2
-rw-r--r--kernel/locking/osq_lock.c8
-rw-r--r--kernel/panic.c5
-rw-r--r--kernel/pid.c4
-rw-r--r--kernel/sched/clock.c2
-rw-r--r--kernel/sched/core.c48
-rw-r--r--kernel/sched/cputime.c3
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--kernel/sched/rt.c2
-rw-r--r--kernel/sched/sched.h3
-rw-r--r--kernel/sched/wait.c28
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/stop_machine.c4
-rw-r--r--kernel/trace/ring_buffer.c17
-rw-r--r--kernel/trace/trace_event_perf.c2
-rw-r--r--kernel/trace/trace_events.c16
-rw-r--r--lib/btree.c2
-rw-r--r--lib/dma-debug.c4
-rw-r--r--lib/proportions.c2
-rw-r--r--lib/rhashtable.c67
-rw-r--r--mm/backing-dev.c19
-rw-r--r--mm/huge_memory.c4
-rw-r--r--mm/hugetlb.c27
-rw-r--r--mm/kasan/kasan.c2
-rw-r--r--mm/memcontrol.c49
-rw-r--r--mm/memory.c8
-rw-r--r--mm/oom_kill.c2
-rw-r--r--mm/page-writeback.c6
-rw-r--r--mm/page_alloc.c3
-rw-r--r--mm/shmem.c34
-rw-r--r--mm/slab.c2
-rw-r--r--mm/slab.h2
-rw-r--r--mm/slab_common.c6
-rw-r--r--mm/slob.c2
-rw-r--r--mm/slub.c304
-rw-r--r--mm/vmalloc.c5
-rw-r--r--mm/vmstat.c8
-rw-r--r--mm/zswap.c6
-rw-r--r--net/8021q/vlan_core.c4
-rw-r--r--net/ax25/af_ax25.c3
-rw-r--r--net/batman-adv/distributed-arp-table.c5
-rw-r--r--net/batman-adv/routing.c19
-rw-r--r--net/batman-adv/translation-table.c16
-rw-r--r--net/bluetooth/af_bluetooth.c6
-rw-r--r--net/bluetooth/sco.c3
-rw-r--r--net/bluetooth/smp.c7
-rw-r--r--net/bridge/br_stp.c2
-rw-r--r--net/bridge/br_stp_if.c2
-rw-r--r--net/caif/caif_socket.c4
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/dev.c18
-rw-r--r--net/core/neighbour.c6
-rw-r--r--net/core/netclassid_cgroup.c28
-rw-r--r--net/core/netprio_cgroup.c9
-rw-r--r--net/core/rtnetlink.c274
-rw-r--r--net/core/scm.c2
-rw-r--r--net/core/skbuff.c6
-rw-r--r--net/core/sock.c19
-rw-r--r--net/core/stream.c6
-rw-r--r--net/dccp/ipv6.c37
-rw-r--r--net/dccp/proto.c3
-rw-r--r--net/decnet/af_decnet.c11
-rw-r--r--net/dns_resolver/dns_query.c2
-rw-r--r--net/hsr/hsr_device.c2
-rw-r--r--net/ipv4/af_inet.c3
-rw-r--r--net/ipv4/fib_frontend.c9
-rw-r--r--net/ipv4/fou.c3
-rw-r--r--net/ipv4/igmp.c5
-rw-r--r--net/ipv4/inet_connection_sock.c4
-rw-r--r--net/ipv4/ipmr.c23
-rw-r--r--net/ipv4/netfilter/Kconfig1
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c2
-rw-r--r--net/ipv4/raw.c8
-rw-r--r--net/ipv4/tcp.c28
-rw-r--r--net/ipv4/tcp_diag.c2
-rw-r--r--net/ipv4/tcp_input.c23
-rw-r--r--net/ipv4/tcp_ipv4.c22
-rw-r--r--net/ipv4/tcp_output.c23
-rw-r--r--net/ipv4/tcp_timer.c14
-rw-r--r--net/ipv4/udp.c1
-rw-r--r--net/ipv6/addrconf.c10
-rw-r--r--net/ipv6/af_inet6.c18
-rw-r--r--net/ipv6/datagram.c4
-rw-r--r--net/ipv6/exthdrs.c3
-rw-r--r--net/ipv6/icmp.c14
-rw-r--r--net/ipv6/inet6_connection_sock.c21
-rw-r--r--net/ipv6/ip6_gre.c8
-rw-r--r--net/ipv6/ip6_tunnel.c2
-rw-r--r--net/ipv6/ip6mr.c19
-rw-r--r--net/ipv6/ipv6_sockglue.c33
-rw-r--r--net/ipv6/mcast.c2
-rw-r--r--net/ipv6/ndisc.c10
-rw-r--r--net/ipv6/netfilter/Kconfig1
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c5
-rw-r--r--net/ipv6/raw.c8
-rw-r--r--net/ipv6/reassembly.c10
-rw-r--r--net/ipv6/route.c24
-rw-r--r--net/ipv6/syncookies.c2
-rw-r--r--net/ipv6/tcp_ipv6.c54
-rw-r--r--net/ipv6/udp.c8
-rw-r--r--net/irda/af_irda.c3
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/l2tp/l2tp_ip6.c8
-rw-r--r--net/mac80211/agg-tx.c3
-rw-r--r--net/mac80211/cfg.c11
-rw-r--r--net/mac80211/ieee80211_i.h4
-rw-r--r--net/mac80211/iface.c5
-rw-r--r--net/mac80211/main.c3
-rw-r--r--net/mac80211/mesh_pathtbl.c8
-rw-r--r--net/mac80211/mlme.c17
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mac80211/scan.c9
-rw-r--r--net/mac80211/util.c113
-rw-r--r--net/mac80211/vht.c10
-rw-r--r--net/mpls/af_mpls.c43
-rw-r--r--net/mpls/mpls_iptunnel.c4
-rw-r--r--net/netfilter/Kconfig6
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h17
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c14
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c64
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c18
-rw-r--r--net/netfilter/ipset/ip_set_core.c14
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h26
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c16
-rw-r--r--net/netfilter/nf_tables_api.c99
-rw-r--r--net/netfilter/nfnetlink.c4
-rw-r--r--net/netfilter/nfnetlink_log.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c9
-rw-r--r--net/netfilter/nft_counter.c49
-rw-r--r--net/netfilter/nft_dynset.c5
-rw-r--r--net/nfc/llcp_sock.c2
-rw-r--r--net/openvswitch/conntrack.c16
-rw-r--r--net/openvswitch/dp_notify.c2
-rw-r--r--net/openvswitch/vport-geneve.c1
-rw-r--r--net/openvswitch/vport-gre.c1
-rw-r--r--net/openvswitch/vport-netdev.c8
-rw-r--r--net/openvswitch/vport.c8
-rw-r--r--net/openvswitch/vport.h8
-rw-r--r--net/packet/af_packet.c96
-rw-r--r--net/rds/connection.c6
-rw-r--r--net/rds/send.c4
-rw-r--r--net/rfkill/core.c6
-rw-r--r--net/rxrpc/ar-ack.c4
-rw-r--r--net/rxrpc/ar-output.c2
-rw-r--r--net/sched/sch_api.c25
-rw-r--r--net/sched/sch_generic.c2
-rw-r--r--net/sched/sch_mq.c4
-rw-r--r--net/sched/sch_mqprio.c4
-rw-r--r--net/sctp/auth.c4
-rw-r--r--net/sctp/ipv6.c24
-rw-r--r--net/sctp/outqueue.c2
-rw-r--r--net/sctp/sm_make_chunk.c4
-rw-r--r--net/sctp/sm_statefuns.c3
-rw-r--r--net/sctp/socket.c51
-rw-r--r--net/socket.c22
-rw-r--r--net/sunrpc/sched.c6
-rw-r--r--net/sunrpc/svc.c13
-rw-r--r--net/sunrpc/xprtsock.c14
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/socket.c10
-rw-r--r--net/tipc/udp_media.c7
-rw-r--r--net/unix/af_unix.c305
-rw-r--r--net/wireless/nl80211.c5
-rw-r--r--net/wireless/reg.c5
-rw-r--r--net/xfrm/xfrm_policy.c50
-rw-r--r--samples/bpf/Makefile7
-rwxr-xr-xscripts/kernel-doc2
-rwxr-xr-xscripts/link-vmlinux.sh2
-rw-r--r--security/keys/encrypted-keys/encrypted.c2
-rw-r--r--security/keys/trusted.c5
-rw-r--r--security/keys/user_defined.c5
-rw-r--r--security/selinux/ss/conditional.c4
-rw-r--r--sound/firewire/dice/dice.c4
-rw-r--r--sound/pci/hda/hda_intel.c30
-rw-r--r--sound/pci/hda/patch_ca0132.c3
-rw-r--r--sound/pci/hda/patch_conexant.c5
-rw-r--r--sound/pci/hda/patch_hdmi.c9
-rw-r--r--sound/pci/hda/patch_realtek.c85
-rw-r--r--sound/pci/hda/patch_sigmatel.c45
-rw-r--r--sound/pci/rme96.c41
-rw-r--r--sound/soc/codecs/arizona.c16
-rw-r--r--sound/soc/codecs/es8328.c16
-rw-r--r--sound/soc/codecs/nau8825.c31
-rw-r--r--sound/soc/codecs/rl6231.c6
-rw-r--r--sound/soc/codecs/rt5645.c61
-rw-r--r--sound/soc/codecs/rt5670.h12
-rw-r--r--sound/soc/codecs/rt5677.c100
-rw-r--r--sound/soc/codecs/wm8960.c2
-rw-r--r--sound/soc/codecs/wm8962.c4
-rw-r--r--sound/soc/davinci/davinci-mcasp.c12
-rw-r--r--sound/soc/fsl/Kconfig2
-rw-r--r--sound/soc/fsl/fsl_sai.c3
-rw-r--r--sound/soc/intel/Kconfig2
-rw-r--r--sound/soc/intel/skylake/skl-topology.c1
-rw-r--r--sound/soc/rockchip/rockchip_spdif.c2
-rw-r--r--sound/soc/rockchip/rockchip_spdif.h6
-rw-r--r--sound/soc/sh/rcar/gen.c2
-rw-r--r--sound/soc/sh/rcar/src.c7
-rw-r--r--sound/soc/soc-core.c6
-rw-r--r--sound/soc/soc-dapm.c7
-rw-r--r--sound/soc/soc-ops.c2
-rw-r--r--sound/soc/soc-topology.c3
-rw-r--r--sound/soc/sti/uniperif_player.c9
-rw-r--r--sound/soc/sti/uniperif_reader.c3
-rw-r--r--sound/soc/sunxi/sun4i-codec.c27
-rw-r--r--sound/usb/midi.c46
-rw-r--r--sound/usb/mixer.c2
-rw-r--r--sound/usb/mixer_maps.c12
-rw-r--r--sound/usb/mixer_quirks.c37
-rw-r--r--sound/usb/mixer_quirks.h4
-rw-r--r--sound/usb/quirks-table.h11
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--sound/usb/usbaudio.h1
-rw-r--r--tools/Makefile11
-rw-r--r--tools/net/Makefile7
-rw-r--r--tools/perf/builtin-inject.c1
-rw-r--r--tools/perf/builtin-report.c6
-rw-r--r--tools/perf/ui/browsers/hists.c7
-rw-r--r--tools/perf/util/build-id.c1
-rw-r--r--tools/perf/util/dso.c17
-rw-r--r--tools/perf/util/dso.h1
-rw-r--r--tools/perf/util/machine.c1
-rw-r--r--tools/perf/util/probe-finder.c24
-rw-r--r--tools/perf/util/symbol.c34
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/power/x86/turbostat/turbostat.c8
-rw-r--r--tools/testing/nvdimm/test/nfit.c49
-rw-r--r--tools/testing/selftests/futex/README2
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c11
-rw-r--r--tools/virtio/linux/kernel.h6
-rw-r--r--tools/virtio/linux/virtio.h6
-rw-r--r--tools/virtio/linux/virtio_config.h20
-rw-r--r--tools/vm/page-types.c1
-rw-r--r--virt/kvm/arm/arch_timer.c28
-rw-r--r--virt/kvm/arm/vgic.c50
1309 files changed, 16347 insertions, 11897 deletions
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 31d1d658827f..c0d8788e75d3 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -587,7 +587,7 @@ used to control it:
 
   modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
       preaction=<preaction type> preop=<preop type> start_now=x
-      nowayout=x ifnum_to_use=n
+      nowayout=x ifnum_to_use=n panic_wdt_timeout=<t>
 
 ifnum_to_use specifies which interface the watchdog timer should use.
 The default is -1, which means to pick the first one registered.
@@ -597,7 +597,9 @@ is the amount of seconds before the reset that the pre-timeout panic will
 occur (if pretimeout is zero, then pretimeout will not be enabled). Note
 that the pretimeout is the time before the final timeout. So if the
 timeout is 50 seconds and the pretimeout is 10 seconds, then the pretimeout
-will occur in 40 second (10 seconds before the timeout).
+will occur in 40 second (10 seconds before the timeout). The panic_wdt_timeout
+is the value of timeout which is set on kernel panic, in order to let actions
+such as kdump to occur during panic.
 
 The action may be "reset", "power_cycle", or "power_off", and
 specifies what to do when the timer times out, and defaults to
@@ -634,6 +636,7 @@ for configuring the watchdog:
 	ipmi_watchdog.preop=<preop type>
 	ipmi_watchdog.start_now=x
 	ipmi_watchdog.nowayout=x
+	ipmi_watchdog.panic_wdt_timeout=<t>
 
 The options are the same as the module parameter options.
 
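(A hypothetical usage sketch of the new parameter; the numeric values here
are illustrative assumptions, not part of this patch. With a 50-second
watchdog, the module could be loaded as

    modprobe ipmi_watchdog timeout=50 pretimeout=10 action=reset \
        panic_wdt_timeout=120

so that on a kernel panic the timer is rearmed to a longer 120 seconds,
leaving room for an action such as kdump to finish before the hardware
resets the machine.)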
diff --git a/Documentation/arm/keystone/Overview.txt b/Documentation/arm/keystone/Overview.txt
index f17bc4c9dff9..400c0c270d2e 100644
--- a/Documentation/arm/keystone/Overview.txt
+++ b/Documentation/arm/keystone/Overview.txt
@@ -49,24 +49,6 @@ specified through DTS. Following are the DTS used:-
 The device tree documentation for the keystone machines are located at
     Documentation/devicetree/bindings/arm/keystone/keystone.txt
 
-Known issues & workaround
--------------------------
-
-Some of the device drivers used on keystone are re-used from that from
-DaVinci and other TI SoCs. These device drivers may use clock APIs directly.
-Some of the keystone specific drivers such as netcp uses run time power
-management API instead to enable clock. As this API has limitations on
-keystone, following workaround is needed to boot Linux.
-
-  Add 'clk_ignore_unused' to the bootargs env variable in u-boot. Otherwise
-  clock frameworks will try to disable clocks that are unused and disable
-  the hardware. This is because netcp related power domain and clock
-  domains are enabled in u-boot as run time power management API currently
-  doesn't enable clocks for netcp due to a limitation. This workaround is
-  expected to be removed in the future when proper API support becomes
-  available. Until then, this work around is needed.
-
-
 Document Author
 ---------------
 Murali Karicheri <m-karicheri2@ti.com>
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index 2f6c6ff7161d..d8880ca30af4 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -70,3 +70,6 @@ use_per_node_hctx=[0/1]: Default: 0
70 parameter. 70 parameter.
71 1: The multi-queue block layer is instantiated with a hardware dispatch 71 1: The multi-queue block layer is instantiated with a hardware dispatch
72 queue for each CPU node in the system. 72 queue for each CPU node in the system.
73
74use_lightnvm=[0/1]: Default: 0
75 Register device with LightNVM. Requires blk-mq to be used.
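A minimal sketch of loading the driver with the new option (assuming LightNVM
support is compiled in):

  modprobe null_blk queue_mode=2 use_lightnvm=1

queue_mode=2 selects the multi-queue block layer, which use_lightnvm requires
per the text above.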
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index d3d0a4fb1c73..079b42a81d7c 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -22,8 +22,7 @@ Required properties:
22Optional properties: 22Optional properties:
23- ti,hwmods: Name of the hwmods associated to the eDMA CC 23- ti,hwmods: Name of the hwmods associated to the eDMA CC
24- ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow 24- ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow
25 these channels will be SW triggered channels. The list must 25 these channels will be SW triggered channels. See example.
26 contain 16 bits numbers, see example.
27- ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by 26- ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
28 the driver, they are allocated to be used by for example the 27 the driver, they are allocated to be used by for example the
29 DSP. See example. 28 DSP. See example.
@@ -56,10 +55,9 @@ edma: edma@49000000 {
56 ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>; 55 ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>;
57 56
58 /* Channel 20 and 21 is allocated for memcpy */ 57 /* Channel 20 and 21 is allocated for memcpy */
59 ti,edma-memcpy-channels = /bits/ 16 <20 21>; 58 ti,edma-memcpy-channels = <20 21>;
60 /* The following PaRAM slots are reserved: 35-45 and 100-110 */ 59 /* The following PaRAM slots are reserved: 35-44 and 100-109 */
61 ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>, 60 ti,edma-reserved-slot-ranges = <35 10>, <100 10>;
62 /bits/ 16 <100 10>;
63}; 61};
64 62
65edma_tptc0: tptc@49800000 { 63edma_tptc0: tptc@49800000 {
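For clarity, each reserved range above is a <start count> pair: <35 10> covers
the 10 PaRAM slots 35..44 and <100 10> covers slots 100..109, which is exactly
what the corrected comment now says.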
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
index f2455c50533d..120bc4971cf3 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
@@ -11,6 +11,10 @@ Required properties:
11 0 = active high 11 0 = active high
12 1 = active low 12 1 = active low
13 13
14Optional properties:
15- little-endian : GPIO registers are used as little endian. If not
16 present registers are used as big endian by default.
17
14Example: 18Example:
15 19
16gpio0: gpio@1100 { 20gpio0: gpio@1100 {
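The example node is truncated by the diff context; a hypothetical node using
the new property (addresses and compatible string invented for illustration)
would look like:

  gpio1: gpio@2300 {
	#gpio-cells = <2>;
	compatible = "fsl,qoriq-gpio";
	reg = <0x2300 0x100>;
	gpio-controller;
	little-endian;
  };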
diff --git a/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
index b9c32f6fd687..4357e498ef04 100644
--- a/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
+++ b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
@@ -12,7 +12,7 @@ Each key is represented as a sub-node of "allwinner,sun4i-a10-lradc-keys":
12Required subnode-properties: 12Required subnode-properties:
13 - label: Descriptive name of the key. 13 - label: Descriptive name of the key.
14 - linux,code: Keycode to emit. 14 - linux,code: Keycode to emit.
15 - channel: Channel this key is attached to, mut be 0 or 1. 15 - channel: Channel this key is attached to, must be 0 or 1.
16 - voltage: Voltage in µV at lradc input when this key is pressed. 16 - voltage: Voltage in µV at lradc input when this key is pressed.
17 17
18Example: 18Example:
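The binding's example is elided by the diff context; a hypothetical key subnode
consistent with the properties listed above would be:

  button@191 {
	label = "Volume Up";
	linux,code = <115>;	/* KEY_VOLUMEUP */
	channel = <0>;
	voltage = <191274>;	/* µV, illustrative */
  };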
diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt
index f1e2a02381a4..1c63e40659fc 100644
--- a/Documentation/devicetree/bindings/mtd/partition.txt
+++ b/Documentation/devicetree/bindings/mtd/partition.txt
@@ -6,7 +6,9 @@ used for what purposes, but which don't use an on-flash partition table such
6as RedBoot. 6as RedBoot.
7 7
8The partition table should be a subnode of the mtd node and should be named 8The partition table should be a subnode of the mtd node and should be named
9'partitions'. Partitions are defined in subnodes of the partitions node. 9'partitions'. This node should have the following property:
10- compatible : (required) must be "fixed-partitions"
11Partitions are then defined in subnodes of the partitions node.
10 12
11For backwards compatibility partitions as direct subnodes of the mtd device are 13For backwards compatibility partitions as direct subnodes of the mtd device are
12supported. This use is discouraged. 14supported. This use is discouraged.
@@ -36,6 +38,7 @@ Examples:
36 38
37flash@0 { 39flash@0 {
38 partitions { 40 partitions {
41 compatible = "fixed-partitions";
39 #address-cells = <1>; 42 #address-cells = <1>;
40 #size-cells = <1>; 43 #size-cells = <1>;
41 44
@@ -53,6 +56,7 @@ flash@0 {
53 56
54flash@1 { 57flash@1 {
55 partitions { 58 partitions {
59 compatible = "fixed-partitions";
56 #address-cells = <1>; 60 #address-cells = <1>;
57 #size-cells = <2>; 61 #size-cells = <2>;
58 62
@@ -66,6 +70,7 @@ flash@1 {
66 70
67flash@2 { 71flash@2 {
68 partitions { 72 partitions {
73 compatible = "fixed-partitions";
69 #address-cells = <2>; 74 #address-cells = <2>;
70 #size-cells = <2>; 75 #size-cells = <2>;
71 76
diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
index f5a8ca29aff0..aeea50c84e92 100644
--- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
@@ -8,6 +8,11 @@ Required properties:
8- phy-mode: See ethernet.txt file in the same directory 8- phy-mode: See ethernet.txt file in the same directory
9- clocks: a pointer to the reference clock for this device. 9- clocks: a pointer to the reference clock for this device.
10 10
11Optional properties:
12- tx-csum-limit: maximum MTU supported by the port that allows TX checksum.
13 The value is given in bytes. If not specified, the default is 1600B for
14 "marvell,armada-370-neta" and 9800B for others.
15
11Example: 16Example:
12 17
13ethernet@d0070000 { 18ethernet@d0070000 {
@@ -15,6 +20,7 @@ ethernet@d0070000 {
15 reg = <0xd0070000 0x2500>; 20 reg = <0xd0070000 0x2500>;
16 interrupts = <8>; 21 interrupts = <8>;
17 clocks = <&gate_clk 4>; 22 clocks = <&gate_clk 4>;
 23 tx-csum-limit = <9800>;
18 status = "okay"; 24 status = "okay";
19 phy = <&phy0>; 25 phy = <&phy0>;
20 phy-mode = "rgmii-id"; 26 phy-mode = "rgmii-id";
diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
index b38200d2583a..0dfa60d88dd3 100644
--- a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
@@ -1,7 +1,9 @@
1* Temperature Sensor ADC (TSADC) on rockchip SoCs 1* Temperature Sensor ADC (TSADC) on rockchip SoCs
2 2
3Required properties: 3Required properties:
4- compatible : "rockchip,rk3288-tsadc" 4- compatible : should be "rockchip,<name>-tsadc"
5 "rockchip,rk3288-tsadc": found on RK3288 SoCs
6 "rockchip,rk3368-tsadc": found on RK3368 SoCs
5- reg : physical base address of the controller and length of memory mapped 7- reg : physical base address of the controller and length of memory mapped
6 region. 8 region.
7- interrupts : The interrupt number to the cpu. The interrupt specifier format 9- interrupts : The interrupt number to the cpu. The interrupt specifier format
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 6a4b1af724f8..1bba38dd2637 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -32,6 +32,7 @@ Supported adapters:
32 * Intel Sunrise Point-LP (PCH) 32 * Intel Sunrise Point-LP (PCH)
33 * Intel DNV (SOC) 33 * Intel DNV (SOC)
34 * Intel Broxton (SOC) 34 * Intel Broxton (SOC)
35 * Intel Lewisburg (PCH)
35 Datasheets: Publicly available at the Intel website 36 Datasheets: Publicly available at the Intel website
36 37
37On Intel Patsburg and later chipsets, both the normal host SMBus controller 38On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f8aae632f02f..742f69d18fc8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1583,9 +1583,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1583 hwp_only 1583 hwp_only
1584 Only load intel_pstate on systems which support 1584 Only load intel_pstate on systems which support
1585 hardware P state control (HWP) if available. 1585 hardware P state control (HWP) if available.
1586 no_acpi
1587 Don't use ACPI processor performance control objects
1588 _PSS and _PPC specified limits.
1589 1586
1590 intremap= [X86-64, Intel-IOMMU] 1587 intremap= [X86-64, Intel-IOMMU]
1591 on enable Interrupt Remapping (default) 1588 on enable Interrupt Remapping (default)
diff --git a/Documentation/networking/e100.txt b/Documentation/networking/e100.txt
index f862cf3aff34..42ddbd4b52a9 100644
--- a/Documentation/networking/e100.txt
+++ b/Documentation/networking/e100.txt
@@ -181,17 +181,3 @@ For general information, go to the Intel support website at:
181If an issue is identified with the released source code on the supported 181If an issue is identified with the released source code on the supported
182kernel with a supported adapter, email the specific information related to the 182kernel with a supported adapter, email the specific information related to the
183issue to e1000-devel@lists.sourceforge.net. 183issue to e1000-devel@lists.sourceforge.net.
184
185
186License
187=======
188
189This software program is released under the terms of a license agreement
190between you ('Licensee') and Intel. Do not use or load this software or any
191associated materials (collectively, the 'Software') until you have carefully
192read the full terms and conditions of the file COPYING located in this software
193package. By loading or using the Software, you agree to the terms of this
194Agreement. If you do not agree with the terms of this Agreement, do not install
195or use the Software.
196
197* Other names and brands may be claimed as the property of others.
diff --git a/MAINTAINERS b/MAINTAINERS
index f04fb49ef05e..400c142cf7e6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -324,7 +324,7 @@ M: Zhang Rui <rui.zhang@intel.com>
324L: linux-acpi@vger.kernel.org 324L: linux-acpi@vger.kernel.org
325W: https://01.org/linux-acpi 325W: https://01.org/linux-acpi
326S: Supported 326S: Supported
327F: drivers/acpi/video.c 327F: drivers/acpi/acpi_video.c
328 328
329ACPI WMI DRIVER 329ACPI WMI DRIVER
330L: platform-driver-x86@vger.kernel.org 330L: platform-driver-x86@vger.kernel.org
@@ -1853,7 +1853,7 @@ S: Supported
1853F: drivers/net/wireless/ath/ath6kl/ 1853F: drivers/net/wireless/ath/ath6kl/
1854 1854
1855WILOCITY WIL6210 WIRELESS DRIVER 1855WILOCITY WIL6210 WIRELESS DRIVER
1856M: Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com> 1856M: Maya Erez <qca_merez@qca.qualcomm.com>
1857L: linux-wireless@vger.kernel.org 1857L: linux-wireless@vger.kernel.org
1858L: wil6210@qca.qualcomm.com 1858L: wil6210@qca.qualcomm.com
1859S: Supported 1859S: Supported
@@ -1937,7 +1937,7 @@ S: Supported
1937F: drivers/i2c/busses/i2c-at91.c 1937F: drivers/i2c/busses/i2c-at91.c
1938 1938
1939ATMEL ISI DRIVER 1939ATMEL ISI DRIVER
1940M: Josh Wu <josh.wu@atmel.com> 1940M: Ludovic Desroches <ludovic.desroches@atmel.com>
1941L: linux-media@vger.kernel.org 1941L: linux-media@vger.kernel.org
1942S: Supported 1942S: Supported
1943F: drivers/media/platform/soc_camera/atmel-isi.c 1943F: drivers/media/platform/soc_camera/atmel-isi.c
@@ -1956,7 +1956,8 @@ S: Supported
1956F: drivers/net/ethernet/cadence/ 1956F: drivers/net/ethernet/cadence/
1957 1957
1958ATMEL NAND DRIVER 1958ATMEL NAND DRIVER
1959M: Josh Wu <josh.wu@atmel.com> 1959M: Wenyou Yang <wenyou.yang@atmel.com>
1960M: Josh Wu <rainyfeeling@outlook.com>
1960L: linux-mtd@lists.infradead.org 1961L: linux-mtd@lists.infradead.org
1961S: Supported 1962S: Supported
1962F: drivers/mtd/nand/atmel_nand* 1963F: drivers/mtd/nand/atmel_nand*
@@ -2455,7 +2456,9 @@ F: drivers/firmware/broadcom/*
2455 2456
2456BROADCOM STB NAND FLASH DRIVER 2457BROADCOM STB NAND FLASH DRIVER
2457M: Brian Norris <computersforpeace@gmail.com> 2458M: Brian Norris <computersforpeace@gmail.com>
2459M: Kamal Dasu <kdasu.kdev@gmail.com>
2458L: linux-mtd@lists.infradead.org 2460L: linux-mtd@lists.infradead.org
2461L: bcm-kernel-feedback-list@broadcom.com
2459S: Maintained 2462S: Maintained
2460F: drivers/mtd/nand/brcmnand/ 2463F: drivers/mtd/nand/brcmnand/
2461 2464
@@ -2552,7 +2555,7 @@ F: arch/c6x/
2552 2555
2553CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS 2556CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
2554M: David Howells <dhowells@redhat.com> 2557M: David Howells <dhowells@redhat.com>
2555L: linux-cachefs@redhat.com 2558L: linux-cachefs@redhat.com (moderated for non-subscribers)
2556S: Supported 2559S: Supported
2557F: Documentation/filesystems/caching/cachefiles.txt 2560F: Documentation/filesystems/caching/cachefiles.txt
2558F: fs/cachefiles/ 2561F: fs/cachefiles/
@@ -2935,10 +2938,9 @@ S: Maintained
2935F: drivers/platform/x86/compal-laptop.c 2938F: drivers/platform/x86/compal-laptop.c
2936 2939
2937CONEXANT ACCESSRUNNER USB DRIVER 2940CONEXANT ACCESSRUNNER USB DRIVER
2938M: Simon Arlott <cxacru@fire.lp0.eu>
2939L: accessrunner-general@lists.sourceforge.net 2941L: accessrunner-general@lists.sourceforge.net
2940W: http://accessrunner.sourceforge.net/ 2942W: http://accessrunner.sourceforge.net/
2941S: Maintained 2943S: Orphan
2942F: drivers/usb/atm/cxacru.c 2944F: drivers/usb/atm/cxacru.c
2943 2945
2944CONFIGFS 2946CONFIGFS
@@ -2979,6 +2981,7 @@ F: kernel/cpuset.c
2979CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG) 2981CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
2980M: Johannes Weiner <hannes@cmpxchg.org> 2982M: Johannes Weiner <hannes@cmpxchg.org>
2981M: Michal Hocko <mhocko@kernel.org> 2983M: Michal Hocko <mhocko@kernel.org>
2984M: Vladimir Davydov <vdavydov@virtuozzo.com>
2982L: cgroups@vger.kernel.org 2985L: cgroups@vger.kernel.org
2983L: linux-mm@kvack.org 2986L: linux-mm@kvack.org
2984S: Maintained 2987S: Maintained
@@ -4415,6 +4418,7 @@ K: fmc_d.*register
4415 4418
4416FPGA MANAGER FRAMEWORK 4419FPGA MANAGER FRAMEWORK
4417M: Alan Tull <atull@opensource.altera.com> 4420M: Alan Tull <atull@opensource.altera.com>
4421R: Moritz Fischer <moritz.fischer@ettus.com>
4418S: Maintained 4422S: Maintained
4419F: drivers/fpga/ 4423F: drivers/fpga/
4420F: include/linux/fpga/fpga-mgr.h 4424F: include/linux/fpga/fpga-mgr.h
@@ -4565,7 +4569,7 @@ F: include/linux/frontswap.h
4565 4569
4566FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS 4570FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
4567M: David Howells <dhowells@redhat.com> 4571M: David Howells <dhowells@redhat.com>
4568L: linux-cachefs@redhat.com 4572L: linux-cachefs@redhat.com (moderated for non-subscribers)
4569S: Supported 4573S: Supported
4570F: Documentation/filesystems/caching/ 4574F: Documentation/filesystems/caching/
4571F: fs/fscache/ 4575F: fs/fscache/
@@ -5580,7 +5584,7 @@ R: Jesse Brandeburg <jesse.brandeburg@intel.com>
5580R: Shannon Nelson <shannon.nelson@intel.com> 5584R: Shannon Nelson <shannon.nelson@intel.com>
5581R: Carolyn Wyborny <carolyn.wyborny@intel.com> 5585R: Carolyn Wyborny <carolyn.wyborny@intel.com>
5582R: Don Skidmore <donald.c.skidmore@intel.com> 5586R: Don Skidmore <donald.c.skidmore@intel.com>
5583R: Matthew Vick <matthew.vick@intel.com> 5587R: Bruce Allan <bruce.w.allan@intel.com>
5584R: John Ronciak <john.ronciak@intel.com> 5588R: John Ronciak <john.ronciak@intel.com>
5585R: Mitch Williams <mitch.a.williams@intel.com> 5589R: Mitch Williams <mitch.a.williams@intel.com>
5586L: intel-wired-lan@lists.osuosl.org 5590L: intel-wired-lan@lists.osuosl.org
@@ -5717,13 +5721,6 @@ M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
5717S: Maintained 5721S: Maintained
5718F: net/ipv4/netfilter/ipt_MASQUERADE.c 5722F: net/ipv4/netfilter/ipt_MASQUERADE.c
5719 5723
5720IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER
5721M: Francois Romieu <romieu@fr.zoreil.com>
5722M: Sorbica Shieh <sorbica@icplus.com.tw>
5723L: netdev@vger.kernel.org
5724S: Maintained
5725F: drivers/net/ethernet/icplus/ipg.*
5726
5727IPATH DRIVER 5724IPATH DRIVER
5728M: Mike Marciniszyn <infinipath@intel.com> 5725M: Mike Marciniszyn <infinipath@intel.com>
5729L: linux-rdma@vger.kernel.org 5726L: linux-rdma@vger.kernel.org
@@ -6377,6 +6374,7 @@ F: arch/*/include/asm/pmem.h
6377LIGHTNVM PLATFORM SUPPORT 6374LIGHTNVM PLATFORM SUPPORT
6378M: Matias Bjorling <mb@lightnvm.io> 6375M: Matias Bjorling <mb@lightnvm.io>
6379W: http://github/OpenChannelSSD 6376W: http://github/OpenChannelSSD
6377L: linux-block@vger.kernel.org
6380S: Maintained 6378S: Maintained
6381F: drivers/lightnvm/ 6379F: drivers/lightnvm/
6382F: include/linux/lightnvm.h 6380F: include/linux/lightnvm.h
@@ -6929,13 +6927,21 @@ F: drivers/scsi/megaraid.*
6929F: drivers/scsi/megaraid/ 6927F: drivers/scsi/megaraid/
6930 6928
6931MELLANOX ETHERNET DRIVER (mlx4_en) 6929MELLANOX ETHERNET DRIVER (mlx4_en)
6932M: Amir Vadai <amirv@mellanox.com> 6930M: Eugenia Emantayev <eugenia@mellanox.com>
6933L: netdev@vger.kernel.org 6931L: netdev@vger.kernel.org
6934S: Supported 6932S: Supported
6935W: http://www.mellanox.com 6933W: http://www.mellanox.com
6936Q: http://patchwork.ozlabs.org/project/netdev/list/ 6934Q: http://patchwork.ozlabs.org/project/netdev/list/
6937F: drivers/net/ethernet/mellanox/mlx4/en_* 6935F: drivers/net/ethernet/mellanox/mlx4/en_*
6938 6936
6937MELLANOX ETHERNET DRIVER (mlx5e)
6938M: Saeed Mahameed <saeedm@mellanox.com>
6939L: netdev@vger.kernel.org
6940S: Supported
6941W: http://www.mellanox.com
6942Q: http://patchwork.ozlabs.org/project/netdev/list/
6943F: drivers/net/ethernet/mellanox/mlx5/core/en_*
6944
6939MELLANOX ETHERNET SWITCH DRIVERS 6945MELLANOX ETHERNET SWITCH DRIVERS
6940M: Jiri Pirko <jiri@mellanox.com> 6946M: Jiri Pirko <jiri@mellanox.com>
6941M: Ido Schimmel <idosch@mellanox.com> 6947M: Ido Schimmel <idosch@mellanox.com>
@@ -7908,6 +7914,18 @@ S: Maintained
7908F: net/openvswitch/ 7914F: net/openvswitch/
7909F: include/uapi/linux/openvswitch.h 7915F: include/uapi/linux/openvswitch.h
7910 7916
7917OPERATING PERFORMANCE POINTS (OPP)
7918M: Viresh Kumar <vireshk@kernel.org>
7919M: Nishanth Menon <nm@ti.com>
7920M: Stephen Boyd <sboyd@codeaurora.org>
7921L: linux-pm@vger.kernel.org
7922S: Maintained
7923T: git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
7924F: drivers/base/power/opp/
7925F: include/linux/pm_opp.h
7926F: Documentation/power/opp.txt
7927F: Documentation/devicetree/bindings/opp/
7928
7911OPL4 DRIVER 7929OPL4 DRIVER
7912M: Clemens Ladisch <clemens@ladisch.de> 7930M: Clemens Ladisch <clemens@ladisch.de>
7913L: alsa-devel@alsa-project.org (moderated for non-subscribers) 7931L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -8276,7 +8294,7 @@ F: include/linux/delayacct.h
8276F: kernel/delayacct.c 8294F: kernel/delayacct.c
8277 8295
8278PERFORMANCE EVENTS SUBSYSTEM 8296PERFORMANCE EVENTS SUBSYSTEM
8279M: Peter Zijlstra <a.p.zijlstra@chello.nl> 8297M: Peter Zijlstra <peterz@infradead.org>
8280M: Ingo Molnar <mingo@redhat.com> 8298M: Ingo Molnar <mingo@redhat.com>
8281M: Arnaldo Carvalho de Melo <acme@kernel.org> 8299M: Arnaldo Carvalho de Melo <acme@kernel.org>
8282L: linux-kernel@vger.kernel.org 8300L: linux-kernel@vger.kernel.org
@@ -8369,6 +8387,14 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
8369S: Maintained 8387S: Maintained
8370F: drivers/pinctrl/samsung/ 8388F: drivers/pinctrl/samsung/
8371 8389
8390PIN CONTROLLER - SINGLE
8391M: Tony Lindgren <tony@atomide.com>
8392M: Haojian Zhuang <haojian.zhuang@linaro.org>
8393L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
8394L: linux-omap@vger.kernel.org
8395S: Maintained
8396F: drivers/pinctrl/pinctrl-single.c
8397
8372PIN CONTROLLER - ST SPEAR 8398PIN CONTROLLER - ST SPEAR
8373M: Viresh Kumar <vireshk@kernel.org> 8399M: Viresh Kumar <vireshk@kernel.org>
8374L: spear-devel@list.st.com 8400L: spear-devel@list.st.com
@@ -8935,6 +8961,13 @@ F: drivers/rpmsg/
8935F: Documentation/rpmsg.txt 8961F: Documentation/rpmsg.txt
8936F: include/linux/rpmsg.h 8962F: include/linux/rpmsg.h
8937 8963
8964RENESAS ETHERNET DRIVERS
8965R: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
8966L: netdev@vger.kernel.org
8967L: linux-sh@vger.kernel.org
8968F: drivers/net/ethernet/renesas/
8969F: include/linux/sh_eth.h
8970
8938RESET CONTROLLER FRAMEWORK 8971RESET CONTROLLER FRAMEWORK
8939M: Philipp Zabel <p.zabel@pengutronix.de> 8972M: Philipp Zabel <p.zabel@pengutronix.de>
8940S: Maintained 8973S: Maintained
@@ -9321,7 +9354,6 @@ F: drivers/i2c/busses/i2c-designware-*
9321F: include/linux/platform_data/i2c-designware.h 9354F: include/linux/platform_data/i2c-designware.h
9322 9355
9323SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER 9356SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
9324M: Seungwon Jeon <tgih.jun@samsung.com>
9325M: Jaehoon Chung <jh80.chung@samsung.com> 9357M: Jaehoon Chung <jh80.chung@samsung.com>
9326L: linux-mmc@vger.kernel.org 9358L: linux-mmc@vger.kernel.org
9327S: Maintained 9359S: Maintained
@@ -9418,8 +9450,10 @@ F: include/scsi/sg.h
9418 9450
9419SCSI SUBSYSTEM 9451SCSI SUBSYSTEM
9420M: "James E.J. Bottomley" <JBottomley@odin.com> 9452M: "James E.J. Bottomley" <JBottomley@odin.com>
9421L: linux-scsi@vger.kernel.org
9422T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git 9453T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
9454M: "Martin K. Petersen" <martin.petersen@oracle.com>
9455T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
9456L: linux-scsi@vger.kernel.org
9423S: Maintained 9457S: Maintained
9424F: drivers/scsi/ 9458F: drivers/scsi/
9425F: include/scsi/ 9459F: include/scsi/
@@ -10894,9 +10928,9 @@ S: Maintained
10894F: drivers/media/tuners/tua9001* 10928F: drivers/media/tuners/tua9001*
10895 10929
10896TULIP NETWORK DRIVERS 10930TULIP NETWORK DRIVERS
10897M: Grant Grundler <grundler@parisc-linux.org>
10898L: netdev@vger.kernel.org 10931L: netdev@vger.kernel.org
10899S: Maintained 10932L: linux-parisc@vger.kernel.org
10933S: Orphan
10900F: drivers/net/ethernet/dec/tulip/ 10934F: drivers/net/ethernet/dec/tulip/
10901 10935
10902TUN/TAP driver 10936TUN/TAP driver
diff --git a/Makefile b/Makefile
index 3a0234f50f36..4e2b18d56091 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 4 2PATCHLEVEL = 4
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc6
5NAME = Blurry Fish Butt 5NAME = Blurry Fish Butt
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 2c2ac3f3ff80..6312f607932f 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -445,6 +445,7 @@ config LINUX_LINK_BASE
445 However some customers have peripherals mapped at this addr, so 445 However some customers have peripherals mapped at this addr, so
446 Linux needs to be scooted a bit. 446 Linux needs to be scooted a bit.
447 If you don't know what the above means, leave this setting alone. 447 If you don't know what the above means, leave this setting alone.
448 This needs to match the memory start address specified in the Device Tree.
448 449
449config HIGHMEM 450config HIGHMEM
450 bool "High Memory Support" 451 bool "High Memory Support"
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index f3db32154973..44a578c10732 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -46,6 +46,7 @@
46 snps,pbl = < 32 >; 46 snps,pbl = < 32 >;
47 clocks = <&apbclk>; 47 clocks = <&apbclk>;
48 clock-names = "stmmaceth"; 48 clock-names = "stmmaceth";
49 max-speed = <100>;
49 }; 50 };
50 51
51 ehci@0x40000 { 52 ehci@0x40000 {
diff --git a/arch/arc/boot/dts/nsim_hs.dts b/arch/arc/boot/dts/nsim_hs.dts
index b0eb0e7fe21d..fc81879bc1f5 100644
--- a/arch/arc/boot/dts/nsim_hs.dts
+++ b/arch/arc/boot/dts/nsim_hs.dts
@@ -17,7 +17,8 @@
17 17
18 memory { 18 memory {
19 device_type = "memory"; 19 device_type = "memory";
20 reg = <0x0 0x80000000 0x0 0x40000000 /* 1 GB low mem */ 20 /* CONFIG_LINUX_LINK_BASE needs to match low mem start */
21 reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MB low mem */
21 0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */ 22 0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */
22 }; 23 };
23 24
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index c92c0ef1e9d2..f1ac9818b751 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -1,4 +1,4 @@
1CONFIG_CROSS_COMPILE="arc-linux-uclibc-" 1CONFIG_CROSS_COMPILE="arc-linux-"
2CONFIG_DEFAULT_HOSTNAME="ARCLinux" 2CONFIG_DEFAULT_HOSTNAME="ARCLinux"
3# CONFIG_SWAP is not set 3# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y 4CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index cfac24e0e7b6..323486d6ee83 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -1,4 +1,4 @@
1CONFIG_CROSS_COMPILE="arc-linux-uclibc-" 1CONFIG_CROSS_COMPILE="arc-linux-"
2CONFIG_DEFAULT_HOSTNAME="ARCLinux" 2CONFIG_DEFAULT_HOSTNAME="ARCLinux"
3# CONFIG_SWAP is not set 3# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y 4CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 9922a118a15a..66191cd0447e 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -1,4 +1,4 @@
1CONFIG_CROSS_COMPILE="arc-linux-uclibc-" 1CONFIG_CROSS_COMPILE="arc-linux-"
2CONFIG_DEFAULT_HOSTNAME="ARCLinux" 2CONFIG_DEFAULT_HOSTNAME="ARCLinux"
3# CONFIG_SWAP is not set 3# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y 4CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index f761a7c70761..f68838e8068a 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -1,4 +1,4 @@
1CONFIG_CROSS_COMPILE="arc-linux-uclibc-" 1CONFIG_CROSS_COMPILE="arc-linux-"
2# CONFIG_LOCALVERSION_AUTO is not set 2# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3CONFIG_DEFAULT_HOSTNAME="ARCLinux"
4# CONFIG_SWAP is not set 4# CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index dc6f74f41283..96bd1c20fb0b 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -1,4 +1,4 @@
1CONFIG_CROSS_COMPILE="arc-linux-uclibc-" 1CONFIG_CROSS_COMPILE="arc-linux-"
2# CONFIG_LOCALVERSION_AUTO is not set 2# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3CONFIG_DEFAULT_HOSTNAME="ARCLinux"
4# CONFIG_SWAP is not set 4# CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 3fef0a210c56..fcae66683ca0 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -1,4 +1,4 @@
1CONFIG_CROSS_COMPILE="arc-linux-uclibc-" 1CONFIG_CROSS_COMPILE="arc-linux-"
2# CONFIG_LOCALVERSION_AUTO is not set 2# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3CONFIG_DEFAULT_HOSTNAME="ARCLinux"
4# CONFIG_SWAP is not set 4# CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 51784837daae..b01b659168ea 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -1,4 +1,4 @@
1CONFIG_CROSS_COMPILE="arc-linux-uclibc-" 1CONFIG_CROSS_COMPILE="arc-linux-"
2CONFIG_DEFAULT_HOSTNAME="ARCLinux" 2CONFIG_DEFAULT_HOSTNAME="ARCLinux"
3# CONFIG_SWAP is not set 3# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y 4CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index ef35ef3923dd..a07f20de221b 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -1,4 +1,4 @@
1CONFIG_CROSS_COMPILE="arc-linux-uclibc-" 1CONFIG_CROSS_COMPILE="arc-linux-"
2# CONFIG_LOCALVERSION_AUTO is not set 2# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3CONFIG_DEFAULT_HOSTNAME="ARCLinux"
4# CONFIG_CROSS_MEMORY_ATTACH is not set 4# CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 634509e5e572..f36c047b33ca 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -1,4 +1,4 @@
1CONFIG_CROSS_COMPILE="arc-linux-uclibc-" 1CONFIG_CROSS_COMPILE="arc-linux-"
2# CONFIG_LOCALVERSION_AUTO is not set 2# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3CONFIG_DEFAULT_HOSTNAME="ARCLinux"
4# CONFIG_CROSS_MEMORY_ATTACH is not set 4# CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index ad481c24070d..258b0e5ad332 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -37,6 +37,9 @@
37#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \ 37#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \
38 (ARCV2_IRQ_DEF_PRIO << 1)) 38 (ARCV2_IRQ_DEF_PRIO << 1))
39 39
40/* SLEEP needs default irq priority (<=) which can interrupt the doze */
41#define ISA_SLEEP_ARG (0x10 | ARCV2_IRQ_DEF_PRIO)
42
40#ifndef __ASSEMBLY__ 43#ifndef __ASSEMBLY__
41 44
42/* 45/*
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index d8c608174617..c1d36458bfb7 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -43,6 +43,8 @@
43 43
44#define ISA_INIT_STATUS_BITS STATUS_IE_MASK 44#define ISA_INIT_STATUS_BITS STATUS_IE_MASK
45 45
46#define ISA_SLEEP_ARG 0x3
47
46#ifndef __ASSEMBLY__ 48#ifndef __ASSEMBLY__
47 49
48/****************************************************************** 50/******************************************************************
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index 6ff657a904b6..c28e6c347b49 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -23,7 +23,7 @@
23 * @dt_compat: Array of device tree 'compatible' strings 23 * @dt_compat: Array of device tree 'compatible' strings
24 * (XXX: although only 1st entry is looked at) 24 * (XXX: although only 1st entry is looked at)
25 * @init_early: Very early callback [called from setup_arch()] 25 * @init_early: Very early callback [called from setup_arch()]
26 * @init_cpu_smp: for each CPU as it is coming up (SMP as well as UP) 26 * @init_per_cpu: for each CPU as it is coming up (SMP as well as UP)
27 * [(M):init_IRQ(), (o):start_kernel_secondary()] 27 * [(M):init_IRQ(), (o):start_kernel_secondary()]
28 * @init_machine: arch initcall level callback (e.g. populate static 28 * @init_machine: arch initcall level callback (e.g. populate static
29 * platform devices or parse Devicetree) 29 * platform devices or parse Devicetree)
@@ -35,7 +35,7 @@ struct machine_desc {
35 const char **dt_compat; 35 const char **dt_compat;
36 void (*init_early)(void); 36 void (*init_early)(void);
37#ifdef CONFIG_SMP 37#ifdef CONFIG_SMP
38 void (*init_cpu_smp)(unsigned int); 38 void (*init_per_cpu)(unsigned int);
39#endif 39#endif
40 void (*init_machine)(void); 40 void (*init_machine)(void);
41 void (*init_late)(void); 41 void (*init_late)(void);
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 133c867d15af..991380438d6b 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -48,7 +48,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
48 * @init_early_smp: A SMP specific h/w block can init itself 48 * @init_early_smp: A SMP specific h/w block can init itself
49 * Could be common across platforms so not covered by 49 * Could be common across platforms so not covered by
50 * mach_desc->init_early() 50 * mach_desc->init_early()
51 * @init_irq_cpu: Called for each core so SMP h/w block driver can do 51 * @init_per_cpu: Called for each core so SMP h/w block driver can do
52 * any needed setup per cpu (e.g. IPI request) 52 * any needed setup per cpu (e.g. IPI request)
53 * @cpu_kick: For Master to kickstart a cpu (optionally at a PC) 53 * @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
54 * @ipi_send: To send IPI to a @cpu 54 * @ipi_send: To send IPI to a @cpu
@@ -57,7 +57,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
57struct plat_smp_ops { 57struct plat_smp_ops {
58 const char *info; 58 const char *info;
59 void (*init_early_smp)(void); 59 void (*init_early_smp)(void);
60 void (*init_irq_cpu)(int cpu); 60 void (*init_per_cpu)(int cpu);
61 void (*cpu_kick)(int cpu, unsigned long pc); 61 void (*cpu_kick)(int cpu, unsigned long pc);
62 void (*ipi_send)(int cpu); 62 void (*ipi_send)(int cpu);
63 void (*ipi_clear)(int irq); 63 void (*ipi_clear)(int irq);
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
index 7ca628b6ee2a..c11a25bb8158 100644
--- a/arch/arc/include/asm/unwind.h
+++ b/arch/arc/include/asm/unwind.h
@@ -112,7 +112,6 @@ struct unwind_frame_info {
112 112
113extern int arc_unwind(struct unwind_frame_info *frame); 113extern int arc_unwind(struct unwind_frame_info *frame);
114extern void arc_unwind_init(void); 114extern void arc_unwind_init(void);
115extern void arc_unwind_setup(void);
116extern void *unwind_add_table(struct module *module, const void *table_start, 115extern void *unwind_add_table(struct module *module, const void *table_start,
117 unsigned long table_size); 116 unsigned long table_size);
118extern void unwind_remove_table(void *handle, int init_only); 117extern void unwind_remove_table(void *handle, int init_only);
@@ -152,9 +151,6 @@ static inline void arc_unwind_init(void)
152{ 151{
153} 152}
154 153
155static inline void arc_unwind_setup(void)
156{
157}
158#define unwind_add_table(a, b, c) 154#define unwind_add_table(a, b, c)
159#define unwind_remove_table(a, b) 155#define unwind_remove_table(a, b)
160 156
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index c14a5bea0c76..5d446df2c413 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -58,8 +58,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
58 "st sp, [r24] \n\t" 58 "st sp, [r24] \n\t"
59#endif 59#endif
60 60
61 "sync \n\t"
62
63 /* 61 /*
64 * setup _current_task with incoming tsk. 62 * setup _current_task with incoming tsk.
65 * optionally, set r25 to that as well 63 * optionally, set r25 to that as well
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
index e248594097e7..e6890b1f8650 100644
--- a/arch/arc/kernel/ctx_sw_asm.S
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -44,9 +44,6 @@ __switch_to:
44 * don't need to do anything special to return it 44 * don't need to do anything special to return it
45 */ 45 */
46 46
47 /* hardware memory barrier */
48 sync
49
50 /* 47 /*
51 * switch to new task, contained in r1 48 * switch to new task, contained in r1
52 * Temp reg r3 is required to get the ptr to store val 49 * Temp reg r3 is required to get the ptr to store val
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 26c156827479..0394f9f61b46 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -106,10 +106,21 @@ static struct irq_chip arcv2_irq_chip = {
106static int arcv2_irq_map(struct irq_domain *d, unsigned int irq, 106static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
107 irq_hw_number_t hw) 107 irq_hw_number_t hw)
108{ 108{
109 if (irq == TIMER0_IRQ || irq == IPI_IRQ) 109 /*
110 * core intc IRQs [16, 23]:
111 * Statically assigned, always private-per-core (Timers, WDT, IPI, PCT)
112 */
113 if (hw < 24) {
114 /*
115 * A subsequent request_percpu_irq() fails if percpu_devid is
116 * not set. That in turn sets NOAUTOEN, meaning each core needs
117 * to call enable_percpu_irq()
118 */
119 irq_set_percpu_devid(irq);
110 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq); 120 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
111 else 121 } else {
112 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq); 122 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
123 }
113 124
114 return 0; 125 return 0;
115} 126}
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 2ee226546c6a..ba17f85285cf 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -29,11 +29,11 @@ void __init init_IRQ(void)
29 29
30#ifdef CONFIG_SMP 30#ifdef CONFIG_SMP
31 /* a SMP H/w block could do IPI IRQ request here */ 31 /* a SMP H/w block could do IPI IRQ request here */
32 if (plat_smp_ops.init_irq_cpu) 32 if (plat_smp_ops.init_per_cpu)
33 plat_smp_ops.init_irq_cpu(smp_processor_id()); 33 plat_smp_ops.init_per_cpu(smp_processor_id());
34 34
35 if (machine_desc->init_cpu_smp) 35 if (machine_desc->init_per_cpu)
36 machine_desc->init_cpu_smp(smp_processor_id()); 36 machine_desc->init_per_cpu(smp_processor_id());
37#endif 37#endif
38} 38}
39 39
@@ -51,6 +51,18 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
51 set_irq_regs(old_regs); 51 set_irq_regs(old_regs);
52} 52}
53 53
54/*
55 * API called for requesting percpu interrupts - called by each CPU
56 * - For the boot CPU, actually request the IRQ with the genirq core + enable it
57 * - Subsequent callers only do the enable, locally
58 *
59 * Relies on being called by the boot cpu first (i.e. the request happens
60 * ahead of any enable), as genirq expects. Hence suitable only for TIMER and
61 * IPI, which are guaranteed to be set up on the boot core first.
62 * Late-probed peripherals such as perf can't use this, as there is no
63 * guarantee of being called on the boot CPU first.
64 */
65
54void arc_request_percpu_irq(int irq, int cpu, 66void arc_request_percpu_irq(int irq, int cpu,
55 irqreturn_t (*isr)(int irq, void *dev), 67 irqreturn_t (*isr)(int irq, void *dev),
56 const char *irq_nm, 68 const char *irq_nm,
@@ -60,14 +72,17 @@ void arc_request_percpu_irq(int irq, int cpu,
60 if (!cpu) { 72 if (!cpu) {
61 int rc; 73 int rc;
62 74
75#ifdef CONFIG_ISA_ARCOMPACT
63 /* 76 /*
64 * These 2 calls are essential to making percpu IRQ APIs work 77 * A subsequent request_percpu_irq() fails if percpu_devid is
65 * Ideally these details could be hidden in irq chip map function 78 * not set. That in turns sets NOAUTOEN, meaning each core needs
66 * but the issue is IPIs IRQs being static (non-DT) and platform 79 * to call enable_percpu_irq()
67 * specific, so we can't identify them there. 80 *
81 * For ARCv2, this is done in irq map function since we know
82 * which irqs are strictly per cpu
68 */ 83 */
69 irq_set_percpu_devid(irq); 84 irq_set_percpu_devid(irq);
70 irq_modify_status(irq, IRQ_NOAUTOEN, 0); /* @irq, @clr, @set */ 85#endif
71 86
72 rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev); 87 rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
73 if (rc) 88 if (rc)
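A condensed C sketch of the call pattern the new comment describes; the
function and variable names below are invented for illustration and are not
part of the patch:

	#include <linux/interrupt.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(int, dummy_dev);	/* illustrative per-cpu token */

	static irqreturn_t my_percpu_isr(int irq, void *dev)
	{
		/* per-cpu hardware would be acked here */
		return IRQ_HANDLED;
	}

	/* called on the boot CPU first, then once on every secondary CPU */
	static void my_percpu_irq_setup(int irq, int cpu)
	{
		if (!cpu) {	/* boot CPU: one-time registration with genirq */
			if (request_percpu_irq(irq, my_percpu_isr,
					       "my-percpu-irq", &dummy_dev))
				return;
		}
		/* every CPU, boot CPU included, enables its private copy */
		enable_percpu_irq(irq, IRQ_TYPE_NONE);
	}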
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 74a9b074ac3e..bd237acdf4f2 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -132,7 +132,7 @@ static void mcip_probe_n_setup(void)
132struct plat_smp_ops plat_smp_ops = { 132struct plat_smp_ops plat_smp_ops = {
133 .info = smp_cpuinfo_buf, 133 .info = smp_cpuinfo_buf,
134 .init_early_smp = mcip_probe_n_setup, 134 .init_early_smp = mcip_probe_n_setup,
135 .init_irq_cpu = mcip_setup_per_cpu, 135 .init_per_cpu = mcip_setup_per_cpu,
136 .ipi_send = mcip_ipi_send, 136 .ipi_send = mcip_ipi_send,
137 .ipi_clear = mcip_ipi_clear, 137 .ipi_clear = mcip_ipi_clear,
138}; 138};
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 0c08bb1ce15a..8b134cfe5e1f 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
428 428
429#endif /* CONFIG_ISA_ARCV2 */ 429#endif /* CONFIG_ISA_ARCV2 */
430 430
431void arc_cpu_pmu_irq_init(void) 431static void arc_cpu_pmu_irq_init(void *data)
432{ 432{
433 struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu); 433 int irq = *(int *)data;
434 434
435 arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr, 435 enable_percpu_irq(irq, IRQ_TYPE_NONE);
436 "ARC perf counters", pmu_cpu);
437 436
438 /* Clear all pending interrupt flags */ 437 /* Clear all pending interrupt flags */
439 write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); 438 write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
@@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
515 514
516 if (has_interrupts) { 515 if (has_interrupts) {
517 int irq = platform_get_irq(pdev, 0); 516 int irq = platform_get_irq(pdev, 0);
518 unsigned long flags;
519 517
520 if (irq < 0) { 518 if (irq < 0) {
521 pr_err("Cannot get IRQ number for the platform\n"); 519 pr_err("Cannot get IRQ number for the platform\n");
@@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
524 522
525 arc_pmu->irq = irq; 523 arc_pmu->irq = irq;
526 524
527 /* 525 /* intc map function ensures irq_set_percpu_devid() called */
528 * arc_cpu_pmu_irq_init() needs to be called on all cores for 526 request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
529 * their respective local PMU. 527 this_cpu_ptr(&arc_pmu_cpu));
530 * However we use opencoded on_each_cpu() to ensure it is called 528
531 * on core0 first, so that arc_request_percpu_irq() sets up 529 on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
532 * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable 530
533 * perf IRQ on non master cores.
534 * see arc_request_percpu_irq()
535 */
536 preempt_disable();
537 local_irq_save(flags);
538 arc_cpu_pmu_irq_init();
539 local_irq_restore(flags);
540 smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
541 preempt_enable();
542
543 /* Clean all pending interrupt flags */
544 write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
545 } else 531 } else
546 arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; 532 arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
547 533
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 91d5a0f1f3f7..a3f750e76b68 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -44,11 +44,10 @@ SYSCALL_DEFINE0(arc_gettls)
44void arch_cpu_idle(void) 44void arch_cpu_idle(void)
45{ 45{
46 /* sleep, but enable all interrupts before committing */ 46 /* sleep, but enable all interrupts before committing */
47 if (is_isa_arcompact()) { 47 __asm__ __volatile__(
48 __asm__("sleep 0x3"); 48 "sleep %0 \n"
49 } else { 49 :
 50 __asm__("sleep 0x10"); 50 :"I"(ISA_SLEEP_ARG)); /* can't be "r", has to be an embedded const */
51 }
52} 51}
53 52
54asmlinkage void ret_from_fork(void); 53asmlinkage void ret_from_fork(void);
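Per the in-line comment, the "I" constraint requires a compile-time constant
embedded in the instruction itself, so the single asm statement expands to
"sleep 0x3" on ARCompact (ISA_SLEEP_ARG is 0x3 there) and to
"sleep (0x10 | ARCV2_IRQ_DEF_PRIO)" on ARCv2, matching the two irqflags
headers changed above.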
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index c33e77c0ad3e..e1b87444ea9a 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -429,7 +429,6 @@ void __init setup_arch(char **cmdline_p)
429#endif 429#endif
430 430
431 arc_unwind_init(); 431 arc_unwind_init();
432 arc_unwind_setup();
433} 432}
434 433
435static int __init customize_machine(void) 434static int __init customize_machine(void)
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 580587805fa3..ef6e9e15b82a 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -132,11 +132,11 @@ void start_kernel_secondary(void)
132 pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); 132 pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
133 133
134 /* Some SMP H/w setup - for each cpu */ 134 /* Some SMP H/w setup - for each cpu */
135 if (plat_smp_ops.init_irq_cpu) 135 if (plat_smp_ops.init_per_cpu)
136 plat_smp_ops.init_irq_cpu(cpu); 136 plat_smp_ops.init_per_cpu(cpu);
137 137
138 if (machine_desc->init_cpu_smp) 138 if (machine_desc->init_per_cpu)
139 machine_desc->init_cpu_smp(cpu); 139 machine_desc->init_per_cpu(cpu);
140 140
141 arc_local_timer_setup(); 141 arc_local_timer_setup();
142 142
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 93c6ea52b671..cf2828ab0905 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -170,6 +170,23 @@ static struct unwind_table *find_table(unsigned long pc)
170 170
171static unsigned long read_pointer(const u8 **pLoc, 171static unsigned long read_pointer(const u8 **pLoc,
172 const void *end, signed ptrType); 172 const void *end, signed ptrType);
173static void init_unwind_hdr(struct unwind_table *table,
174 void *(*alloc) (unsigned long));
175
176/*
177 * wrappers for header alloc (rather than choosing one vs. the other at the
178 * call site), which also elides section mismatch warnings
179 */
180static void *__init unw_hdr_alloc_early(unsigned long sz)
181{
182 return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
183 MAX_DMA_ADDRESS);
184}
185
186static void *unw_hdr_alloc(unsigned long sz)
187{
188 return kmalloc(sz, GFP_KERNEL);
189}
173 190
174static void init_unwind_table(struct unwind_table *table, const char *name, 191static void init_unwind_table(struct unwind_table *table, const char *name,
175 const void *core_start, unsigned long core_size, 192 const void *core_start, unsigned long core_size,
@@ -209,6 +226,8 @@ void __init arc_unwind_init(void)
209 __start_unwind, __end_unwind - __start_unwind, 226 __start_unwind, __end_unwind - __start_unwind,
210 NULL, 0); 227 NULL, 0);
211 /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/ 228 /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
229
230 init_unwind_hdr(&root_table, unw_hdr_alloc_early);
212} 231}
213 232
214static const u32 bad_cie, not_fde; 233static const u32 bad_cie, not_fde;
@@ -241,8 +260,8 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
241 e2->fde = v; 260 e2->fde = v;
242} 261}
243 262
244static void __init setup_unwind_table(struct unwind_table *table, 263static void init_unwind_hdr(struct unwind_table *table,
245 void *(*alloc) (unsigned long)) 264 void *(*alloc) (unsigned long))
246{ 265{
247 const u8 *ptr; 266 const u8 *ptr;
248 unsigned long tableSize = table->size, hdrSize; 267 unsigned long tableSize = table->size, hdrSize;
@@ -274,13 +293,13 @@ static void __init setup_unwind_table(struct unwind_table *table,
274 const u32 *cie = cie_for_fde(fde, table); 293 const u32 *cie = cie_for_fde(fde, table);
275 signed ptrType; 294 signed ptrType;
276 295
277 if (cie == &not_fde) 296 if (cie == &not_fde) /* only process FDE here */
278 continue; 297 continue;
279 if (cie == NULL || cie == &bad_cie) 298 if (cie == NULL || cie == &bad_cie)
280 return; 299 continue; /* say FDE->CIE.version != 1 */
281 ptrType = fde_pointer_type(cie); 300 ptrType = fde_pointer_type(cie);
282 if (ptrType < 0) 301 if (ptrType < 0)
283 return; 302 continue;
284 303
285 ptr = (const u8 *)(fde + 2); 304 ptr = (const u8 *)(fde + 2);
286 if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, 305 if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
@@ -300,9 +319,11 @@ static void __init setup_unwind_table(struct unwind_table *table,
300 319
301 hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) 320 hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
302 + 2 * n * sizeof(unsigned long); 321 + 2 * n * sizeof(unsigned long);
322
303 header = alloc(hdrSize); 323 header = alloc(hdrSize);
304 if (!header) 324 if (!header)
305 return; 325 return;
326
306 header->version = 1; 327 header->version = 1;
307 header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native; 328 header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
308 header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4; 329 header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
@@ -322,6 +343,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
322 343
323 if (fde[1] == 0xffffffff) 344 if (fde[1] == 0xffffffff)
324 continue; /* this is a CIE */ 345 continue; /* this is a CIE */
346
347 if (*(u8 *)(cie + 2) != 1)
348 continue; /* FDE->CIE.version not supported */
349
325 ptr = (const u8 *)(fde + 2); 350 ptr = (const u8 *)(fde + 2);
326 header->table[n].start = read_pointer(&ptr, 351 header->table[n].start = read_pointer(&ptr,
327 (const u8 *)(fde + 1) + 352 (const u8 *)(fde + 1) +
@@ -342,18 +367,6 @@ static void __init setup_unwind_table(struct unwind_table *table,
342 table->header = (const void *)header; 367 table->header = (const void *)header;
343} 368}
344 369
345static void *__init balloc(unsigned long sz)
346{
347 return __alloc_bootmem_nopanic(sz,
348 sizeof(unsigned int),
349 __pa(MAX_DMA_ADDRESS));
350}
351
352void __init arc_unwind_setup(void)
353{
354 setup_unwind_table(&root_table, balloc);
355}
356
357#ifdef CONFIG_MODULES 370#ifdef CONFIG_MODULES
358 371
359static struct unwind_table *last_table; 372static struct unwind_table *last_table;
@@ -377,6 +390,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
377 table_start, table_size, 390 table_start, table_size,
378 NULL, 0); 391 NULL, 0);
379 392
393 init_unwind_hdr(table, unw_hdr_alloc);
394
380#ifdef UNWIND_DEBUG 395#ifdef UNWIND_DEBUG
381 unw_debug("Table added for [%s] %lx %lx\n", 396 unw_debug("Table added for [%s] %lx %lx\n",
382 module->name, table->core.pc, table->core.range); 397 module->name, table->core.pc, table->core.range);
@@ -439,6 +454,7 @@ void unwind_remove_table(void *handle, int init_only)
439 info.init_only = init_only; 454 info.init_only = init_only;
440 455
441 unlink_table(&info); /* XXX: SMP */ 456 unlink_table(&info); /* XXX: SMP */
457 kfree(table->header);
442 kfree(table); 458 kfree(table);
443} 459}
444 460
@@ -507,7 +523,8 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
507 523
508 if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde) 524 if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
509 || (*cie & (sizeof(*cie) - 1)) 525 || (*cie & (sizeof(*cie) - 1))
510 || (cie[1] != 0xffffffff)) 526 || (cie[1] != 0xffffffff)
527 || ( *(u8 *)(cie + 2) != 1)) /* version 1 supported */
511 return NULL; /* this is not a (valid) CIE */ 528 return NULL; /* this is not a (valid) CIE */
512 return cie; 529 return cie;
513} 530}
@@ -986,42 +1003,13 @@ int arc_unwind(struct unwind_frame_info *frame)
986 (const u8 *)(fde + 1003 (const u8 *)(fde +
987 1) + 1004 1) +
988 *fde, ptrType); 1005 *fde, ptrType);
989 if (pc >= endLoc) 1006 if (pc >= endLoc) {
990 fde = NULL; 1007 fde = NULL;
991 } else
992 fde = NULL;
993 }
994 if (fde == NULL) {
995 for (fde = table->address, tableSize = table->size;
996 cie = NULL, tableSize > sizeof(*fde)
997 && tableSize - sizeof(*fde) >= *fde;
998 tableSize -= sizeof(*fde) + *fde,
999 fde += 1 + *fde / sizeof(*fde)) {
1000 cie = cie_for_fde(fde, table);
1001 if (cie == &bad_cie) {
1002 cie = NULL; 1008 cie = NULL;
1003 break;
1004 } 1009 }
1005 if (cie == NULL 1010 } else {
1006 || cie == &not_fde 1011 fde = NULL;
1007 || (ptrType = fde_pointer_type(cie)) < 0) 1012 cie = NULL;
1008 continue;
1009 ptr = (const u8 *)(fde + 2);
1010 startLoc = read_pointer(&ptr,
1011 (const u8 *)(fde + 1) +
1012 *fde, ptrType);
1013 if (!startLoc)
1014 continue;
1015 if (!(ptrType & DW_EH_PE_indirect))
1016 ptrType &=
1017 DW_EH_PE_FORM | DW_EH_PE_signed;
1018 endLoc =
1019 startLoc + read_pointer(&ptr,
1020 (const u8 *)(fde +
1021 1) +
1022 *fde, ptrType);
1023 if (pc >= startLoc && pc < endLoc)
1024 break;
1025 } 1013 }
1026 } 1014 }
1027 } 1015 }
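The large deletion above follows from the earlier unwind.c hunks: now that
init_unwind_hdr() is also run for module tables from unwind_add_table(),
every table has a binary-searchable header, so the linear fallback scan over
raw FDEs is no longer needed.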
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index a9305b5a2cd4..7d2c4fbf4f22 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -51,7 +51,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
51 int in_use = 0; 51 int in_use = 0;
52 52
53 if (!low_mem_sz) { 53 if (!low_mem_sz) {
54 BUG_ON(base != low_mem_start); 54 if (base != low_mem_start)
55 panic("CONFIG_LINUX_LINK_BASE != DT memory { }");
56
55 low_mem_sz = size; 57 low_mem_sz = size;
56 in_use = 1; 58 in_use = 1;
57 } else { 59 } else {
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 0ee739846847..daf2bf52b984 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -619,10 +619,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
619 619
620 int dirty = !test_and_set_bit(PG_dc_clean, &page->flags); 620 int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
621 if (dirty) { 621 if (dirty) {
622 /* wback + inv dcache lines */ 622 /* wback + inv dcache lines (K-mapping) */
623 __flush_dcache_page(paddr, paddr); 623 __flush_dcache_page(paddr, paddr);
624 624
625 /* invalidate any existing icache lines */ 625 /* invalidate any existing icache lines (U-mapping) */
626 if (vma->vm_flags & VM_EXEC) 626 if (vma->vm_flags & VM_EXEC)
627 __inv_icache_page(paddr, vaddr); 627 __inv_icache_page(paddr, vaddr);
628 } 628 }
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0365cbbc9179..34e1569a11ee 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -76,6 +76,8 @@ config ARM
76 select IRQ_FORCED_THREADING 76 select IRQ_FORCED_THREADING
77 select MODULES_USE_ELF_REL 77 select MODULES_USE_ELF_REL
78 select NO_BOOTMEM 78 select NO_BOOTMEM
79 select OF_EARLY_FLATTREE if OF
80 select OF_RESERVED_MEM if OF
79 select OLD_SIGACTION 81 select OLD_SIGACTION
80 select OLD_SIGSUSPEND3 82 select OLD_SIGSUSPEND3
81 select PERF_USE_VMALLOC 83 select PERF_USE_VMALLOC
@@ -1822,8 +1824,6 @@ config USE_OF
1822 bool "Flattened Device Tree support" 1824 bool "Flattened Device Tree support"
1823 select IRQ_DOMAIN 1825 select IRQ_DOMAIN
1824 select OF 1826 select OF
1825 select OF_EARLY_FLATTREE
1826 select OF_RESERVED_MEM
1827 help 1827 help
1828 Include support for flattened device tree machine descriptions. 1828 Include support for flattened device tree machine descriptions.
1829 1829
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index d83ff9c9701e..de8791a4d131 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -74,7 +74,7 @@
74 reg = <0x48240200 0x100>; 74 reg = <0x48240200 0x100>;
75 interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>; 75 interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
76 interrupt-parent = <&gic>; 76 interrupt-parent = <&gic>;
77 clocks = <&dpll_mpu_m2_ck>; 77 clocks = <&mpu_periphclk>;
78 }; 78 };
79 79
80 local_timer: timer@48240600 { 80 local_timer: timer@48240600 {
@@ -82,7 +82,7 @@
82 reg = <0x48240600 0x100>; 82 reg = <0x48240600 0x100>;
83 interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>; 83 interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
84 interrupt-parent = <&gic>; 84 interrupt-parent = <&gic>;
85 clocks = <&dpll_mpu_m2_ck>; 85 clocks = <&mpu_periphclk>;
86 }; 86 };
87 87
88 l2-cache-controller@48242000 { 88 l2-cache-controller@48242000 {
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index cc88728d751d..a38af2bfbfcf 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -259,6 +259,14 @@
259 ti,invert-autoidle-bit; 259 ti,invert-autoidle-bit;
260 }; 260 };
261 261
262 mpu_periphclk: mpu_periphclk {
263 #clock-cells = <0>;
264 compatible = "fixed-factor-clock";
265 clocks = <&dpll_mpu_m2_ck>;
266 clock-mult = <1>;
267 clock-div = <2>;
268 };
269
262 dpll_ddr_ck: dpll_ddr_ck { 270 dpll_ddr_ck: dpll_ddr_ck {
263 #clock-cells = <0>; 271 #clock-cells = <0>;
264 compatible = "ti,am3-dpll-clock"; 272 compatible = "ti,am3-dpll-clock";
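The new mpu_periphclk node models the Cortex-A9 PERIPHCLK, which on this SoC runs at half the MPU DPLL output (hence clock-mult = <1>, clock-div = <2>); the am4372.dtsi hunk above moves the TWD and global timers onto it, so the rate the kernel programs matches what the hardware actually receives. A fixed-factor clock simply scales its parent's rate; a minimal C sketch of that computation, as an assumed simplification of the kernel's clk-fixed-factor logic:

    /* Rate of a fixed-factor clock such as mpu_periphclk. */
    static unsigned long fixed_factor_recalc_rate(unsigned long parent_rate,
                                                  unsigned int mult,
                                                  unsigned int div)
    {
            /* mpu_periphclk: mult = 1, div = 2 -> half of dpll_mpu_m2_ck */
            return parent_rate * mult / div;
    }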
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index d9ba6b879fc1..00352e761b8c 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -604,6 +604,7 @@
604 reg = <0x6f>; 604 reg = <0x6f>;
605 interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>, 605 interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
606 <&dra7_pmx_core 0x424>; 606 <&dra7_pmx_core 0x424>;
607 interrupt-names = "irq", "wakeup";
607 608
608 pinctrl-names = "default"; 609 pinctrl-names = "default";
609 pinctrl-0 = <&mcp79410_pins_default>; 610 pinctrl-0 = <&mcp79410_pins_default>;
diff --git a/arch/arm/boot/dts/animeo_ip.dts b/arch/arm/boot/dts/animeo_ip.dts
index 4e0ad3b82796..0962f2fa3f6e 100644
--- a/arch/arm/boot/dts/animeo_ip.dts
+++ b/arch/arm/boot/dts/animeo_ip.dts
@@ -155,21 +155,21 @@
155 label = "keyswitch_in"; 155 label = "keyswitch_in";
156 gpios = <&pioB 1 GPIO_ACTIVE_HIGH>; 156 gpios = <&pioB 1 GPIO_ACTIVE_HIGH>;
157 linux,code = <28>; 157 linux,code = <28>;
158 gpio-key,wakeup; 158 wakeup-source;
159 }; 159 };
160 160
161 error_in { 161 error_in {
162 label = "error_in"; 162 label = "error_in";
163 gpios = <&pioB 2 GPIO_ACTIVE_HIGH>; 163 gpios = <&pioB 2 GPIO_ACTIVE_HIGH>;
164 linux,code = <29>; 164 linux,code = <29>;
165 gpio-key,wakeup; 165 wakeup-source;
166 }; 166 };
167 167
168 btn { 168 btn {
169 label = "btn"; 169 label = "btn";
170 gpios = <&pioC 23 GPIO_ACTIVE_HIGH>; 170 gpios = <&pioC 23 GPIO_ACTIVE_HIGH>;
171 linux,code = <31>; 171 linux,code = <31>;
172 gpio-key,wakeup; 172 wakeup-source;
173 }; 173 };
174 }; 174 };
175}; 175};
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index c6a0e9d7f1a9..e8b7f6726772 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -498,6 +498,7 @@
498 reg = <0x70000 0x4000>; 498 reg = <0x70000 0x4000>;
499 interrupts-extended = <&mpic 8>; 499 interrupts-extended = <&mpic 8>;
500 clocks = <&gateclk 4>; 500 clocks = <&gateclk 4>;
501 tx-csum-limit = <9800>;
501 status = "disabled"; 502 status = "disabled";
502 }; 503 };
503 504
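The new tx-csum-limit property caps the frame size for which this network block can offload transmit checksumming (9800 bytes here). A hypothetical sketch of how a driver can honour such a limit by dropping the offload flags above it; the variable and function names are illustrative, not the actual mvneta code:

    #include <linux/netdevice.h>

    static u32 tx_csum_limit;   /* parsed from the "tx-csum-limit" DT property */

    /* Fall back to software checksumming once the MTU exceeds what the
     * hardware checksum engine can handle. */
    static netdev_features_t fix_features(struct net_device *dev,
                                          netdev_features_t features)
    {
            if (dev->mtu > tx_csum_limit)
                    features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
            return features;
    }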
diff --git a/arch/arm/boot/dts/at91-foxg20.dts b/arch/arm/boot/dts/at91-foxg20.dts
index f89598af4c2b..6bf873e7d96c 100644
--- a/arch/arm/boot/dts/at91-foxg20.dts
+++ b/arch/arm/boot/dts/at91-foxg20.dts
@@ -159,7 +159,7 @@
159 label = "Button"; 159 label = "Button";
160 gpios = <&pioC 4 GPIO_ACTIVE_LOW>; 160 gpios = <&pioC 4 GPIO_ACTIVE_LOW>;
161 linux,code = <0x103>; 161 linux,code = <0x103>;
162 gpio-key,wakeup; 162 wakeup-source;
163 }; 163 };
164 }; 164 };
165}; 165};
diff --git a/arch/arm/boot/dts/at91-kizbox.dts b/arch/arm/boot/dts/at91-kizbox.dts
index bf18ece0c027..229e989eb60d 100644
--- a/arch/arm/boot/dts/at91-kizbox.dts
+++ b/arch/arm/boot/dts/at91-kizbox.dts
@@ -24,15 +24,6 @@
24 }; 24 };
25 25
26 clocks { 26 clocks {
27 #address-cells = <1>;
28 #size-cells = <1>;
29 ranges;
30
31 main_clock: clock@0 {
32 compatible = "atmel,osc", "fixed-clock";
33 clock-frequency = <18432000>;
34 };
35
36 main_xtal { 27 main_xtal {
37 clock-frequency = <18432000>; 28 clock-frequency = <18432000>;
38 }; 29 };
@@ -94,14 +85,14 @@
94 label = "PB_RST"; 85 label = "PB_RST";
95 gpios = <&pioB 30 GPIO_ACTIVE_HIGH>; 86 gpios = <&pioB 30 GPIO_ACTIVE_HIGH>;
96 linux,code = <0x100>; 87 linux,code = <0x100>;
97 gpio-key,wakeup; 88 wakeup-source;
98 }; 89 };
99 90
100 user { 91 user {
101 label = "PB_USER"; 92 label = "PB_USER";
102 gpios = <&pioB 31 GPIO_ACTIVE_HIGH>; 93 gpios = <&pioB 31 GPIO_ACTIVE_HIGH>;
103 linux,code = <0x101>; 94 linux,code = <0x101>;
104 gpio-key,wakeup; 95 wakeup-source;
105 }; 96 };
106 }; 97 };
107 98
diff --git a/arch/arm/boot/dts/at91-kizbox2.dts b/arch/arm/boot/dts/at91-kizbox2.dts
index f0b1563cb3f1..50a14568f094 100644
--- a/arch/arm/boot/dts/at91-kizbox2.dts
+++ b/arch/arm/boot/dts/at91-kizbox2.dts
@@ -171,21 +171,21 @@
171 label = "PB_PROG"; 171 label = "PB_PROG";
172 gpios = <&pioE 27 GPIO_ACTIVE_LOW>; 172 gpios = <&pioE 27 GPIO_ACTIVE_LOW>;
173 linux,code = <0x102>; 173 linux,code = <0x102>;
174 gpio-key,wakeup; 174 wakeup-source;
175 }; 175 };
176 176
177 reset { 177 reset {
178 label = "PB_RST"; 178 label = "PB_RST";
179 gpios = <&pioE 29 GPIO_ACTIVE_LOW>; 179 gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
180 linux,code = <0x100>; 180 linux,code = <0x100>;
181 gpio-key,wakeup; 181 wakeup-source;
182 }; 182 };
183 183
184 user { 184 user {
185 label = "PB_USER"; 185 label = "PB_USER";
186 gpios = <&pioE 31 GPIO_ACTIVE_HIGH>; 186 gpios = <&pioE 31 GPIO_ACTIVE_HIGH>;
187 linux,code = <0x101>; 187 linux,code = <0x101>;
188 gpio-key,wakeup; 188 wakeup-source;
189 }; 189 };
190 }; 190 };
191 191
diff --git a/arch/arm/boot/dts/at91-kizboxmini.dts b/arch/arm/boot/dts/at91-kizboxmini.dts
index 9f72b4932634..9682d105d4d8 100644
--- a/arch/arm/boot/dts/at91-kizboxmini.dts
+++ b/arch/arm/boot/dts/at91-kizboxmini.dts
@@ -98,14 +98,14 @@
98 label = "PB_PROG"; 98 label = "PB_PROG";
99 gpios = <&pioC 17 GPIO_ACTIVE_LOW>; 99 gpios = <&pioC 17 GPIO_ACTIVE_LOW>;
100 linux,code = <0x102>; 100 linux,code = <0x102>;
101 gpio-key,wakeup; 101 wakeup-source;
102 }; 102 };
103 103
104 reset { 104 reset {
105 label = "PB_RST"; 105 label = "PB_RST";
106 gpios = <&pioC 16 GPIO_ACTIVE_LOW>; 106 gpios = <&pioC 16 GPIO_ACTIVE_LOW>;
107 linux,code = <0x100>; 107 linux,code = <0x100>;
108 gpio-key,wakeup; 108 wakeup-source;
109 }; 109 };
110 }; 110 };
111 111
diff --git a/arch/arm/boot/dts/at91-qil_a9260.dts b/arch/arm/boot/dts/at91-qil_a9260.dts
index a9aef53ab764..4f2eebf4a560 100644
--- a/arch/arm/boot/dts/at91-qil_a9260.dts
+++ b/arch/arm/boot/dts/at91-qil_a9260.dts
@@ -183,7 +183,7 @@
183 label = "user_pb"; 183 label = "user_pb";
184 gpios = <&pioB 10 GPIO_ACTIVE_LOW>; 184 gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
185 linux,code = <28>; 185 linux,code = <28>;
186 gpio-key,wakeup; 186 wakeup-source;
187 }; 187 };
188 }; 188 };
189 189
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index e07c2b206beb..e74df327cdd3 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -45,6 +45,7 @@
45/dts-v1/; 45/dts-v1/;
46#include "sama5d2.dtsi" 46#include "sama5d2.dtsi"
47#include "sama5d2-pinfunc.h" 47#include "sama5d2-pinfunc.h"
48#include <dt-bindings/mfd/atmel-flexcom.h>
48 49
49/ { 50/ {
50 model = "Atmel SAMA5D2 Xplained"; 51 model = "Atmel SAMA5D2 Xplained";
@@ -59,15 +60,6 @@
59 }; 60 };
60 61
61 clocks { 62 clocks {
62 #address-cells = <1>;
63 #size-cells = <1>;
64 ranges;
65
66 main_clock: clock@0 {
67 compatible = "atmel,osc", "fixed-clock";
68 clock-frequency = <12000000>;
69 };
70
71 slow_xtal { 63 slow_xtal {
72 clock-frequency = <32768>; 64 clock-frequency = <32768>;
73 }; 65 };
@@ -91,6 +83,22 @@
91 status = "okay"; 83 status = "okay";
92 }; 84 };
93 85
86 sdmmc0: sdio-host@a0000000 {
87 bus-width = <8>;
88 pinctrl-names = "default";
89 pinctrl-0 = <&pinctrl_sdmmc0_default>;
90 non-removable;
91 mmc-ddr-1_8v;
92 status = "okay";
93 };
94
95 sdmmc1: sdio-host@b0000000 {
96 bus-width = <4>;
97 pinctrl-names = "default";
98 pinctrl-0 = <&pinctrl_sdmmc1_default>;
99 status = "okay"; /* conflict with qspi0 */
100 };
101
94 apb { 102 apb {
95 spi0: spi@f8000000 { 103 spi0: spi@f8000000 {
96 pinctrl-names = "default"; 104 pinctrl-names = "default";
@@ -176,17 +184,55 @@
176 regulator-name = "VDD_SDHC_1V8"; 184 regulator-name = "VDD_SDHC_1V8";
177 regulator-min-microvolt = <1800000>; 185 regulator-min-microvolt = <1800000>;
178 regulator-max-microvolt = <1800000>; 186 regulator-max-microvolt = <1800000>;
187 regulator-always-on;
179 }; 188 };
180 }; 189 };
181 }; 190 };
182 }; 191 };
183 192
193 flx0: flexcom@f8034000 {
194 atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
195 status = "disabled"; /* conflict with ISC_D2 & ISC_D3 data pins */
196
197 uart5: serial@200 {
198 compatible = "atmel,at91sam9260-usart";
199 reg = <0x200 0x200>;
200 interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
201 clocks = <&flx0_clk>;
202 clock-names = "usart";
203 pinctrl-names = "default";
204 pinctrl-0 = <&pinctrl_flx0_default>;
205 atmel,fifo-size = <32>;
206 status = "okay";
207 };
208 };
209
184 uart3: serial@fc008000 { 210 uart3: serial@fc008000 {
185 pinctrl-names = "default"; 211 pinctrl-names = "default";
186 pinctrl-0 = <&pinctrl_uart3_default>; 212 pinctrl-0 = <&pinctrl_uart3_default>;
187 status = "okay"; 213 status = "okay";
188 }; 214 };
189 215
216 flx4: flexcom@fc018000 {
217 atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>;
218 status = "okay";
219
220 i2c2: i2c@600 {
221 compatible = "atmel,sama5d2-i2c";
222 reg = <0x600 0x200>;
223 interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>;
224 dmas = <0>, <0>;
225 dma-names = "tx", "rx";
226 #address-cells = <1>;
227 #size-cells = <0>;
228 clocks = <&flx4_clk>;
229 pinctrl-names = "default";
230 pinctrl-0 = <&pinctrl_flx4_default>;
231 atmel,fifo-size = <16>;
232 status = "okay";
233 };
234 };
235
190 i2c1: i2c@fc028000 { 236 i2c1: i2c@fc028000 {
191 dmas = <0>, <0>; 237 dmas = <0>, <0>;
192 pinctrl-names = "default"; 238 pinctrl-names = "default";
@@ -201,6 +247,18 @@
201 }; 247 };
202 248
203 pinctrl@fc038000 { 249 pinctrl@fc038000 {
250 pinctrl_flx0_default: flx0_default {
251 pinmux = <PIN_PB28__FLEXCOM0_IO0>,
252 <PIN_PB29__FLEXCOM0_IO1>;
253 bias-disable;
254 };
255
256 pinctrl_flx4_default: flx4_default {
257 pinmux = <PIN_PD12__FLEXCOM4_IO0>,
258 <PIN_PD13__FLEXCOM4_IO1>;
259 bias-disable;
260 };
261
204 pinctrl_i2c0_default: i2c0_default { 262 pinctrl_i2c0_default: i2c0_default {
205 pinmux = <PIN_PD21__TWD0>, 263 pinmux = <PIN_PD21__TWD0>,
206 <PIN_PD22__TWCK0>; 264 <PIN_PD22__TWCK0>;
@@ -227,6 +285,46 @@
227 bias-disable; 285 bias-disable;
228 }; 286 };
229 287
288 pinctrl_sdmmc0_default: sdmmc0_default {
289 cmd_data {
290 pinmux = <PIN_PA1__SDMMC0_CMD>,
291 <PIN_PA2__SDMMC0_DAT0>,
292 <PIN_PA3__SDMMC0_DAT1>,
293 <PIN_PA4__SDMMC0_DAT2>,
294 <PIN_PA5__SDMMC0_DAT3>,
295 <PIN_PA6__SDMMC0_DAT4>,
296 <PIN_PA7__SDMMC0_DAT5>,
297 <PIN_PA8__SDMMC0_DAT6>,
298 <PIN_PA9__SDMMC0_DAT7>;
299 bias-pull-up;
300 };
301
302 ck_cd_rstn_vddsel {
303 pinmux = <PIN_PA0__SDMMC0_CK>,
304 <PIN_PA10__SDMMC0_RSTN>,
305 <PIN_PA11__SDMMC0_VDDSEL>,
306 <PIN_PA13__SDMMC0_CD>;
307 bias-disable;
308 };
309 };
310
311 pinctrl_sdmmc1_default: sdmmc1_default {
312 cmd_data {
313 pinmux = <PIN_PA28__SDMMC1_CMD>,
314 <PIN_PA18__SDMMC1_DAT0>,
315 <PIN_PA19__SDMMC1_DAT1>,
316 <PIN_PA20__SDMMC1_DAT2>,
317 <PIN_PA21__SDMMC1_DAT3>;
318 bias-pull-up;
319 };
320
321 conf-ck_cd {
322 pinmux = <PIN_PA22__SDMMC1_CK>,
323 <PIN_PA30__SDMMC1_CD>;
324 bias-disable;
325 };
326 };
327
230 pinctrl_spi0_default: spi0_default { 328 pinctrl_spi0_default: spi0_default {
231 pinmux = <PIN_PA14__SPI0_SPCK>, 329 pinmux = <PIN_PA14__SPI0_SPCK>,
232 <PIN_PA15__SPI0_MOSI>, 330 <PIN_PA15__SPI0_MOSI>,
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index 8488ac53d22d..ff888d21c786 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -315,7 +315,7 @@
315 label = "PB_USER"; 315 label = "PB_USER";
316 gpios = <&pioE 29 GPIO_ACTIVE_LOW>; 316 gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
317 linux,code = <0x104>; 317 linux,code = <0x104>;
318 gpio-key,wakeup; 318 wakeup-source;
319 }; 319 };
320 }; 320 };
321 321
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index 45371a1b61b3..131614f28e75 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -50,7 +50,6 @@
50 compatible = "atmel,sama5d4-xplained", "atmel,sama5d4", "atmel,sama5"; 50 compatible = "atmel,sama5d4-xplained", "atmel,sama5d4", "atmel,sama5";
51 51
52 chosen { 52 chosen {
53 bootargs = "ignore_loglevel earlyprintk";
54 stdout-path = "serial0:115200n8"; 53 stdout-path = "serial0:115200n8";
55 }; 54 };
56 55
@@ -59,15 +58,6 @@
59 }; 58 };
60 59
61 clocks { 60 clocks {
62 #address-cells = <1>;
63 #size-cells = <1>;
64 ranges;
65
66 main_clock: clock@0 {
67 compatible = "atmel,osc", "fixed-clock";
68 clock-frequency = <12000000>;
69 };
70
71 slow_xtal { 61 slow_xtal {
72 clock-frequency = <32768>; 62 clock-frequency = <32768>;
73 }; 63 };
@@ -235,7 +225,7 @@
235 label = "pb_user1"; 225 label = "pb_user1";
236 gpios = <&pioE 8 GPIO_ACTIVE_HIGH>; 226 gpios = <&pioE 8 GPIO_ACTIVE_HIGH>;
237 linux,code = <0x100>; 227 linux,code = <0x100>;
238 gpio-key,wakeup; 228 wakeup-source;
239 }; 229 };
240 }; 230 };
241 231
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index 6d272c0125e3..2d4a33100af6 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -50,7 +50,6 @@
50 compatible = "atmel,sama5d4ek", "atmel,sama5d4", "atmel,sama5"; 50 compatible = "atmel,sama5d4ek", "atmel,sama5d4", "atmel,sama5";
51 51
52 chosen { 52 chosen {
53 bootargs = "ignore_loglevel earlyprintk";
54 stdout-path = "serial0:115200n8"; 53 stdout-path = "serial0:115200n8";
55 }; 54 };
56 55
@@ -59,15 +58,6 @@
59 }; 58 };
60 59
61 clocks { 60 clocks {
62 #address-cells = <1>;
63 #size-cells = <1>;
64 ranges;
65
66 main_clock: clock@0 {
67 compatible = "atmel,osc", "fixed-clock";
68 clock-frequency = <12000000>;
69 };
70
71 slow_xtal { 61 slow_xtal {
72 clock-frequency = <32768>; 62 clock-frequency = <32768>;
73 }; 63 };
@@ -304,7 +294,7 @@
304 label = "pb_user1"; 294 label = "pb_user1";
305 gpios = <&pioE 13 GPIO_ACTIVE_HIGH>; 295 gpios = <&pioE 13 GPIO_ACTIVE_HIGH>;
306 linux,code = <0x100>; 296 linux,code = <0x100>;
307 gpio-key,wakeup; 297 wakeup-source;
308 }; 298 };
309 }; 299 };
310 300
diff --git a/arch/arm/boot/dts/at91rm9200ek.dts b/arch/arm/boot/dts/at91rm9200ek.dts
index 8dab4b75ca97..f90e1c2d3caa 100644
--- a/arch/arm/boot/dts/at91rm9200ek.dts
+++ b/arch/arm/boot/dts/at91rm9200ek.dts
@@ -21,15 +21,6 @@
21 }; 21 };
22 22
23 clocks { 23 clocks {
24 #address-cells = <1>;
25 #size-cells = <1>;
26 ranges;
27
28 main_clock: clock@0 {
29 compatible = "atmel,osc", "fixed-clock";
30 clock-frequency = <18432000>;
31 };
32
33 slow_xtal { 24 slow_xtal {
34 clock-frequency = <32768>; 25 clock-frequency = <32768>;
35 }; 26 };
diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts
index 2e92ac020f23..55bd51f07fa6 100644
--- a/arch/arm/boot/dts/at91sam9261ek.dts
+++ b/arch/arm/boot/dts/at91sam9261ek.dts
@@ -22,15 +22,6 @@
22 }; 22 };
23 23
24 clocks { 24 clocks {
25 #address-cells = <1>;
26 #size-cells = <1>;
27 ranges;
28
29 main_clock: clock@0 {
30 compatible = "atmel,osc", "fixed-clock";
31 clock-frequency = <18432000>;
32 };
33
34 slow_xtal { 25 slow_xtal {
35 clock-frequency = <32768>; 26 clock-frequency = <32768>;
36 }; 27 };
@@ -149,7 +140,7 @@
149 ti,debounce-tol = /bits/ 16 <65535>; 140 ti,debounce-tol = /bits/ 16 <65535>;
150 ti,debounce-max = /bits/ 16 <1>; 141 ti,debounce-max = /bits/ 16 <1>;
151 142
152 linux,wakeup; 143 wakeup-source;
153 }; 144 };
154 }; 145 };
155 146
@@ -193,28 +184,28 @@
193 label = "button_0"; 184 label = "button_0";
194 gpios = <&pioA 27 GPIO_ACTIVE_LOW>; 185 gpios = <&pioA 27 GPIO_ACTIVE_LOW>;
195 linux,code = <256>; 186 linux,code = <256>;
196 gpio-key,wakeup; 187 wakeup-source;
197 }; 188 };
198 189
199 button_1 { 190 button_1 {
200 label = "button_1"; 191 label = "button_1";
201 gpios = <&pioA 26 GPIO_ACTIVE_LOW>; 192 gpios = <&pioA 26 GPIO_ACTIVE_LOW>;
202 linux,code = <257>; 193 linux,code = <257>;
203 gpio-key,wakeup; 194 wakeup-source;
204 }; 195 };
205 196
206 button_2 { 197 button_2 {
207 label = "button_2"; 198 label = "button_2";
208 gpios = <&pioA 25 GPIO_ACTIVE_LOW>; 199 gpios = <&pioA 25 GPIO_ACTIVE_LOW>;
209 linux,code = <258>; 200 linux,code = <258>;
210 gpio-key,wakeup; 201 wakeup-source;
211 }; 202 };
212 203
213 button_3 { 204 button_3 {
214 label = "button_3"; 205 label = "button_3";
215 gpios = <&pioA 24 GPIO_ACTIVE_LOW>; 206 gpios = <&pioA 24 GPIO_ACTIVE_LOW>;
216 linux,code = <259>; 207 linux,code = <259>;
217 gpio-key,wakeup; 208 wakeup-source;
218 }; 209 };
219 }; 210 };
220}; 211};
diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts
index 23381276ffb8..59df9d73d276 100644
--- a/arch/arm/boot/dts/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/at91sam9263ek.dts
@@ -22,15 +22,6 @@
22 }; 22 };
23 23
24 clocks { 24 clocks {
25 #address-cells = <1>;
26 #size-cells = <1>;
27 ranges;
28
29 main_clock: clock@0 {
30 compatible = "atmel,osc", "fixed-clock";
31 clock-frequency = <16367660>;
32 };
33
34 slow_xtal { 25 slow_xtal {
35 clock-frequency = <32768>; 26 clock-frequency = <32768>;
36 }; 27 };
@@ -213,14 +204,14 @@
213 label = "left_click"; 204 label = "left_click";
214 gpios = <&pioC 5 GPIO_ACTIVE_LOW>; 205 gpios = <&pioC 5 GPIO_ACTIVE_LOW>;
215 linux,code = <272>; 206 linux,code = <272>;
216 gpio-key,wakeup; 207 wakeup-source;
217 }; 208 };
218 209
219 right_click { 210 right_click {
220 label = "right_click"; 211 label = "right_click";
221 gpios = <&pioC 4 GPIO_ACTIVE_LOW>; 212 gpios = <&pioC 4 GPIO_ACTIVE_LOW>;
222 linux,code = <273>; 213 linux,code = <273>;
223 gpio-key,wakeup; 214 wakeup-source;
224 }; 215 };
225 }; 216 };
226 217
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index 57548a2c5a1e..e9cc99b6353a 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -19,15 +19,6 @@
19 }; 19 };
20 20
21 clocks { 21 clocks {
22 #address-cells = <1>;
23 #size-cells = <1>;
24 ranges;
25
26 main_clock: clock@0 {
27 compatible = "atmel,osc", "fixed-clock";
28 clock-frequency = <18432000>;
29 };
30
31 slow_xtal { 22 slow_xtal {
32 clock-frequency = <32768>; 23 clock-frequency = <32768>;
33 }; 24 };
@@ -206,14 +197,14 @@
206 label = "Button 3"; 197 label = "Button 3";
207 gpios = <&pioA 30 GPIO_ACTIVE_LOW>; 198 gpios = <&pioA 30 GPIO_ACTIVE_LOW>;
208 linux,code = <0x103>; 199 linux,code = <0x103>;
209 gpio-key,wakeup; 200 wakeup-source;
210 }; 201 };
211 202
212 btn4 { 203 btn4 {
213 label = "Button 4"; 204 label = "Button 4";
214 gpios = <&pioA 31 GPIO_ACTIVE_LOW>; 205 gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
215 linux,code = <0x104>; 206 linux,code = <0x104>;
216 gpio-key,wakeup; 207 wakeup-source;
217 }; 208 };
218 }; 209 };
219 210
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index 9d16ef8453c5..2400c99134f7 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -24,15 +24,6 @@
24 }; 24 };
25 25
26 clocks { 26 clocks {
27 #address-cells = <1>;
28 #size-cells = <1>;
29 ranges;
30
31 main_clock: clock@0 {
32 compatible = "atmel,osc", "fixed-clock";
33 clock-frequency = <12000000>;
34 };
35
36 slow_xtal { 27 slow_xtal {
37 clock-frequency = <32768>; 28 clock-frequency = <32768>;
38 }; 29 };
@@ -323,14 +314,14 @@
323 label = "left_click"; 314 label = "left_click";
324 gpios = <&pioB 6 GPIO_ACTIVE_LOW>; 315 gpios = <&pioB 6 GPIO_ACTIVE_LOW>;
325 linux,code = <272>; 316 linux,code = <272>;
326 gpio-key,wakeup; 317 wakeup-source;
327 }; 318 };
328 319
329 right_click { 320 right_click {
330 label = "right_click"; 321 label = "right_click";
331 gpios = <&pioB 7 GPIO_ACTIVE_LOW>; 322 gpios = <&pioB 7 GPIO_ACTIVE_LOW>;
332 linux,code = <273>; 323 linux,code = <273>;
333 gpio-key,wakeup; 324 wakeup-source;
334 }; 325 };
335 326
336 left { 327 left {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index acf3451a332d..ca4ddf86817a 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -23,15 +23,6 @@
23 }; 23 };
24 24
25 clocks { 25 clocks {
26 #address-cells = <1>;
27 #size-cells = <1>;
28 ranges;
29
30 main_clock: clock@0 {
31 compatible = "atmel,osc", "fixed-clock";
32 clock-frequency = <16000000>;
33 };
34
35 slow_xtal { 26 slow_xtal {
36 clock-frequency = <32768>; 27 clock-frequency = <32768>;
37 }; 28 };
@@ -219,7 +210,7 @@
219 label = "Enter"; 210 label = "Enter";
220 gpios = <&pioB 3 GPIO_ACTIVE_LOW>; 211 gpios = <&pioB 3 GPIO_ACTIVE_LOW>;
221 linux,code = <28>; 212 linux,code = <28>;
222 gpio-key,wakeup; 213 wakeup-source;
223 }; 214 };
224 }; 215 };
225 216
diff --git a/arch/arm/boot/dts/at91sam9rlek.dts b/arch/arm/boot/dts/at91sam9rlek.dts
index 558c9f220bed..f10566f759cd 100644
--- a/arch/arm/boot/dts/at91sam9rlek.dts
+++ b/arch/arm/boot/dts/at91sam9rlek.dts
@@ -22,15 +22,6 @@
22 }; 22 };
23 23
24 clocks { 24 clocks {
25 #address-cells = <1>;
26 #size-cells = <1>;
27 ranges;
28
29 main_clock: clock {
30 compatible = "atmel,osc", "fixed-clock";
31 clock-frequency = <12000000>;
32 };
33
34 slow_xtal { 25 slow_xtal {
35 clock-frequency = <32768>; 26 clock-frequency = <32768>;
36 }; 27 };
@@ -225,14 +216,14 @@
225 label = "right_click"; 216 label = "right_click";
226 gpios = <&pioB 0 GPIO_ACTIVE_LOW>; 217 gpios = <&pioB 0 GPIO_ACTIVE_LOW>;
227 linux,code = <273>; 218 linux,code = <273>;
228 gpio-key,wakeup; 219 wakeup-source;
229 }; 220 };
230 221
231 left_click { 222 left_click {
232 label = "left_click"; 223 label = "left_click";
233 gpios = <&pioB 1 GPIO_ACTIVE_LOW>; 224 gpios = <&pioB 1 GPIO_ACTIVE_LOW>;
234 linux,code = <272>; 225 linux,code = <272>;
235 gpio-key,wakeup; 226 wakeup-source;
236 }; 227 };
237 }; 228 };
238 229
diff --git a/arch/arm/boot/dts/at91sam9x5cm.dtsi b/arch/arm/boot/dts/at91sam9x5cm.dtsi
index 26112ebd15fc..b098ad8cd93a 100644
--- a/arch/arm/boot/dts/at91sam9x5cm.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5cm.dtsi
@@ -13,17 +13,6 @@
13 }; 13 };
14 14
15 clocks { 15 clocks {
16 #address-cells = <1>;
17 #size-cells = <1>;
18 ranges;
19
20 main_clock: clock@0 {
21 compatible = "atmel,osc", "fixed-clock";
22 clock-frequency = <12000000>;
23 };
24 };
25
26 clocks {
27 slow_xtal { 16 slow_xtal {
28 clock-frequency = <32768>; 17 clock-frequency = <32768>;
29 }; 18 };
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 8ea177f375dd..fb1da99996ea 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -118,7 +118,8 @@
118 sdhci0: sdhci@ab0000 { 118 sdhci0: sdhci@ab0000 {
119 compatible = "mrvl,pxav3-mmc"; 119 compatible = "mrvl,pxav3-mmc";
120 reg = <0xab0000 0x200>; 120 reg = <0xab0000 0x200>;
121 clocks = <&chip_clk CLKID_SDIO1XIN>; 121 clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
122 clock-names = "io", "core";
122 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; 123 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
123 status = "disabled"; 124 status = "disabled";
124 }; 125 };
@@ -126,7 +127,8 @@
126 sdhci1: sdhci@ab0800 { 127 sdhci1: sdhci@ab0800 {
127 compatible = "mrvl,pxav3-mmc"; 128 compatible = "mrvl,pxav3-mmc";
128 reg = <0xab0800 0x200>; 129 reg = <0xab0800 0x200>;
129 clocks = <&chip_clk CLKID_SDIO1XIN>; 130 clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
131 clock-names = "io", "core";
130 interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>; 132 interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
131 status = "disabled"; 133 status = "disabled";
132 }; 134 };
@@ -135,7 +137,7 @@
135 compatible = "mrvl,pxav3-mmc"; 137 compatible = "mrvl,pxav3-mmc";
136 reg = <0xab1000 0x200>; 138 reg = <0xab1000 0x200>;
137 interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; 139 interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
138 clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_NFC>; 140 clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_SDIO>;
139 clock-names = "io", "core"; 141 clock-names = "io", "core";
140 status = "disabled"; 142 status = "disabled";
141 }; 143 };
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 3c99cfa1a876..eee636de4cd8 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -218,6 +218,7 @@
218 reg = <0x480c8000 0x2000>; 218 reg = <0x480c8000 0x2000>;
219 interrupts = <77>; 219 interrupts = <77>;
220 ti,hwmods = "mailbox"; 220 ti,hwmods = "mailbox";
221 #mbox-cells = <1>;
221 ti,mbox-num-users = <4>; 222 ti,mbox-num-users = <4>;
222 ti,mbox-num-fifos = <12>; 223 ti,mbox-num-fifos = <12>;
223 mbox_dsp: mbox_dsp { 224 mbox_dsp: mbox_dsp {
@@ -279,8 +280,11 @@
279 ti,spi-num-cs = <4>; 280 ti,spi-num-cs = <4>;
280 ti,hwmods = "mcspi1"; 281 ti,hwmods = "mcspi1";
281 dmas = <&edma 16 &edma 17 282 dmas = <&edma 16 &edma 17
282 &edma 18 &edma 19>; 283 &edma 18 &edma 19
283 dma-names = "tx0", "rx0", "tx1", "rx1"; 284 &edma 20 &edma 21
285 &edma 22 &edma 23>;
286 dma-names = "tx0", "rx0", "tx1", "rx1",
287 "tx2", "rx2", "tx3", "rx3";
284 }; 288 };
285 289
286 mmc1: mmc@48060000 { 290 mmc1: mmc@48060000 {
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index bc672fb91466..fe99231cbde5 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1459,8 +1459,8 @@
1459 interrupt-names = "tx", "rx"; 1459 interrupt-names = "tx", "rx";
1460 dmas = <&sdma_xbar 133>, <&sdma_xbar 132>; 1460 dmas = <&sdma_xbar 133>, <&sdma_xbar 132>;
1461 dma-names = "tx", "rx"; 1461 dma-names = "tx", "rx";
1462 clocks = <&mcasp3_ahclkx_mux>; 1462 clocks = <&mcasp3_aux_gfclk_mux>, <&mcasp3_ahclkx_mux>;
1463 clock-names = "fck"; 1463 clock-names = "fck", "ahclkx";
1464 status = "disabled"; 1464 status = "disabled";
1465 }; 1465 };
1466 1466
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index feb9d34b239c..f818ea483aeb 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -486,7 +486,10 @@
486 compatible = "fsl,imx27-usb"; 486 compatible = "fsl,imx27-usb";
487 reg = <0x10024000 0x200>; 487 reg = <0x10024000 0x200>;
488 interrupts = <56>; 488 interrupts = <56>;
489 clocks = <&clks IMX27_CLK_USB_IPG_GATE>; 489 clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
490 <&clks IMX27_CLK_USB_AHB_GATE>,
491 <&clks IMX27_CLK_USB_DIV>;
492 clock-names = "ipg", "ahb", "per";
490 fsl,usbmisc = <&usbmisc 0>; 493 fsl,usbmisc = <&usbmisc 0>;
491 status = "disabled"; 494 status = "disabled";
492 }; 495 };
@@ -495,7 +498,10 @@
495 compatible = "fsl,imx27-usb"; 498 compatible = "fsl,imx27-usb";
496 reg = <0x10024200 0x200>; 499 reg = <0x10024200 0x200>;
497 interrupts = <54>; 500 interrupts = <54>;
498 clocks = <&clks IMX27_CLK_USB_IPG_GATE>; 501 clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
502 <&clks IMX27_CLK_USB_AHB_GATE>,
503 <&clks IMX27_CLK_USB_DIV>;
504 clock-names = "ipg", "ahb", "per";
499 fsl,usbmisc = <&usbmisc 1>; 505 fsl,usbmisc = <&usbmisc 1>;
500 dr_mode = "host"; 506 dr_mode = "host";
501 status = "disabled"; 507 status = "disabled";
@@ -505,7 +511,10 @@
505 compatible = "fsl,imx27-usb"; 511 compatible = "fsl,imx27-usb";
506 reg = <0x10024400 0x200>; 512 reg = <0x10024400 0x200>;
507 interrupts = <55>; 513 interrupts = <55>;
508 clocks = <&clks IMX27_CLK_USB_IPG_GATE>; 514 clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
515 <&clks IMX27_CLK_USB_AHB_GATE>,
516 <&clks IMX27_CLK_USB_DIV>;
517 clock-names = "ipg", "ahb", "per";
509 fsl,usbmisc = <&usbmisc 2>; 518 fsl,usbmisc = <&usbmisc 2>;
510 dr_mode = "host"; 519 dr_mode = "host";
511 status = "disabled"; 520 status = "disabled";
@@ -515,7 +524,6 @@
515 #index-cells = <1>; 524 #index-cells = <1>;
516 compatible = "fsl,imx27-usbmisc"; 525 compatible = "fsl,imx27-usbmisc";
517 reg = <0x10024600 0x200>; 526 reg = <0x10024600 0x200>;
518 clocks = <&clks IMX27_CLK_USB_AHB_GATE>;
519 }; 527 };
520 528
521 sahara2: sahara@10025000 { 529 sahara2: sahara@10025000 {
diff --git a/arch/arm/boot/dts/k2l-netcp.dtsi b/arch/arm/boot/dts/k2l-netcp.dtsi
index 01aef230773d..5acbd0dcc2ab 100644
--- a/arch/arm/boot/dts/k2l-netcp.dtsi
+++ b/arch/arm/boot/dts/k2l-netcp.dtsi
@@ -137,7 +137,7 @@ netcp: netcp@26000000 {
137 /* NetCP address range */ 137 /* NetCP address range */
138 ranges = <0 0x26000000 0x1000000>; 138 ranges = <0 0x26000000 0x1000000>;
139 139
140 clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>; 140 clocks = <&clkosr>, <&papllclk>, <&clkcpgmac>, <&chipclk12>;
141 dma-coherent; 141 dma-coherent;
142 142
143 ti,navigator-dmas = <&dma_gbe 0>, 143 ti,navigator-dmas = <&dma_gbe 0>,
diff --git a/arch/arm/boot/dts/kirkwood-ts219.dtsi b/arch/arm/boot/dts/kirkwood-ts219.dtsi
index c56ab6bbfe3c..0e46560551f4 100644
--- a/arch/arm/boot/dts/kirkwood-ts219.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ts219.dtsi
@@ -40,7 +40,7 @@
40 }; 40 };
41 poweroff@12100 { 41 poweroff@12100 {
42 compatible = "qnap,power-off"; 42 compatible = "qnap,power-off";
43 reg = <0x12000 0x100>; 43 reg = <0x12100 0x100>;
44 clocks = <&gate_clk 7>; 44 clocks = <&gate_clk 7>;
45 }; 45 };
46 spi@10600 { 46 spi@10600 {
diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
index 8fd8ef2c72da..85f0373df498 100644
--- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
@@ -86,6 +86,10 @@
86 }; 86 };
87}; 87};
88 88
89&emmc {
90 /delete-property/mmc-hs200-1_8v;
91};
92
89&gpio_keys { 93&gpio_keys {
90 pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>; 94 pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;
91 95
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 6a79c9c526b8..04ea209f1737 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -452,8 +452,10 @@
452 clock-names = "tsadc", "apb_pclk"; 452 clock-names = "tsadc", "apb_pclk";
453 resets = <&cru SRST_TSADC>; 453 resets = <&cru SRST_TSADC>;
454 reset-names = "tsadc-apb"; 454 reset-names = "tsadc-apb";
455 pinctrl-names = "default"; 455 pinctrl-names = "init", "default", "sleep";
456 pinctrl-0 = <&otp_out>; 456 pinctrl-0 = <&otp_gpio>;
457 pinctrl-1 = <&otp_out>;
458 pinctrl-2 = <&otp_gpio>;
457 #thermal-sensor-cells = <1>; 459 #thermal-sensor-cells = <1>;
458 rockchip,hw-tshut-temp = <95000>; 460 rockchip,hw-tshut-temp = <95000>;
459 status = "disabled"; 461 status = "disabled";
@@ -1395,6 +1397,10 @@
1395 }; 1397 };
1396 1398
1397 tsadc { 1399 tsadc {
1400 otp_gpio: otp-gpio {
1401 rockchip,pins = <0 10 RK_FUNC_GPIO &pcfg_pull_none>;
1402 };
1403
1398 otp_out: otp-out { 1404 otp_out: otp-out {
1399 rockchip,pins = <0 10 RK_FUNC_1 &pcfg_pull_none>; 1405 rockchip,pins = <0 10 RK_FUNC_1 &pcfg_pull_none>;
1400 }; 1406 };
diff --git a/arch/arm/boot/dts/sama5d35ek.dts b/arch/arm/boot/dts/sama5d35ek.dts
index d9a9aca1ccfd..e812f5c1bf70 100644
--- a/arch/arm/boot/dts/sama5d35ek.dts
+++ b/arch/arm/boot/dts/sama5d35ek.dts
@@ -49,7 +49,7 @@
49 label = "pb_user1"; 49 label = "pb_user1";
50 gpios = <&pioE 27 GPIO_ACTIVE_HIGH>; 50 gpios = <&pioE 27 GPIO_ACTIVE_HIGH>;
51 linux,code = <0x100>; 51 linux,code = <0x100>;
52 gpio-key,wakeup; 52 wakeup-source;
53 }; 53 };
54 }; 54 };
55}; 55};
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 15bbaf690047..2193637b9cd2 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1300,7 +1300,7 @@
1300 }; 1300 };
1301 1301
1302 watchdog@fc068640 { 1302 watchdog@fc068640 {
1303 compatible = "atmel,at91sam9260-wdt"; 1303 compatible = "atmel,sama5d4-wdt";
1304 reg = <0xfc068640 0x10>; 1304 reg = <0xfc068640 0x10>;
1305 clocks = <&clk32k>; 1305 clocks = <&clk32k>;
1306 status = "disabled"; 1306 status = "disabled";
diff --git a/arch/arm/boot/dts/usb_a9260_common.dtsi b/arch/arm/boot/dts/usb_a9260_common.dtsi
index 12edafefd44a..9beea8976584 100644
--- a/arch/arm/boot/dts/usb_a9260_common.dtsi
+++ b/arch/arm/boot/dts/usb_a9260_common.dtsi
@@ -115,7 +115,7 @@
115 label = "user_pb"; 115 label = "user_pb";
116 gpios = <&pioB 10 GPIO_ACTIVE_LOW>; 116 gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
117 linux,code = <28>; 117 linux,code = <28>;
118 gpio-key,wakeup; 118 wakeup-source;
119 }; 119 };
120 }; 120 };
121 121
diff --git a/arch/arm/boot/dts/usb_a9263.dts b/arch/arm/boot/dts/usb_a9263.dts
index 68c0de36c339..8cc6edb29694 100644
--- a/arch/arm/boot/dts/usb_a9263.dts
+++ b/arch/arm/boot/dts/usb_a9263.dts
@@ -143,7 +143,7 @@
143 label = "user_pb"; 143 label = "user_pb";
144 gpios = <&pioB 10 GPIO_ACTIVE_LOW>; 144 gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
145 linux,code = <28>; 145 linux,code = <28>;
146 gpio-key,wakeup; 146 wakeup-source;
147 }; 147 };
148 }; 148 };
149 149
diff --git a/arch/arm/boot/dts/vf610-colibri.dtsi b/arch/arm/boot/dts/vf610-colibri.dtsi
index 19fe045b8334..2d7eab755210 100644
--- a/arch/arm/boot/dts/vf610-colibri.dtsi
+++ b/arch/arm/boot/dts/vf610-colibri.dtsi
@@ -18,8 +18,3 @@
18 reg = <0x80000000 0x10000000>; 18 reg = <0x80000000 0x10000000>;
19 }; 19 };
20}; 20};
21
22&L2 {
23 arm,data-latency = <2 1 2>;
24 arm,tag-latency = <3 2 3>;
25};
diff --git a/arch/arm/boot/dts/vf610.dtsi b/arch/arm/boot/dts/vf610.dtsi
index 5f8eb1bd782b..58bc6e448be5 100644
--- a/arch/arm/boot/dts/vf610.dtsi
+++ b/arch/arm/boot/dts/vf610.dtsi
@@ -19,7 +19,7 @@
19 reg = <0x40006000 0x1000>; 19 reg = <0x40006000 0x1000>;
20 cache-unified; 20 cache-unified;
21 cache-level = <2>; 21 cache-level = <2>;
22 arm,data-latency = <1 1 1>; 22 arm,data-latency = <3 3 3>;
23 arm,tag-latency = <2 2 2>; 23 arm,tag-latency = <2 2 2>;
24 }; 24 };
25}; 25};
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 6736bae43a5b..3cd1b27f2697 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -158,7 +158,7 @@
158 interrupts = <67 IRQ_TYPE_LEVEL_HIGH>; 158 interrupts = <67 IRQ_TYPE_LEVEL_HIGH>;
159 clocks = <&clks VF610_CLK_DSPI0>; 159 clocks = <&clks VF610_CLK_DSPI0>;
160 clock-names = "dspi"; 160 clock-names = "dspi";
161 spi-num-chipselects = <5>; 161 spi-num-chipselects = <6>;
162 status = "disabled"; 162 status = "disabled";
163 }; 163 };
164 164
@@ -170,7 +170,7 @@
170 interrupts = <68 IRQ_TYPE_LEVEL_HIGH>; 170 interrupts = <68 IRQ_TYPE_LEVEL_HIGH>;
171 clocks = <&clks VF610_CLK_DSPI1>; 171 clocks = <&clks VF610_CLK_DSPI1>;
172 clock-names = "dspi"; 172 clock-names = "dspi";
173 spi-num-chipselects = <5>; 173 spi-num-chipselects = <4>;
174 status = "disabled"; 174 status = "disabled";
175 }; 175 };
176 176
@@ -178,8 +178,10 @@
178 compatible = "fsl,vf610-sai"; 178 compatible = "fsl,vf610-sai";
179 reg = <0x40031000 0x1000>; 179 reg = <0x40031000 0x1000>;
180 interrupts = <86 IRQ_TYPE_LEVEL_HIGH>; 180 interrupts = <86 IRQ_TYPE_LEVEL_HIGH>;
181 clocks = <&clks VF610_CLK_SAI2>; 181 clocks = <&clks VF610_CLK_SAI2>,
182 clock-names = "sai"; 182 <&clks VF610_CLK_SAI2_DIV>,
183 <&clks 0>, <&clks 0>;
184 clock-names = "bus", "mclk1", "mclk2", "mclk3";
183 dma-names = "tx", "rx"; 185 dma-names = "tx", "rx";
184 dmas = <&edma0 0 21>, 186 dmas = <&edma0 0 21>,
185 <&edma0 0 20>; 187 <&edma0 0 20>;
@@ -461,6 +463,8 @@
461 clock-names = "adc"; 463 clock-names = "adc";
462 #io-channel-cells = <1>; 464 #io-channel-cells = <1>;
463 status = "disabled"; 465 status = "disabled";
466 fsl,adck-max-frequency = <30000000>, <40000000>,
467 <20000000>;
464 }; 468 };
465 469
466 esdhc0: esdhc@400b1000 { 470 esdhc0: esdhc@400b1000 {
@@ -472,8 +476,6 @@
472 <&clks VF610_CLK_ESDHC0>; 476 <&clks VF610_CLK_ESDHC0>;
473 clock-names = "ipg", "ahb", "per"; 477 clock-names = "ipg", "ahb", "per";
474 status = "disabled"; 478 status = "disabled";
475 fsl,adck-max-frequency = <30000000>, <40000000>,
476 <20000000>;
477 }; 479 };
478 480
479 esdhc1: esdhc@400b2000 { 481 esdhc1: esdhc@400b2000 {
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index 1b1e5acd76e2..e4b1be66b3f5 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -125,7 +125,6 @@ CONFIG_POWER_RESET=y
125# CONFIG_HWMON is not set 125# CONFIG_HWMON is not set
126CONFIG_WATCHDOG=y 126CONFIG_WATCHDOG=y
127CONFIG_AT91SAM9X_WATCHDOG=y 127CONFIG_AT91SAM9X_WATCHDOG=y
128CONFIG_SSB=m
129CONFIG_MFD_ATMEL_HLCDC=y 128CONFIG_MFD_ATMEL_HLCDC=y
130CONFIG_REGULATOR=y 129CONFIG_REGULATOR=y
131CONFIG_REGULATOR_FIXED_VOLTAGE=y 130CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index a0c57ac88b27..63f7e6ce649a 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -129,7 +129,6 @@ CONFIG_GPIO_SYSFS=y
129CONFIG_POWER_SUPPLY=y 129CONFIG_POWER_SUPPLY=y
130CONFIG_POWER_RESET=y 130CONFIG_POWER_RESET=y
131# CONFIG_HWMON is not set 131# CONFIG_HWMON is not set
132CONFIG_SSB=m
133CONFIG_MFD_ATMEL_FLEXCOM=y 132CONFIG_MFD_ATMEL_FLEXCOM=y
134CONFIG_REGULATOR=y 133CONFIG_REGULATOR=y
135CONFIG_REGULATOR_FIXED_VOLTAGE=y 134CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 6607d976e07d..7da5503c0591 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -21,6 +21,7 @@
21#ifndef __ASSEMBLY__ 21#ifndef __ASSEMBLY__
22 22
23#include <linux/io.h> 23#include <linux/io.h>
24#include <asm/barrier.h>
24 25
25#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2 26#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2
26#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm 27#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index be1d07d59ee9..1bd9510de1b9 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -40,6 +40,11 @@ extern void arch_trigger_all_cpu_backtrace(bool);
40#define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x) 40#define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x)
41#endif 41#endif
42 42
43static inline int nr_legacy_irqs(void)
44{
45 return NR_IRQS_LEGACY;
46}
47
43#endif 48#endif
44 49
45#endif 50#endif
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index a9c80a2ea1a7..3095df091ff8 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -28,6 +28,18 @@
28unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); 28unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
29unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); 29unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
30 30
31static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
32 u8 reg_num)
33{
34 return *vcpu_reg(vcpu, reg_num);
35}
36
37static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
38 unsigned long val)
39{
40 *vcpu_reg(vcpu, reg_num) = val;
41}
42
31bool kvm_condition_valid(struct kvm_vcpu *vcpu); 43bool kvm_condition_valid(struct kvm_vcpu *vcpu);
32void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); 44void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
33void kvm_inject_undefined(struct kvm_vcpu *vcpu); 45void kvm_inject_undefined(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 8cc85a4ebec2..35c9db857ebe 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
510static inline unsigned long __must_check 510static inline unsigned long __must_check
511__copy_to_user(void __user *to, const void *from, unsigned long n) 511__copy_to_user(void __user *to, const void *from, unsigned long n)
512{ 512{
513#ifndef CONFIG_UACCESS_WITH_MEMCPY
513 unsigned int __ua_flags = uaccess_save_and_enable(); 514 unsigned int __ua_flags = uaccess_save_and_enable();
514 n = arm_copy_to_user(to, from, n); 515 n = arm_copy_to_user(to, from, n);
515 uaccess_restore(__ua_flags); 516 uaccess_restore(__ua_flags);
516 return n; 517 return n;
518#else
519 return arm_copy_to_user(to, from, n);
520#endif
517} 521}
518 522
519extern unsigned long __must_check 523extern unsigned long __must_check
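When CONFIG_UACCESS_WITH_MEMCPY is enabled, arm_copy_to_user() goes through the memcpy-based helpers, which after this series open the uaccess window only around the actual user-memory accesses (see the lib/uaccess_with_memcpy.c hunk further down); enabling it here as well would leave the window open across the page-table walk in pin_page_for_write(). The bracket pattern being enforced, as a minimal sketch mirroring that later hunk:

    unsigned long ua_flags = uaccess_save_and_enable();
    memcpy((void *)to, from, tocopy);   /* the only user-memory touch */
    uaccess_restore(ua_flags);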
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 7a2a32a1d5a8..ede692ffa32e 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -416,6 +416,7 @@
416#define __NR_execveat (__NR_SYSCALL_BASE+387) 416#define __NR_execveat (__NR_SYSCALL_BASE+387)
417#define __NR_userfaultfd (__NR_SYSCALL_BASE+388) 417#define __NR_userfaultfd (__NR_SYSCALL_BASE+388)
418#define __NR_membarrier (__NR_SYSCALL_BASE+389) 418#define __NR_membarrier (__NR_SYSCALL_BASE+389)
419#define __NR_mlock2 (__NR_SYSCALL_BASE+390)
419 420
420/* 421/*
421 * The following SWIs are ARM private. 422 * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 6551d28c27e6..066f7f9ba411 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -17,11 +17,6 @@
17#include <asm/mach/pci.h> 17#include <asm/mach/pci.h>
18 18
19static int debug_pci; 19static int debug_pci;
20static resource_size_t (*align_resource)(struct pci_dev *dev,
21 const struct resource *res,
22 resource_size_t start,
23 resource_size_t size,
24 resource_size_t align) = NULL;
25 20
26/* 21/*
27 * We can't use pci_get_device() here since we are 22 * We can't use pci_get_device() here since we are
@@ -461,7 +456,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
461 sys->busnr = busnr; 456 sys->busnr = busnr;
462 sys->swizzle = hw->swizzle; 457 sys->swizzle = hw->swizzle;
463 sys->map_irq = hw->map_irq; 458 sys->map_irq = hw->map_irq;
464 align_resource = hw->align_resource;
465 INIT_LIST_HEAD(&sys->resources); 459 INIT_LIST_HEAD(&sys->resources);
466 460
467 if (hw->private_data) 461 if (hw->private_data)
@@ -470,6 +464,8 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
470 ret = hw->setup(nr, sys); 464 ret = hw->setup(nr, sys);
471 465
472 if (ret > 0) { 466 if (ret > 0) {
467 struct pci_host_bridge *host_bridge;
468
473 ret = pcibios_init_resources(nr, sys); 469 ret = pcibios_init_resources(nr, sys);
474 if (ret) { 470 if (ret) {
475 kfree(sys); 471 kfree(sys);
@@ -491,6 +487,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
491 busnr = sys->bus->busn_res.end + 1; 487 busnr = sys->bus->busn_res.end + 1;
492 488
493 list_add(&sys->node, head); 489 list_add(&sys->node, head);
490
491 host_bridge = pci_find_host_bridge(sys->bus);
492 host_bridge->align_resource = hw->align_resource;
494 } else { 493 } else {
495 kfree(sys); 494 kfree(sys);
496 if (ret < 0) 495 if (ret < 0)
@@ -578,14 +577,18 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
578{ 577{
579 struct pci_dev *dev = data; 578 struct pci_dev *dev = data;
580 resource_size_t start = res->start; 579 resource_size_t start = res->start;
580 struct pci_host_bridge *host_bridge;
581 581
582 if (res->flags & IORESOURCE_IO && start & 0x300) 582 if (res->flags & IORESOURCE_IO && start & 0x300)
583 start = (start + 0x3ff) & ~0x3ff; 583 start = (start + 0x3ff) & ~0x3ff;
584 584
585 start = (start + align - 1) & ~(align - 1); 585 start = (start + align - 1) & ~(align - 1);
586 586
587 if (align_resource) 587 host_bridge = pci_find_host_bridge(dev->bus);
588 return align_resource(dev, res, start, size, align); 588
589 if (host_bridge->align_resource)
590 return host_bridge->align_resource(dev, res,
591 start, size, align);
589 592
590 return start; 593 return start;
591} 594}
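The bios32.c change retires a single file-scope align_resource pointer in favour of a field on struct pci_host_bridge, presumably so that systems with more than one PCI controller each keep their own alignment hook rather than whichever registered last. Stripped of the two-column diff noise, the per-bridge lookup is:

    struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);

    if (bridge->align_resource)
            start = bridge->align_resource(dev, res, start, size, align);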
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index fde6c88d560c..ac368bb068d1 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -399,6 +399,7 @@
399 CALL(sys_execveat) 399 CALL(sys_execveat)
400 CALL(sys_userfaultfd) 400 CALL(sys_userfaultfd)
401 CALL(sys_membarrier) 401 CALL(sys_membarrier)
402 CALL(sys_mlock2)
402#ifndef syscalls_counted 403#ifndef syscalls_counted
403.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 404.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
404#define syscalls_counted 405#define syscalls_counted
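Together with the __NR_mlock2 definition added to uapi/asm/unistd.h above, this wires up mlock2() as ARM syscall 390. A hypothetical userspace sketch of invoking it via syscall(2), since libc had no wrapper at the time; MLOCK_ONFAULT asks for pages to be locked lazily as they are first faulted in:

    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_mlock2
    #define __NR_mlock2    390     /* ARM EABI, per the hunk above */
    #endif
    #ifndef MLOCK_ONFAULT
    #define MLOCK_ONFAULT  0x01    /* from <linux/mman.h> */
    #endif

    static int lock_on_fault(void *addr, size_t len)
    {
            return syscall(__NR_mlock2, addr, len, MLOCK_ONFAULT);
    }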
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 7a7c4cea5523..4adfb46e3ee9 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs)
95{ 95{
96 unsigned long flags; 96 unsigned long flags;
97 char buf[64]; 97 char buf[64];
98#ifndef CONFIG_CPU_V7M
99 unsigned int domain;
100#ifdef CONFIG_CPU_SW_DOMAIN_PAN
101 /*
102 * Get the domain register for the parent context. In user
 103 * mode, we don't save the DACR, so let's use what it should
104 * be. For other modes, we place it after the pt_regs struct.
105 */
106 if (user_mode(regs))
107 domain = DACR_UACCESS_ENABLE;
108 else
109 domain = *(unsigned int *)(regs + 1);
110#else
111 domain = get_domain();
112#endif
113#endif
98 114
99 show_regs_print_info(KERN_DEFAULT); 115 show_regs_print_info(KERN_DEFAULT);
100 116
@@ -123,21 +139,8 @@ void __show_regs(struct pt_regs *regs)
123 139
124#ifndef CONFIG_CPU_V7M 140#ifndef CONFIG_CPU_V7M
125 { 141 {
126 unsigned int domain = get_domain();
127 const char *segment; 142 const char *segment;
128 143
129#ifdef CONFIG_CPU_SW_DOMAIN_PAN
130 /*
131 * Get the domain register for the parent context. In user
132 * mode, we don't save the DACR, so let's use what it should
133 * be. For other modes, we place it after the pt_regs struct.
134 */
135 if (user_mode(regs))
136 domain = DACR_UACCESS_ENABLE;
137 else
138 domain = *(unsigned int *)(regs + 1);
139#endif
140
141 if ((domain & domain_mask(DOMAIN_USER)) == 144 if ((domain & domain_mask(DOMAIN_USER)) ==
142 domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) 145 domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
143 segment = "none"; 146 segment = "none";
@@ -163,11 +166,11 @@ void __show_regs(struct pt_regs *regs)
163 buf[0] = '\0'; 166 buf[0] = '\0';
164#ifdef CONFIG_CPU_CP15_MMU 167#ifdef CONFIG_CPU_CP15_MMU
165 { 168 {
166 unsigned int transbase, dac = get_domain(); 169 unsigned int transbase;
167 asm("mrc p15, 0, %0, c2, c0\n\t" 170 asm("mrc p15, 0, %0, c2, c0\n\t"
168 : "=r" (transbase)); 171 : "=r" (transbase));
169 snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", 172 snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
170 transbase, dac); 173 transbase, domain);
171 } 174 }
172#endif 175#endif
173 asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl)); 176 asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 5b26e7efa9ea..c3fe769d7558 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -36,10 +36,10 @@
36 */ 36 */
37#define __user_swpX_asm(data, addr, res, temp, B) \ 37#define __user_swpX_asm(data, addr, res, temp, B) \
38 __asm__ __volatile__( \ 38 __asm__ __volatile__( \
39 " mov %2, %1\n" \ 39 "0: ldrex"B" %2, [%3]\n" \
40 "0: ldrex"B" %1, [%3]\n" \ 40 "1: strex"B" %0, %1, [%3]\n" \
41 "1: strex"B" %0, %2, [%3]\n" \
42 " cmp %0, #0\n" \ 41 " cmp %0, #0\n" \
42 " moveq %1, %2\n" \
43 " movne %0, %4\n" \ 43 " movne %0, %4\n" \
44 "2:\n" \ 44 "2:\n" \
45 " .section .text.fixup,\"ax\"\n" \ 45 " .section .text.fixup,\"ax\"\n" \
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index eab83b2435b8..e06fd299de08 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -564,17 +564,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
564 vcpu_sleep(vcpu); 564 vcpu_sleep(vcpu);
565 565
566 /* 566 /*
567 * Disarming the background timer must be done in a
568 * preemptible context, as this call may sleep.
569 */
570 kvm_timer_flush_hwstate(vcpu);
571
572 /*
573 * Preparing the interrupts to be injected also 567 * Preparing the interrupts to be injected also
574 * involves poking the GIC, which must be done in a 568 * involves poking the GIC, which must be done in a
575 * non-preemptible context. 569 * non-preemptible context.
576 */ 570 */
577 preempt_disable(); 571 preempt_disable();
572 kvm_timer_flush_hwstate(vcpu);
578 kvm_vgic_flush_hwstate(vcpu); 573 kvm_vgic_flush_hwstate(vcpu);
579 574
580 local_irq_disable(); 575 local_irq_disable();
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 974b1c606d04..3a10c9f1d0a4 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
115 trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, 115 trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
116 data); 116 data);
117 data = vcpu_data_host_to_guest(vcpu, data, len); 117 data = vcpu_data_host_to_guest(vcpu, data, len);
118 *vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data; 118 vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
119 } 119 }
120 120
121 return 0; 121 return 0;
@@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
186 rt = vcpu->arch.mmio_decode.rt; 186 rt = vcpu->arch.mmio_decode.rt;
187 187
188 if (is_write) { 188 if (is_write) {
189 data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len); 189 data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
190 len);
190 191
191 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data); 192 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
192 mmio_write_buf(data_buf, len, data); 193 mmio_write_buf(data_buf, len, data);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 6984342da13d..61d96a645ff3 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
98 __kvm_flush_dcache_pud(pud); 98 __kvm_flush_dcache_pud(pud);
99} 99}
100 100
101static bool kvm_is_device_pfn(unsigned long pfn)
102{
103 return !pfn_valid(pfn);
104}
105
101/** 106/**
102 * stage2_dissolve_pmd() - clear and flush huge PMD entry 107 * stage2_dissolve_pmd() - clear and flush huge PMD entry
103 * @kvm: pointer to kvm structure. 108 * @kvm: pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
213 kvm_tlb_flush_vmid_ipa(kvm, addr); 218 kvm_tlb_flush_vmid_ipa(kvm, addr);
214 219
215 /* No need to invalidate the cache for device mappings */ 220 /* No need to invalidate the cache for device mappings */
216 if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) 221 if (!kvm_is_device_pfn(pte_pfn(old_pte)))
217 kvm_flush_dcache_pte(old_pte); 222 kvm_flush_dcache_pte(old_pte);
218 223
219 put_page(virt_to_page(pte)); 224 put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
305 310
306 pte = pte_offset_kernel(pmd, addr); 311 pte = pte_offset_kernel(pmd, addr);
307 do { 312 do {
308 if (!pte_none(*pte) && 313 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
309 (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
310 kvm_flush_dcache_pte(*pte); 314 kvm_flush_dcache_pte(*pte);
311 } while (pte++, addr += PAGE_SIZE, addr != end); 315 } while (pte++, addr += PAGE_SIZE, addr != end);
312} 316}
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
1037 return kvm_vcpu_dabt_iswrite(vcpu); 1041 return kvm_vcpu_dabt_iswrite(vcpu);
1038} 1042}
1039 1043
1040static bool kvm_is_device_pfn(unsigned long pfn)
1041{
1042 return !pfn_valid(pfn);
1043}
1044
1045/** 1044/**
1046 * stage2_wp_ptes - write protect PMD range 1045 * stage2_wp_ptes - write protect PMD range
1047 * @pmd: pointer to pmd entry 1046 * @pmd: pointer to pmd entry
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 0b556968a6da..a9b3b905e661 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
75 unsigned long context_id; 75 unsigned long context_id;
76 phys_addr_t target_pc; 76 phys_addr_t target_pc;
77 77
78 cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK; 78 cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
79 if (vcpu_mode_is_32bit(source_vcpu)) 79 if (vcpu_mode_is_32bit(source_vcpu))
80 cpu_id &= ~((u32) 0); 80 cpu_id &= ~((u32) 0);
81 81
@@ -94,8 +94,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
94 return PSCI_RET_INVALID_PARAMS; 94 return PSCI_RET_INVALID_PARAMS;
95 } 95 }
96 96
97 target_pc = *vcpu_reg(source_vcpu, 2); 97 target_pc = vcpu_get_reg(source_vcpu, 2);
98 context_id = *vcpu_reg(source_vcpu, 3); 98 context_id = vcpu_get_reg(source_vcpu, 3);
99 99
100 kvm_reset_vcpu(vcpu); 100 kvm_reset_vcpu(vcpu);
101 101
@@ -114,7 +114,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
114 * NOTE: We always update r0 (or x0) because for PSCI v0.1 114 * NOTE: We always update r0 (or x0) because for PSCI v0.1
 115 * the general purpose registers are undefined upon CPU_ON. 115 * the general purpose registers are undefined upon CPU_ON.
116 */ 116 */
117 *vcpu_reg(vcpu, 0) = context_id; 117 vcpu_set_reg(vcpu, 0, context_id);
118 vcpu->arch.power_off = false; 118 vcpu->arch.power_off = false;
119 smp_mb(); /* Make sure the above is visible */ 119 smp_mb(); /* Make sure the above is visible */
120 120
@@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
134 struct kvm *kvm = vcpu->kvm; 134 struct kvm *kvm = vcpu->kvm;
135 struct kvm_vcpu *tmp; 135 struct kvm_vcpu *tmp;
136 136
137 target_affinity = *vcpu_reg(vcpu, 1); 137 target_affinity = vcpu_get_reg(vcpu, 1);
138 lowest_affinity_level = *vcpu_reg(vcpu, 2); 138 lowest_affinity_level = vcpu_get_reg(vcpu, 2);
139 139
140 /* Determine target affinity mask */ 140 /* Determine target affinity mask */
141 target_affinity_mask = psci_affinity_mask(lowest_affinity_level); 141 target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -209,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
209static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) 209static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
210{ 210{
211 int ret = 1; 211 int ret = 1;
212 unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0); 212 unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
213 unsigned long val; 213 unsigned long val;
214 214
215 switch (psci_fn) { 215 switch (psci_fn) {
@@ -273,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
273 break; 273 break;
274 } 274 }
275 275
276 *vcpu_reg(vcpu, 0) = val; 276 vcpu_set_reg(vcpu, 0, val);
277 return ret; 277 return ret;
278} 278}
279 279
280static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) 280static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
281{ 281{
282 unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0); 282 unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
283 unsigned long val; 283 unsigned long val;
284 284
285 switch (psci_fn) { 285 switch (psci_fn) {
@@ -295,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
295 break; 295 break;
296 } 296 }
297 297
298 *vcpu_reg(vcpu, 0) = val; 298 vcpu_set_reg(vcpu, 0, val);
299 return 1; 299 return 1;
300} 300}
301 301
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index d72b90905132..588bbc288396 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
88static unsigned long noinline 88static unsigned long noinline
89__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) 89__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
90{ 90{
91 unsigned long ua_flags;
91 int atomic; 92 int atomic;
92 93
93 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { 94 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
@@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
118 if (tocopy > n) 119 if (tocopy > n)
119 tocopy = n; 120 tocopy = n;
120 121
122 ua_flags = uaccess_save_and_enable();
121 memcpy((void *)to, from, tocopy); 123 memcpy((void *)to, from, tocopy);
124 uaccess_restore(ua_flags);
122 to += tocopy; 125 to += tocopy;
123 from += tocopy; 126 from += tocopy;
124 n -= tocopy; 127 n -= tocopy;
@@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
145 * With frame pointer disabled, tail call optimization kicks in 148 * With frame pointer disabled, tail call optimization kicks in
146 * as well making this test almost invisible. 149 * as well making this test almost invisible.
147 */ 150 */
148 if (n < 64) 151 if (n < 64) {
149 return __copy_to_user_std(to, from, n); 152 unsigned long ua_flags = uaccess_save_and_enable();
150 return __copy_to_user_memcpy(to, from, n); 153 n = __copy_to_user_std(to, from, n);
154 uaccess_restore(ua_flags);
155 } else {
156 n = __copy_to_user_memcpy(to, from, n);
157 }
158 return n;
151} 159}
152 160
153static unsigned long noinline 161static unsigned long noinline
154__clear_user_memset(void __user *addr, unsigned long n) 162__clear_user_memset(void __user *addr, unsigned long n)
155{ 163{
164 unsigned long ua_flags;
165
156 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { 166 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
157 memset((void *)addr, 0, n); 167 memset((void *)addr, 0, n);
158 return 0; 168 return 0;
@@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n)
175 if (tocopy > n) 185 if (tocopy > n)
176 tocopy = n; 186 tocopy = n;
177 187
188 ua_flags = uaccess_save_and_enable();
178 memset((void *)addr, 0, tocopy); 189 memset((void *)addr, 0, tocopy);
190 uaccess_restore(ua_flags);
179 addr += tocopy; 191 addr += tocopy;
180 n -= tocopy; 192 n -= tocopy;
181 193
@@ -193,9 +205,14 @@ out:
193unsigned long arm_clear_user(void __user *addr, unsigned long n) 205unsigned long arm_clear_user(void __user *addr, unsigned long n)
194{ 206{
195 /* See rationale for this in __copy_to_user() above. */ 207 /* See rationale for this in __copy_to_user() above. */
196 if (n < 64) 208 if (n < 64) {
197 return __clear_user_std(addr, n); 209 unsigned long ua_flags = uaccess_save_and_enable();
198 return __clear_user_memset(addr, n); 210 n = __clear_user_std(addr, n);
211 uaccess_restore(ua_flags);
212 } else {
213 n = __clear_user_memset(addr, n);
214 }
215 return n;
199} 216}
200 217
201#if 0 218#if 0
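
Review note on the pattern in this file: every raw memcpy()/memset() on a user address now sits between uaccess_save_and_enable() and uaccess_restore(), and the short-copy fast path opens the uaccess window around the whole __copy_to_user_std() call rather than inside it. A minimal stand-alone C sketch of that bracketing discipline, with the two helpers reduced to printing stubs (the real ones flip CPU state such as the ARM domain register):

#include <stdio.h>
#include <string.h>

/* Stubs standing in for the kernel's uaccess_save_and_enable() and
 * uaccess_restore(): on real hardware these toggle the CPU state that
 * permits kernel accesses to user mappings, returning the old state. */
static unsigned long uaccess_save_and_enable(void)
{
        printf("uaccess: enabled\n");
        return 0xdead;          /* previous state, to be restored */
}

static void uaccess_restore(unsigned long flags)
{
        printf("uaccess: restored (flags=%#lx)\n", flags);
}

/* Copy to a "user" buffer: the access window is open only around the
 * memcpy() itself, never across unrelated work. */
static unsigned long copy_chunk(void *to, const void *from, unsigned long n)
{
        unsigned long ua_flags;

        ua_flags = uaccess_save_and_enable();
        memcpy(to, from, n);
        uaccess_restore(ua_flags);
        return 0;
}

int main(void)
{
        char dst[16];
        return (int)copy_chunk(dst, "hello", 6);
}
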
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 92673006e55c..28656c2b54a0 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -4,7 +4,6 @@ menuconfig ARCH_AT91
4 select ARCH_REQUIRE_GPIOLIB 4 select ARCH_REQUIRE_GPIOLIB
5 select COMMON_CLK_AT91 5 select COMMON_CLK_AT91
6 select PINCTRL 6 select PINCTRL
7 select PINCTRL_AT91
8 select SOC_BUS 7 select SOC_BUS
9 8
10if ARCH_AT91 9if ARCH_AT91
@@ -17,6 +16,7 @@ config SOC_SAMA5D2
17 select HAVE_AT91_USB_CLK 16 select HAVE_AT91_USB_CLK
18 select HAVE_AT91_H32MX 17 select HAVE_AT91_H32MX
19 select HAVE_AT91_GENERATED_CLK 18 select HAVE_AT91_GENERATED_CLK
19 select PINCTRL_AT91PIO4
20 help 20 help
21 Select this if you are using one of Atmel's SAMA5D2 family SoC. 21 Select this if you are using one of Atmel's SAMA5D2 family SoC.
22 22
@@ -27,6 +27,7 @@ config SOC_SAMA5D3
27 select HAVE_AT91_UTMI 27 select HAVE_AT91_UTMI
28 select HAVE_AT91_SMD 28 select HAVE_AT91_SMD
29 select HAVE_AT91_USB_CLK 29 select HAVE_AT91_USB_CLK
30 select PINCTRL_AT91
30 help 31 help
31 Select this if you are using one of Atmel's SAMA5D3 family SoC. 32 Select this if you are using one of Atmel's SAMA5D3 family SoC.
32 This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36. 33 This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36.
@@ -40,6 +41,7 @@ config SOC_SAMA5D4
40 select HAVE_AT91_SMD 41 select HAVE_AT91_SMD
41 select HAVE_AT91_USB_CLK 42 select HAVE_AT91_USB_CLK
42 select HAVE_AT91_H32MX 43 select HAVE_AT91_H32MX
44 select PINCTRL_AT91
43 help 45 help
44 Select this if you are using one of Atmel's SAMA5D4 family SoC. 46 Select this if you are using one of Atmel's SAMA5D4 family SoC.
45 47
@@ -50,6 +52,7 @@ config SOC_AT91RM9200
50 select CPU_ARM920T 52 select CPU_ARM920T
51 select HAVE_AT91_USB_CLK 53 select HAVE_AT91_USB_CLK
52 select MIGHT_HAVE_PCI 54 select MIGHT_HAVE_PCI
55 select PINCTRL_AT91
53 select SOC_SAM_V4_V5 56 select SOC_SAM_V4_V5
54 select SRAM if PM 57 select SRAM if PM
55 help 58 help
@@ -65,6 +68,7 @@ config SOC_AT91SAM9
65 select HAVE_AT91_UTMI 68 select HAVE_AT91_UTMI
66 select HAVE_FB_ATMEL 69 select HAVE_FB_ATMEL
67 select MEMORY 70 select MEMORY
71 select PINCTRL_AT91
68 select SOC_SAM_V4_V5 72 select SOC_SAM_V4_V5
69 select SRAM if PM 73 select SRAM if PM
70 help 74 help
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 80e277cfcc8b..23726fb31741 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -41,8 +41,10 @@
41 * implementation should be moved down into the pinctrl driver and get 41 * implementation should be moved down into the pinctrl driver and get
42 * called as part of the generic suspend/resume path. 42 * called as part of the generic suspend/resume path.
43 */ 43 */
44#ifdef CONFIG_PINCTRL_AT91
44extern void at91_pinctrl_gpio_suspend(void); 45extern void at91_pinctrl_gpio_suspend(void);
45extern void at91_pinctrl_gpio_resume(void); 46extern void at91_pinctrl_gpio_resume(void);
47#endif
46 48
47static struct { 49static struct {
48 unsigned long uhp_udp_mask; 50 unsigned long uhp_udp_mask;
@@ -151,8 +153,9 @@ static void at91_pm_suspend(suspend_state_t state)
151 153
152static int at91_pm_enter(suspend_state_t state) 154static int at91_pm_enter(suspend_state_t state)
153{ 155{
156#ifdef CONFIG_PINCTRL_AT91
154 at91_pinctrl_gpio_suspend(); 157 at91_pinctrl_gpio_suspend();
155 158#endif
156 switch (state) { 159 switch (state) {
157 /* 160 /*
158 * Suspend-to-RAM is like STANDBY plus slow clock mode, so 161 * Suspend-to-RAM is like STANDBY plus slow clock mode, so
@@ -192,7 +195,9 @@ static int at91_pm_enter(suspend_state_t state)
192error: 195error:
193 target_state = PM_SUSPEND_ON; 196 target_state = PM_SUSPEND_ON;
194 197
198#ifdef CONFIG_PINCTRL_AT91
195 at91_pinctrl_gpio_resume(); 199 at91_pinctrl_gpio_resume();
200#endif
196 return 0; 201 return 0;
197} 202}
198 203
diff --git a/arch/arm/mach-dove/include/mach/entry-macro.S b/arch/arm/mach-dove/include/mach/entry-macro.S
index 72d622baaad3..df1d44bdc375 100644
--- a/arch/arm/mach-dove/include/mach/entry-macro.S
+++ b/arch/arm/mach-dove/include/mach/entry-macro.S
@@ -18,13 +18,13 @@
18 @ check low interrupts 18 @ check low interrupts
19 ldr \irqstat, [\base, #IRQ_CAUSE_LOW_OFF] 19 ldr \irqstat, [\base, #IRQ_CAUSE_LOW_OFF]
20 ldr \tmp, [\base, #IRQ_MASK_LOW_OFF] 20 ldr \tmp, [\base, #IRQ_MASK_LOW_OFF]
21 mov \irqnr, #31 21 mov \irqnr, #32
22 ands \irqstat, \irqstat, \tmp 22 ands \irqstat, \irqstat, \tmp
23 23
24 @ if no low interrupts set, check high interrupts 24 @ if no low interrupts set, check high interrupts
25 ldreq \irqstat, [\base, #IRQ_CAUSE_HIGH_OFF] 25 ldreq \irqstat, [\base, #IRQ_CAUSE_HIGH_OFF]
26 ldreq \tmp, [\base, #IRQ_MASK_HIGH_OFF] 26 ldreq \tmp, [\base, #IRQ_MASK_HIGH_OFF]
27 moveq \irqnr, #63 27 moveq \irqnr, #64
28 andeqs \irqstat, \irqstat, \tmp 28 andeqs \irqstat, \irqstat, \tmp
29 29
30 @ find first active interrupt source 30 @ find first active interrupt source
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index de68938ee6aa..c21e41dad19c 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -748,8 +748,12 @@ static void exynos5_powerdown_conf(enum sys_powerdown mode)
748void exynos_sys_powerdown_conf(enum sys_powerdown mode) 748void exynos_sys_powerdown_conf(enum sys_powerdown mode)
749{ 749{
750 unsigned int i; 750 unsigned int i;
751 const struct exynos_pmu_data *pmu_data;
752
753 if (!pmu_context)
754 return;
751 755
752 const struct exynos_pmu_data *pmu_data = pmu_context->pmu_data; 756 pmu_data = pmu_context->pmu_data;
753 757
754 if (pmu_data->powerdown_conf) 758 if (pmu_data->powerdown_conf)
755 pmu_data->powerdown_conf(mode); 759 pmu_data->powerdown_conf(mode);
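
The shape of this fix is the classic early-return guard: hoist the declaration, bail out before the first dereference when the context has not been initialised yet (presumably early in boot, before the PMU data is set up), and only then assign. A stand-alone illustration of the hazard and the guard (names hypothetical):

#include <stdio.h>

struct pmu_data { int mode; };
struct pmu_context { struct pmu_data *pmu_data; };

static struct pmu_context *pmu_context;   /* may still be NULL early on */

static void powerdown_conf(int mode)
{
        const struct pmu_data *pmu_data;

        if (!pmu_context)         /* guard before any dereference */
                return;

        pmu_data = pmu_context->pmu_data;
        printf("configuring mode %d (have %d)\n", mode, pmu_data->mode);
}

int main(void)
{
        powerdown_conf(1);        /* safely does nothing: context unset */
        return 0;
}
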
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 8e7976a4c3e7..cfc696b972f3 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -177,6 +177,7 @@ static struct irq_chip imx_gpc_chip = {
177 .irq_unmask = imx_gpc_irq_unmask, 177 .irq_unmask = imx_gpc_irq_unmask,
178 .irq_retrigger = irq_chip_retrigger_hierarchy, 178 .irq_retrigger = irq_chip_retrigger_hierarchy,
179 .irq_set_wake = imx_gpc_irq_set_wake, 179 .irq_set_wake = imx_gpc_irq_set_wake,
180 .irq_set_type = irq_chip_set_type_parent,
180#ifdef CONFIG_SMP 181#ifdef CONFIG_SMP
181 .irq_set_affinity = irq_chip_set_affinity_parent, 182 .irq_set_affinity = irq_chip_set_affinity_parent,
182#endif 183#endif
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index b02439019963..7a0c13bf4269 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -143,7 +143,7 @@ static inline void __indirect_writesl(volatile void __iomem *bus_addr,
143 writel(*vaddr++, bus_addr); 143 writel(*vaddr++, bus_addr);
144} 144}
145 145
146static inline unsigned char __indirect_readb(const volatile void __iomem *p) 146static inline u8 __indirect_readb(const volatile void __iomem *p)
147{ 147{
148 u32 addr = (u32)p; 148 u32 addr = (u32)p;
149 u32 n, byte_enables, data; 149 u32 n, byte_enables, data;
@@ -166,7 +166,7 @@ static inline void __indirect_readsb(const volatile void __iomem *bus_addr,
166 *vaddr++ = readb(bus_addr); 166 *vaddr++ = readb(bus_addr);
167} 167}
168 168
169static inline unsigned short __indirect_readw(const volatile void __iomem *p) 169static inline u16 __indirect_readw(const volatile void __iomem *p)
170{ 170{
171 u32 addr = (u32)p; 171 u32 addr = (u32)p;
172 u32 n, byte_enables, data; 172 u32 n, byte_enables, data;
@@ -189,7 +189,7 @@ static inline void __indirect_readsw(const volatile void __iomem *bus_addr,
189 *vaddr++ = readw(bus_addr); 189 *vaddr++ = readw(bus_addr);
190} 190}
191 191
192static inline unsigned long __indirect_readl(const volatile void __iomem *p) 192static inline u32 __indirect_readl(const volatile void __iomem *p)
193{ 193{
194 u32 addr = (__force u32)p; 194 u32 addr = (__force u32)p;
195 u32 data; 195 u32 data;
@@ -350,7 +350,7 @@ static inline void insl(u32 io_addr, void *p, u32 count)
350 ((unsigned long)p <= (PIO_MASK + PIO_OFFSET))) 350 ((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
351 351
352#define ioread8(p) ioread8(p) 352#define ioread8(p) ioread8(p)
353static inline unsigned int ioread8(const void __iomem *addr) 353static inline u8 ioread8(const void __iomem *addr)
354{ 354{
355 unsigned long port = (unsigned long __force)addr; 355 unsigned long port = (unsigned long __force)addr;
356 if (__is_io_address(port)) 356 if (__is_io_address(port))
@@ -378,7 +378,7 @@ static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
378} 378}
379 379
380#define ioread16(p) ioread16(p) 380#define ioread16(p) ioread16(p)
381static inline unsigned int ioread16(const void __iomem *addr) 381static inline u16 ioread16(const void __iomem *addr)
382{ 382{
383 unsigned long port = (unsigned long __force)addr; 383 unsigned long port = (unsigned long __force)addr;
384 if (__is_io_address(port)) 384 if (__is_io_address(port))
@@ -407,7 +407,7 @@ static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
407} 407}
408 408
409#define ioread32(p) ioread32(p) 409#define ioread32(p) ioread32(p)
410static inline unsigned int ioread32(const void __iomem *addr) 410static inline u32 ioread32(const void __iomem *addr)
411{ 411{
412 unsigned long port = (unsigned long __force)addr; 412 unsigned long port = (unsigned long __force)addr;
413 if (__is_io_address(port)) 413 if (__is_io_address(port))
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 5076d3f334d2..4b4371db5799 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -121,6 +121,7 @@ config ARCH_OMAP2PLUS_TYPICAL
121 select NEON if CPU_V7 121 select NEON if CPU_V7
122 select PM 122 select PM
123 select REGULATOR 123 select REGULATOR
124 select REGULATOR_FIXED_VOLTAGE
124 select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4 125 select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
125 select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4 126 select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
126 select VFP 127 select VFP
@@ -201,7 +202,6 @@ config MACH_OMAP3_PANDORA
201 depends on ARCH_OMAP3 202 depends on ARCH_OMAP3
202 default y 203 default y
203 select OMAP_PACKAGE_CBB 204 select OMAP_PACKAGE_CBB
204 select REGULATOR_FIXED_VOLTAGE if REGULATOR
205 205
206config MACH_NOKIA_N810 206config MACH_NOKIA_N810
207 bool 207 bool
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 5305ec7341ec..79e1f876d1c9 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -143,9 +143,9 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
143 * Ensure that CPU power state is set to ON to avoid CPU 143 * Ensure that CPU power state is set to ON to avoid CPU
144 * powerdomain transition on wfi 144 * powerdomain transition on wfi
145 */ 145 */
146 clkdm_wakeup(cpu1_clkdm); 146 clkdm_wakeup_nolock(cpu1_clkdm);
147 omap_set_pwrdm_state(cpu1_pwrdm, PWRDM_POWER_ON); 147 pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON);
148 clkdm_allow_idle(cpu1_clkdm); 148 clkdm_allow_idle_nolock(cpu1_clkdm);
149 149
150 if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) { 150 if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
151 while (gic_dist_disabled()) { 151 while (gic_dist_disabled()) {
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index cc8a987149e2..48495ad82aba 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -890,6 +890,36 @@ static int _init_opt_clks(struct omap_hwmod *oh)
890 return ret; 890 return ret;
891} 891}
892 892
893static void _enable_optional_clocks(struct omap_hwmod *oh)
894{
895 struct omap_hwmod_opt_clk *oc;
896 int i;
897
898 pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
899
900 for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
901 if (oc->_clk) {
902 pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
903 __clk_get_name(oc->_clk));
904 clk_enable(oc->_clk);
905 }
906}
907
908static void _disable_optional_clocks(struct omap_hwmod *oh)
909{
910 struct omap_hwmod_opt_clk *oc;
911 int i;
912
913 pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
914
915 for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
916 if (oc->_clk) {
917 pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
918 __clk_get_name(oc->_clk));
919 clk_disable(oc->_clk);
920 }
921}
922
893/** 923/**
894 * _enable_clocks - enable hwmod main clock and interface clocks 924 * _enable_clocks - enable hwmod main clock and interface clocks
895 * @oh: struct omap_hwmod * 925 * @oh: struct omap_hwmod *
@@ -917,6 +947,9 @@ static int _enable_clocks(struct omap_hwmod *oh)
917 clk_enable(os->_clk); 947 clk_enable(os->_clk);
918 } 948 }
919 949
950 if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
951 _enable_optional_clocks(oh);
952
920 /* The opt clocks are controlled by the device driver. */ 953 /* The opt clocks are controlled by the device driver. */
921 954
922 return 0; 955 return 0;
@@ -948,41 +981,14 @@ static int _disable_clocks(struct omap_hwmod *oh)
948 clk_disable(os->_clk); 981 clk_disable(os->_clk);
949 } 982 }
950 983
984 if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
985 _disable_optional_clocks(oh);
986
951 /* The opt clocks are controlled by the device driver. */ 987 /* The opt clocks are controlled by the device driver. */
952 988
953 return 0; 989 return 0;
954} 990}
955 991
956static void _enable_optional_clocks(struct omap_hwmod *oh)
957{
958 struct omap_hwmod_opt_clk *oc;
959 int i;
960
961 pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
962
963 for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
964 if (oc->_clk) {
965 pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
966 __clk_get_name(oc->_clk));
967 clk_enable(oc->_clk);
968 }
969}
970
971static void _disable_optional_clocks(struct omap_hwmod *oh)
972{
973 struct omap_hwmod_opt_clk *oc;
974 int i;
975
976 pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
977
978 for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
979 if (oc->_clk) {
980 pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
981 __clk_get_name(oc->_clk));
982 clk_disable(oc->_clk);
983 }
984}
985
986/** 992/**
987 * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4 993 * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4
988 * @oh: struct omap_hwmod * 994 * @oh: struct omap_hwmod *
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index ca6df1a73475..76bce11c85a4 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -523,6 +523,8 @@ struct omap_hwmod_omap4_prcm {
523 * HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up 523 * HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up
524 * events by calling _reconfigure_io_chain() when a device is enabled 524 * events by calling _reconfigure_io_chain() when a device is enabled
525 * or idled. 525 * or idled.
526 * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to
527 * operate and they need to be handled at the same time as the main_clk.
526 */ 528 */
527#define HWMOD_SWSUP_SIDLE (1 << 0) 529#define HWMOD_SWSUP_SIDLE (1 << 0)
528#define HWMOD_SWSUP_MSTANDBY (1 << 1) 530#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@@ -538,6 +540,7 @@ struct omap_hwmod_omap4_prcm {
538#define HWMOD_FORCE_MSTANDBY (1 << 11) 540#define HWMOD_FORCE_MSTANDBY (1 << 11)
539#define HWMOD_SWSUP_SIDLE_ACT (1 << 12) 541#define HWMOD_SWSUP_SIDLE_ACT (1 << 12)
540#define HWMOD_RECONFIG_IO_CHAIN (1 << 13) 542#define HWMOD_RECONFIG_IO_CHAIN (1 << 13)
543#define HWMOD_OPT_CLKS_NEEDED (1 << 14)
541 544
542/* 545/*
543 * omap_hwmod._int_flags definitions 546 * omap_hwmod._int_flags definitions
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 51d1ecb384bd..ee4e04434a94 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1298,6 +1298,44 @@ static struct omap_hwmod dra7xx_mcspi4_hwmod = {
1298}; 1298};
1299 1299
1300/* 1300/*
1301 * 'mcasp' class
1302 *
1303 */
1304static struct omap_hwmod_class_sysconfig dra7xx_mcasp_sysc = {
1305 .sysc_offs = 0x0004,
1306 .sysc_flags = SYSC_HAS_SIDLEMODE,
1307 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
1308 .sysc_fields = &omap_hwmod_sysc_type3,
1309};
1310
1311static struct omap_hwmod_class dra7xx_mcasp_hwmod_class = {
1312 .name = "mcasp",
1313 .sysc = &dra7xx_mcasp_sysc,
1314};
1315
1316/* mcasp3 */
1317static struct omap_hwmod_opt_clk mcasp3_opt_clks[] = {
1318 { .role = "ahclkx", .clk = "mcasp3_ahclkx_mux" },
1319};
1320
1321static struct omap_hwmod dra7xx_mcasp3_hwmod = {
1322 .name = "mcasp3",
1323 .class = &dra7xx_mcasp_hwmod_class,
1324 .clkdm_name = "l4per2_clkdm",
1325 .main_clk = "mcasp3_aux_gfclk_mux",
1326 .flags = HWMOD_OPT_CLKS_NEEDED,
1327 .prcm = {
1328 .omap4 = {
1329 .clkctrl_offs = DRA7XX_CM_L4PER2_MCASP3_CLKCTRL_OFFSET,
1330 .context_offs = DRA7XX_RM_L4PER2_MCASP3_CONTEXT_OFFSET,
1331 .modulemode = MODULEMODE_SWCTRL,
1332 },
1333 },
1334 .opt_clks = mcasp3_opt_clks,
1335 .opt_clks_cnt = ARRAY_SIZE(mcasp3_opt_clks),
1336};
1337
1338/*
1301 * 'mmc' class 1339 * 'mmc' class
1302 * 1340 *
1303 */ 1341 */
@@ -2566,6 +2604,22 @@ static struct omap_hwmod_ocp_if dra7xx_l3_main_1__hdmi = {
2566 .user = OCP_USER_MPU | OCP_USER_SDMA, 2604 .user = OCP_USER_MPU | OCP_USER_SDMA,
2567}; 2605};
2568 2606
2607/* l4_per2 -> mcasp3 */
2608static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp3 = {
2609 .master = &dra7xx_l4_per2_hwmod,
2610 .slave = &dra7xx_mcasp3_hwmod,
2611 .clk = "l4_root_clk_div",
2612 .user = OCP_USER_MPU | OCP_USER_SDMA,
2613};
2614
2615/* l3_main_1 -> mcasp3 */
2616static struct omap_hwmod_ocp_if dra7xx_l3_main_1__mcasp3 = {
2617 .master = &dra7xx_l3_main_1_hwmod,
2618 .slave = &dra7xx_mcasp3_hwmod,
2619 .clk = "l3_iclk_div",
2620 .user = OCP_USER_MPU | OCP_USER_SDMA,
2621};
2622
2569/* l4_per1 -> elm */ 2623/* l4_per1 -> elm */
2570static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = { 2624static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = {
2571 .master = &dra7xx_l4_per1_hwmod, 2625 .master = &dra7xx_l4_per1_hwmod,
@@ -3308,6 +3362,8 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
3308 &dra7xx_l4_wkup__dcan1, 3362 &dra7xx_l4_wkup__dcan1,
3309 &dra7xx_l4_per2__dcan2, 3363 &dra7xx_l4_per2__dcan2,
3310 &dra7xx_l4_per2__cpgmac0, 3364 &dra7xx_l4_per2__cpgmac0,
3365 &dra7xx_l4_per2__mcasp3,
3366 &dra7xx_l3_main_1__mcasp3,
3311 &dra7xx_gmac__mdio, 3367 &dra7xx_gmac__mdio,
3312 &dra7xx_l4_cfg__dma_system, 3368 &dra7xx_l4_cfg__dma_system,
3313 &dra7xx_l3_main_1__dss, 3369 &dra7xx_l3_main_1__dss,
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index b1288f56d509..6256052893ec 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -144,6 +144,7 @@ static struct omap_hwmod dm81xx_l4_ls_hwmod = {
144 .name = "l4_ls", 144 .name = "l4_ls",
145 .clkdm_name = "alwon_l3s_clkdm", 145 .clkdm_name = "alwon_l3s_clkdm",
146 .class = &l4_hwmod_class, 146 .class = &l4_hwmod_class,
147 .flags = HWMOD_NO_IDLEST,
147}; 148};
148 149
149/* 150/*
@@ -155,6 +156,7 @@ static struct omap_hwmod dm81xx_l4_hs_hwmod = {
155 .name = "l4_hs", 156 .name = "l4_hs",
156 .clkdm_name = "alwon_l3_med_clkdm", 157 .clkdm_name = "alwon_l3_med_clkdm",
157 .class = &l4_hwmod_class, 158 .class = &l4_hwmod_class,
159 .flags = HWMOD_NO_IDLEST,
158}; 160};
159 161
160/* L3 slow -> L4 ls peripheral interface running at 125MHz */ 162/* L3 slow -> L4 ls peripheral interface running at 125MHz */
@@ -850,6 +852,7 @@ static struct omap_hwmod dm816x_emac0_hwmod = {
850 .name = "emac0", 852 .name = "emac0",
851 .clkdm_name = "alwon_ethernet_clkdm", 853 .clkdm_name = "alwon_ethernet_clkdm",
852 .class = &dm816x_emac_hwmod_class, 854 .class = &dm816x_emac_hwmod_class,
855 .flags = HWMOD_NO_IDLEST,
853}; 856};
854 857
855static struct omap_hwmod_ocp_if dm81xx_l4_hs__emac0 = { 858static struct omap_hwmod_ocp_if dm81xx_l4_hs__emac0 = {
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 1dfe34654c43..58144779dec4 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -24,9 +24,6 @@
24#include <linux/platform_data/iommu-omap.h> 24#include <linux/platform_data/iommu-omap.h>
25#include <linux/platform_data/wkup_m3.h> 25#include <linux/platform_data/wkup_m3.h>
26 26
27#include <asm/siginfo.h>
28#include <asm/signal.h>
29
30#include "common.h" 27#include "common.h"
31#include "common-board-devices.h" 28#include "common-board-devices.h"
32#include "dss-common.h" 29#include "dss-common.h"
@@ -385,29 +382,6 @@ static void __init omap3_pandora_legacy_init(void)
385} 382}
386#endif /* CONFIG_ARCH_OMAP3 */ 383#endif /* CONFIG_ARCH_OMAP3 */
387 384
388#ifdef CONFIG_SOC_TI81XX
389static int fault_fixed_up;
390
391static int t410_abort_handler(unsigned long addr, unsigned int fsr,
392 struct pt_regs *regs)
393{
394 if ((fsr == 0x406 || fsr == 0xc06) && !fault_fixed_up) {
395 pr_warn("External imprecise Data abort at addr=%#lx, fsr=%#x ignored.\n",
396 addr, fsr);
397 fault_fixed_up = 1;
398 return 0;
399 }
400
401 return 1;
402}
403
404static void __init t410_abort_init(void)
405{
406 hook_fault_code(16 + 6, t410_abort_handler, SIGBUS, BUS_OBJERR,
407 "imprecise external abort");
408}
409#endif
410
411#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) 385#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
412static struct iommu_platform_data omap4_iommu_pdata = { 386static struct iommu_platform_data omap4_iommu_pdata = {
413 .reset_name = "mmu_cache", 387 .reset_name = "mmu_cache",
@@ -536,9 +510,6 @@ static struct pdata_init pdata_quirks[] __initdata = {
536 { "openpandora,omap3-pandora-600mhz", omap3_pandora_legacy_init, }, 510 { "openpandora,omap3-pandora-600mhz", omap3_pandora_legacy_init, },
537 { "openpandora,omap3-pandora-1ghz", omap3_pandora_legacy_init, }, 511 { "openpandora,omap3-pandora-1ghz", omap3_pandora_legacy_init, },
538#endif 512#endif
539#ifdef CONFIG_SOC_TI81XX
540 { "hp,t410", t410_abort_init, },
541#endif
542#ifdef CONFIG_SOC_OMAP5 513#ifdef CONFIG_SOC_OMAP5
543 { "ti,omap5-uevm", omap5_uevm_legacy_init, }, 514 { "ti,omap5-uevm", omap5_uevm_legacy_init, },
544#endif 515#endif
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 87b98bf92366..2dbd3785ee6f 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -301,11 +301,11 @@ static void omap3_pm_idle(void)
301 if (omap_irq_pending()) 301 if (omap_irq_pending())
302 return; 302 return;
303 303
304 trace_cpu_idle(1, smp_processor_id()); 304 trace_cpu_idle_rcuidle(1, smp_processor_id());
305 305
306 omap_sram_idle(); 306 omap_sram_idle();
307 307
308 trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); 308 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
309} 309}
310 310
311#ifdef CONFIG_SUSPEND 311#ifdef CONFIG_SUSPEND
diff --git a/arch/arm/mach-orion5x/include/mach/entry-macro.S b/arch/arm/mach-orion5x/include/mach/entry-macro.S
index 79eb502a1e64..73919a36b577 100644
--- a/arch/arm/mach-orion5x/include/mach/entry-macro.S
+++ b/arch/arm/mach-orion5x/include/mach/entry-macro.S
@@ -21,5 +21,5 @@
21 @ find cause bits that are unmasked 21 @ find cause bits that are unmasked
22 ands \irqstat, \irqstat, \tmp @ clear Z flag if any 22 ands \irqstat, \irqstat, \tmp @ clear Z flag if any
23 clzne \irqnr, \irqstat @ calc irqnr 23 clzne \irqnr, \irqstat @ calc irqnr
24 rsbne \irqnr, \irqnr, #31 24 rsbne \irqnr, \irqnr, #32
25 .endm 25 .endm
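
This hunk and the mach-dove one above are the same off-by-one repair in a clz-based IRQ scan: for a cause word whose highest set bit is N, clz returns 31 - N, so rsb against #32 yields N + 1, which is evidently the numbering the consuming entry code expects here (the exact consumer convention is outside this diff). A quick stand-alone check of the arithmetic, using the GNU C __builtin_clz:

#include <assert.h>
#include <stdio.h>

/* One-based number of the highest set bit of a nonzero cause word:
 * clz(stat) == 31 - N for highest bit N, hence 32 - clz(stat) == N + 1. */
static unsigned int irq_from_cause(unsigned int stat)
{
        assert(stat != 0);
        return 32 - (unsigned int)__builtin_clz(stat);
}

int main(void)
{
        printf("%u\n", irq_from_cause(1u << 0));    /* -> 1  */
        printf("%u\n", irq_from_cause(1u << 5));    /* -> 6  */
        printf("%u\n", irq_from_cause(1u << 31));   /* -> 32 */
        return 0;
}
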
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 9a9c15bfcd34..7c0d5618be5e 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -889,6 +889,7 @@ static void __init e680_init(void)
889 889
890 pxa_set_keypad_info(&e680_keypad_platform_data); 890 pxa_set_keypad_info(&e680_keypad_platform_data);
891 891
892 pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
892 platform_add_devices(ARRAY_AND_SIZE(ezx_devices)); 893 platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
893 platform_add_devices(ARRAY_AND_SIZE(e680_devices)); 894 platform_add_devices(ARRAY_AND_SIZE(e680_devices));
894} 895}
@@ -956,6 +957,7 @@ static void __init a1200_init(void)
956 957
957 pxa_set_keypad_info(&a1200_keypad_platform_data); 958 pxa_set_keypad_info(&a1200_keypad_platform_data);
958 959
960 pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
959 platform_add_devices(ARRAY_AND_SIZE(ezx_devices)); 961 platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
960 platform_add_devices(ARRAY_AND_SIZE(a1200_devices)); 962 platform_add_devices(ARRAY_AND_SIZE(a1200_devices));
961} 963}
@@ -1148,6 +1150,7 @@ static void __init a910_init(void)
1148 platform_device_register(&a910_camera); 1150 platform_device_register(&a910_camera);
1149 } 1151 }
1150 1152
1153 pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
1151 platform_add_devices(ARRAY_AND_SIZE(ezx_devices)); 1154 platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
1152 platform_add_devices(ARRAY_AND_SIZE(a910_devices)); 1155 platform_add_devices(ARRAY_AND_SIZE(a910_devices));
1153} 1156}
@@ -1215,6 +1218,7 @@ static void __init e6_init(void)
1215 1218
1216 pxa_set_keypad_info(&e6_keypad_platform_data); 1219 pxa_set_keypad_info(&e6_keypad_platform_data);
1217 1220
1221 pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
1218 platform_add_devices(ARRAY_AND_SIZE(ezx_devices)); 1222 platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
1219 platform_add_devices(ARRAY_AND_SIZE(e6_devices)); 1223 platform_add_devices(ARRAY_AND_SIZE(e6_devices));
1220} 1224}
@@ -1256,6 +1260,7 @@ static void __init e2_init(void)
1256 1260
1257 pxa_set_keypad_info(&e2_keypad_platform_data); 1261 pxa_set_keypad_info(&e2_keypad_platform_data);
1258 1262
1263 pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
1259 platform_add_devices(ARRAY_AND_SIZE(ezx_devices)); 1264 platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
1260 platform_add_devices(ARRAY_AND_SIZE(e2_devices)); 1265 platform_add_devices(ARRAY_AND_SIZE(e2_devices));
1261} 1266}
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 13eba2b26e0a..8fbfb10047ec 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -344,7 +344,7 @@ void __init palm27x_pwm_init(int bl, int lcd)
344{ 344{
345 palm_bl_power = bl; 345 palm_bl_power = bl;
346 palm_lcd_power = lcd; 346 palm_lcd_power = lcd;
347 pwm_add_lookup(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup)); 347 pwm_add_table(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup));
348 platform_device_register(&palm27x_backlight); 348 platform_device_register(&palm27x_backlight);
349} 349}
350#endif 350#endif
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index aebf6de62468..0b5c3876720c 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -169,7 +169,7 @@ static inline void palmtc_keys_init(void) {}
169#if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE) 169#if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE)
170static struct pwm_lookup palmtc_pwm_lookup[] = { 170static struct pwm_lookup palmtc_pwm_lookup[] = {
171 PWM_LOOKUP("pxa25x-pwm.1", 0, "pwm-backlight.0", NULL, PALMTC_PERIOD_NS, 171 PWM_LOOKUP("pxa25x-pwm.1", 0, "pwm-backlight.0", NULL, PALMTC_PERIOD_NS,
172 PWM_PERIOD_NORMAL), 172 PWM_POLARITY_NORMAL),
173}; 173};
174 174
175static struct platform_pwm_backlight_data palmtc_backlight_data = { 175static struct platform_pwm_backlight_data palmtc_backlight_data = {
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
index a19460e6e7b0..b355fca6cc2e 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
@@ -20,7 +20,7 @@
20#include <plat/cpu.h> 20#include <plat/cpu.h>
21#include <plat/cpu-freq-core.h> 21#include <plat/cpu-freq-core.h>
22 22
23static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = { 23static struct cpufreq_frequency_table s3c2440_plls_12[] = {
24 { .frequency = 75000000, .driver_data = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */ 24 { .frequency = 75000000, .driver_data = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */
25 { .frequency = 80000000, .driver_data = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */ 25 { .frequency = 80000000, .driver_data = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */
26 { .frequency = 90000000, .driver_data = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */ 26 { .frequency = 90000000, .driver_data = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
index 1191b2905625..be9a248b5ce9 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
@@ -20,7 +20,7 @@
20#include <plat/cpu.h> 20#include <plat/cpu.h>
21#include <plat/cpu-freq-core.h> 21#include <plat/cpu-freq-core.h>
22 22
23static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = { 23static struct cpufreq_frequency_table s3c2440_plls_169344[] = {
24 { .frequency = 78019200, .driver_data = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */ 24 { .frequency = 78019200, .driver_data = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */
25 { .frequency = 84067200, .driver_data = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */ 25 { .frequency = 84067200, .driver_data = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */
26 { .frequency = 90115200, .driver_data = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */ 26 { .frequency = 90115200, .driver_data = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */
diff --git a/arch/arm/mach-shmobile/setup-r8a7793.c b/arch/arm/mach-shmobile/setup-r8a7793.c
index 1d2825cb7a65..5fce87f7f254 100644
--- a/arch/arm/mach-shmobile/setup-r8a7793.c
+++ b/arch/arm/mach-shmobile/setup-r8a7793.c
@@ -19,7 +19,7 @@
19#include "common.h" 19#include "common.h"
20#include "rcar-gen2.h" 20#include "rcar-gen2.h"
21 21
22static const char *r8a7793_boards_compat_dt[] __initconst = { 22static const char * const r8a7793_boards_compat_dt[] __initconst = {
23 "renesas,r8a7793", 23 "renesas,r8a7793",
24 NULL, 24 NULL,
25}; 25};
diff --git a/arch/arm/mach-zx/Kconfig b/arch/arm/mach-zx/Kconfig
index 7fdc5bf24f9b..446334a25cf5 100644
--- a/arch/arm/mach-zx/Kconfig
+++ b/arch/arm/mach-zx/Kconfig
@@ -13,7 +13,7 @@ config SOC_ZX296702
13 select ARM_GLOBAL_TIMER 13 select ARM_GLOBAL_TIMER
14 select HAVE_ARM_SCU if SMP 14 select HAVE_ARM_SCU if SMP
15 select HAVE_ARM_TWD if SMP 15 select HAVE_ARM_TWD if SMP
16 select PM_GENERIC_DOMAINS 16 select PM_GENERIC_DOMAINS if PM
17 help 17 help
18 Support for ZTE ZX296702 SoC which is a dual core CortexA9MP 18 Support for ZTE ZX296702 SoC which is a dual core CortexA9MP
19endif 19endif
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 845769e41332..c8c8b9ed02e0 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
165 __flush_icache_all(); 165 __flush_icache_all();
166} 166}
167 167
168static int is_reserved_asid(u64 asid) 168static bool check_update_reserved_asid(u64 asid, u64 newasid)
169{ 169{
170 int cpu; 170 int cpu;
171 for_each_possible_cpu(cpu) 171 bool hit = false;
172 if (per_cpu(reserved_asids, cpu) == asid) 172
173 return 1; 173 /*
174 return 0; 174 * Iterate over the set of reserved ASIDs looking for a match.
175 * If we find one, then we can update our mm to use newasid
176 * (i.e. the same ASID in the current generation) but we can't
177 * exit the loop early, since we need to ensure that all copies
178 * of the old ASID are updated to reflect the mm. Failure to do
179 * so could result in us missing the reserved ASID in a future
180 * generation.
181 */
182 for_each_possible_cpu(cpu) {
183 if (per_cpu(reserved_asids, cpu) == asid) {
184 hit = true;
185 per_cpu(reserved_asids, cpu) = newasid;
186 }
187 }
188
189 return hit;
175} 190}
176 191
177static u64 new_context(struct mm_struct *mm, unsigned int cpu) 192static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
181 u64 generation = atomic64_read(&asid_generation); 196 u64 generation = atomic64_read(&asid_generation);
182 197
183 if (asid != 0) { 198 if (asid != 0) {
199 u64 newasid = generation | (asid & ~ASID_MASK);
200
184 /* 201 /*
185 * If our current ASID was active during a rollover, we 202 * If our current ASID was active during a rollover, we
186 * can continue to use it and this was just a false alarm. 203 * can continue to use it and this was just a false alarm.
187 */ 204 */
188 if (is_reserved_asid(asid)) 205 if (check_update_reserved_asid(asid, newasid))
189 return generation | (asid & ~ASID_MASK); 206 return newasid;
190 207
191 /* 208 /*
192 * We had a valid ASID in a previous life, so try to re-use 209 * We had a valid ASID in a previous life, so try to re-use
@@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
194 */ 211 */
195 asid &= ~ASID_MASK; 212 asid &= ~ASID_MASK;
196 if (!__test_and_set_bit(asid, asid_map)) 213 if (!__test_and_set_bit(asid, asid_map))
197 goto bump_gen; 214 return newasid;
198 } 215 }
199 216
200 /* 217 /*
@@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
216 233
217 __set_bit(asid, asid_map); 234 __set_bit(asid, asid_map);
218 cur_idx = asid; 235 cur_idx = asid;
219
220bump_gen:
221 asid |= generation;
222 cpumask_clear(mm_cpumask(mm)); 236 cpumask_clear(mm_cpumask(mm));
223 return asid; 237 return asid | generation;
224} 238}
225 239
226void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) 240void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
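
The invariant behind this rework: an mm's context ID is the generation counter in the high bits ORed with the hardware ASID in the low bits, so carrying a reserved ASID into a new generation is just re-ORing the same low bits with the new generation value — which is exactly what check_update_reserved_asid() now propagates to every CPU's reserved slot. A small numeric sketch (ASID_BITS chosen as 8 purely for illustration):

#include <inttypes.h>
#include <stdio.h>

#define ASID_BITS 8                                /* illustrative width */
#define ASID_MASK (~(uint64_t)((1u << ASID_BITS) - 1))

/* Rebuild a context ID for the new generation, keeping the ASID bits. */
static uint64_t carry_to_generation(uint64_t ctx_id, uint64_t generation)
{
        return generation | (ctx_id & ~ASID_MASK);
}

int main(void)
{
        uint64_t old_gen = (uint64_t)1 << ASID_BITS;   /* generation 1 */
        uint64_t new_gen = (uint64_t)2 << ASID_BITS;   /* generation 2 */
        uint64_t ctx = old_gen | 0x2a;                 /* ASID 0x2a    */

        printf("old %" PRIx64 " -> new %" PRIx64 "\n",
               ctx, carry_to_generation(ctx, new_gen)); /* ASID 0x2a kept */
        return 0;
}
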
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e62400e5fb99..534a60ae282e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1521 return -ENOMEM; 1521 return -ENOMEM;
1522 1522
1523 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { 1523 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
1524 phys_addr_t phys = sg_phys(s) & PAGE_MASK; 1524 phys_addr_t phys = page_to_phys(sg_page(s));
1525 unsigned int len = PAGE_ALIGN(s->offset + s->length); 1525 unsigned int len = PAGE_ALIGN(s->offset + s->length);
1526 1526
1527 if (!is_coherent && 1527 if (!is_coherent &&
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8a63b4cdc0f2..7f8cd1b3557f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -22,6 +22,7 @@
22#include <linux/memblock.h> 22#include <linux/memblock.h>
23#include <linux/dma-contiguous.h> 23#include <linux/dma-contiguous.h>
24#include <linux/sizes.h> 24#include <linux/sizes.h>
25#include <linux/stop_machine.h>
25 26
26#include <asm/cp15.h> 27#include <asm/cp15.h>
27#include <asm/mach-types.h> 28#include <asm/mach-types.h>
@@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
627 * safe to be called with preemption disabled, as under stop_machine(). 628 * safe to be called with preemption disabled, as under stop_machine().
628 */ 629 */
629static inline void section_update(unsigned long addr, pmdval_t mask, 630static inline void section_update(unsigned long addr, pmdval_t mask,
630 pmdval_t prot) 631 pmdval_t prot, struct mm_struct *mm)
631{ 632{
632 struct mm_struct *mm;
633 pmd_t *pmd; 633 pmd_t *pmd;
634 634
635 mm = current->active_mm;
636 pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr); 635 pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
637 636
638#ifdef CONFIG_ARM_LPAE 637#ifdef CONFIG_ARM_LPAE
@@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
656 return !!(get_cr() & CR_XP); 655 return !!(get_cr() & CR_XP);
657} 656}
658 657
659#define set_section_perms(perms, field) { \ 658void set_section_perms(struct section_perm *perms, int n, bool set,
660 size_t i; \ 659 struct mm_struct *mm)
661 unsigned long addr; \ 660{
662 \ 661 size_t i;
663 if (!arch_has_strict_perms()) \ 662 unsigned long addr;
664 return; \ 663
665 \ 664 if (!arch_has_strict_perms())
666 for (i = 0; i < ARRAY_SIZE(perms); i++) { \ 665 return;
667 if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \ 666
668 !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \ 667 for (i = 0; i < n; i++) {
669 pr_err("BUG: section %lx-%lx not aligned to %lx\n", \ 668 if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
670 perms[i].start, perms[i].end, \ 669 !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
671 SECTION_SIZE); \ 670 pr_err("BUG: section %lx-%lx not aligned to %lx\n",
672 continue; \ 671 perms[i].start, perms[i].end,
673 } \ 672 SECTION_SIZE);
674 \ 673 continue;
675 for (addr = perms[i].start; \ 674 }
676 addr < perms[i].end; \ 675
677 addr += SECTION_SIZE) \ 676 for (addr = perms[i].start;
678 section_update(addr, perms[i].mask, \ 677 addr < perms[i].end;
679 perms[i].field); \ 678 addr += SECTION_SIZE)
680 } \ 679 section_update(addr, perms[i].mask,
680 set ? perms[i].prot : perms[i].clear, mm);
681 }
682
681} 683}
682 684
683static inline void fix_kernmem_perms(void) 685static void update_sections_early(struct section_perm perms[], int n)
684{ 686{
685 set_section_perms(nx_perms, prot); 687 struct task_struct *t, *s;
688
689 read_lock(&tasklist_lock);
690 for_each_process(t) {
691 if (t->flags & PF_KTHREAD)
692 continue;
693 for_each_thread(t, s)
694 set_section_perms(perms, n, true, s->mm);
695 }
696 read_unlock(&tasklist_lock);
697 set_section_perms(perms, n, true, current->active_mm);
698 set_section_perms(perms, n, true, &init_mm);
699}
700
701int __fix_kernmem_perms(void *unused)
702{
703 update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
704 return 0;
705}
706
707void fix_kernmem_perms(void)
708{
709 stop_machine(__fix_kernmem_perms, NULL, NULL);
686} 710}
687 711
688#ifdef CONFIG_DEBUG_RODATA 712#ifdef CONFIG_DEBUG_RODATA
713int __mark_rodata_ro(void *unused)
714{
715 update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
716 return 0;
717}
718
689void mark_rodata_ro(void) 719void mark_rodata_ro(void)
690{ 720{
691 set_section_perms(ro_perms, prot); 721 stop_machine(__mark_rodata_ro, NULL, NULL);
692} 722}
693 723
694void set_kernel_text_rw(void) 724void set_kernel_text_rw(void)
695{ 725{
696 set_section_perms(ro_perms, clear); 726 set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
727 current->active_mm);
697} 728}
698 729
699void set_kernel_text_ro(void) 730void set_kernel_text_ro(void)
700{ 731{
701 set_section_perms(ro_perms, prot); 732 set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
733 current->active_mm);
702} 734}
703#endif /* CONFIG_DEBUG_RODATA */ 735#endif /* CONFIG_DEBUG_RODATA */
704 736
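
The restructuring above turns the permission change into a stop_machine() callback, so no CPU is executing through the affected mappings while they are rewritten, and it walks every user process's mm (plus init_mm and the active mm) instead of only the caller's. A stand-alone sketch of that "quiesce, then update every context" shape, with stop_machine() reduced to a stub that simply invokes the callback (the real kernel API also takes a cpumask):

#include <stdio.h>

struct mm { const char *name; int writable; };

static struct mm init_mm = { "init_mm", 1 };
static struct mm task_mms[] = { { "taskA", 1 }, { "taskB", 1 } };

/* Stub for stop_machine(): run fn with all CPUs quiesced. Here we just
 * call it directly; the real one schedules it on every online CPU. */
static int stop_machine(int (*fn)(void *), void *data)
{
        return fn(data);
}

static void set_perms(struct mm *mm, int writable)
{
        mm->writable = writable;
        printf("%s -> %s\n", mm->name, writable ? "rw" : "ro");
}

static int __mark_rodata_ro(void *unused)
{
        (void)unused;
        for (unsigned int i = 0; i < sizeof(task_mms) / sizeof(task_mms[0]); i++)
                set_perms(&task_mms[i], 0);     /* every user mm... */
        set_perms(&init_mm, 0);                 /* ...and init_mm too */
        return 0;
}

int main(void)
{
        return stop_machine(__mark_rodata_ro, NULL);
}
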
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index de2b246fed38..8e1ea433c3f1 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
95.equ cpu_v7_suspend_size, 4 * 9 95.equ cpu_v7_suspend_size, 4 * 9
96#ifdef CONFIG_ARM_CPU_SUSPEND 96#ifdef CONFIG_ARM_CPU_SUSPEND
97ENTRY(cpu_v7_do_suspend) 97ENTRY(cpu_v7_do_suspend)
98 stmfd sp!, {r4 - r10, lr} 98 stmfd sp!, {r4 - r11, lr}
99 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 99 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
100 mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID 100 mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID
101 stmia r0!, {r4 - r5} 101 stmia r0!, {r4 - r5}
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
112 mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register 112 mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
113 mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control 113 mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
114 stmia r0, {r5 - r11} 114 stmia r0, {r5 - r11}
115 ldmfd sp!, {r4 - r10, pc} 115 ldmfd sp!, {r4 - r11, pc}
116ENDPROC(cpu_v7_do_suspend) 116ENDPROC(cpu_v7_do_suspend)
117 117
118ENTRY(cpu_v7_do_resume) 118ENTRY(cpu_v7_do_resume)
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 2f4b14cfddb4..591f9db3bf40 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1061,7 +1061,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
1061 } 1061 }
1062 build_epilogue(&ctx); 1062 build_epilogue(&ctx);
1063 1063
1064 flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx)); 1064 flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
1065 1065
1066#if __LINUX_ARM_ARCH__ < 7 1066#if __LINUX_ARM_ARCH__ < 7
1067 if (ctx.imm_count) 1067 if (ctx.imm_count)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9ac16a482ff1..871f21783866 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -49,7 +49,7 @@ config ARM64
49 select HAVE_ARCH_AUDITSYSCALL 49 select HAVE_ARCH_AUDITSYSCALL
50 select HAVE_ARCH_BITREVERSE 50 select HAVE_ARCH_BITREVERSE
51 select HAVE_ARCH_JUMP_LABEL 51 select HAVE_ARCH_JUMP_LABEL
52 select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP 52 select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
53 select HAVE_ARCH_KGDB 53 select HAVE_ARCH_KGDB
54 select HAVE_ARCH_SECCOMP_FILTER 54 select HAVE_ARCH_SECCOMP_FILTER
55 select HAVE_ARCH_TRACEHOOK 55 select HAVE_ARCH_TRACEHOOK
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
316 316
317 If unsure, say Y. 317 If unsure, say Y.
318 318
319config ARM64_ERRATUM_834220
320 bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
321 depends on KVM
322 default y
323 help
324 This option adds an alternative code sequence to work around ARM
325 erratum 834220 on Cortex-A57 parts up to r1p2.
326
327 Affected Cortex-A57 parts might report a Stage 2 translation
328 fault as the result of a Stage 1 fault for load crossing a
329 page boundary when there is a permission or device memory
330 alignment fault at Stage 1 and a translation fault at Stage 2.
331
332 The workaround is to verify that the Stage 1 translation
333 doesn't generate a fault before handling the Stage 2 fault.
334 Please note that this does not necessarily enable the workaround,
335 as it depends on the alternative framework, which will only patch
336 the kernel if an affected CPU is detected.
337
338 If unsure, say Y.
339
319config ARM64_ERRATUM_845719 340config ARM64_ERRATUM_845719
320 bool "Cortex-A53: 845719: a load might read incorrect data" 341 bool "Cortex-A53: 845719: a load might read incorrect data"
321 depends on COMPAT 342 depends on COMPAT
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index e81cd48d6245..925552e7b4f3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -269,6 +269,7 @@
269 clock-frequency = <0>; /* Updated by bootloader */ 269 clock-frequency = <0>; /* Updated by bootloader */
270 voltage-ranges = <1800 1800 3300 3300>; 270 voltage-ranges = <1800 1800 3300 3300>;
271 sdhci,auto-cmd12; 271 sdhci,auto-cmd12;
272 little-endian;
272 bus-width = <4>; 273 bus-width = <4>;
273 }; 274 };
274 275
@@ -277,6 +278,7 @@
277 reg = <0x0 0x2300000 0x0 0x10000>; 278 reg = <0x0 0x2300000 0x0 0x10000>;
278 interrupts = <0 36 0x4>; /* Level high type */ 279 interrupts = <0 36 0x4>; /* Level high type */
279 gpio-controller; 280 gpio-controller;
281 little-endian;
280 #gpio-cells = <2>; 282 #gpio-cells = <2>;
281 interrupt-controller; 283 interrupt-controller;
282 #interrupt-cells = <2>; 284 #interrupt-cells = <2>;
@@ -287,6 +289,7 @@
287 reg = <0x0 0x2310000 0x0 0x10000>; 289 reg = <0x0 0x2310000 0x0 0x10000>;
288 interrupts = <0 36 0x4>; /* Level high type */ 290 interrupts = <0 36 0x4>; /* Level high type */
289 gpio-controller; 291 gpio-controller;
292 little-endian;
290 #gpio-cells = <2>; 293 #gpio-cells = <2>;
291 interrupt-controller; 294 interrupt-controller;
292 #interrupt-cells = <2>; 295 #interrupt-cells = <2>;
@@ -297,6 +300,7 @@
297 reg = <0x0 0x2320000 0x0 0x10000>; 300 reg = <0x0 0x2320000 0x0 0x10000>;
298 interrupts = <0 37 0x4>; /* Level high type */ 301 interrupts = <0 37 0x4>; /* Level high type */
299 gpio-controller; 302 gpio-controller;
303 little-endian;
300 #gpio-cells = <2>; 304 #gpio-cells = <2>;
301 interrupt-controller; 305 interrupt-controller;
302 #interrupt-cells = <2>; 306 #interrupt-cells = <2>;
@@ -307,6 +311,7 @@
307 reg = <0x0 0x2330000 0x0 0x10000>; 311 reg = <0x0 0x2330000 0x0 0x10000>;
308 interrupts = <0 37 0x4>; /* Level high type */ 312 interrupts = <0 37 0x4>; /* Level high type */
309 gpio-controller; 313 gpio-controller;
314 little-endian;
310 #gpio-cells = <2>; 315 #gpio-cells = <2>;
311 interrupt-controller; 316 interrupt-controller;
312 #interrupt-cells = <2>; 317 #interrupt-cells = <2>;
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
index ce47792a983d..f7bd9bf0bbb3 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(ce_aes_setkey);
237static struct crypto_alg aes_alg = { 237static struct crypto_alg aes_alg = {
238 .cra_name = "aes", 238 .cra_name = "aes",
239 .cra_driver_name = "aes-ce", 239 .cra_driver_name = "aes-ce",
240 .cra_priority = 300, 240 .cra_priority = 250,
241 .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 241 .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
242 .cra_blocksize = AES_BLOCK_SIZE, 242 .cra_blocksize = AES_BLOCK_SIZE,
243 .cra_ctxsize = sizeof(struct crypto_aes_ctx), 243 .cra_ctxsize = sizeof(struct crypto_aes_ctx),
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 030cdcb46c6b..2731d3b25ed2 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -77,6 +77,7 @@
77#ifndef __ASSEMBLY__ 77#ifndef __ASSEMBLY__
78 78
79#include <linux/stringify.h> 79#include <linux/stringify.h>
80#include <asm/barrier.h>
80 81
81/* 82/*
82 * Low-level accessors 83 * Low-level accessors
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 624f9679f4b0..9622eb48f894 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -64,27 +64,31 @@ do { \
64 64
65#define smp_load_acquire(p) \ 65#define smp_load_acquire(p) \
66({ \ 66({ \
67 typeof(*p) ___p1; \ 67 union { typeof(*p) __val; char __c[1]; } __u; \
68 compiletime_assert_atomic_type(*p); \ 68 compiletime_assert_atomic_type(*p); \
69 switch (sizeof(*p)) { \ 69 switch (sizeof(*p)) { \
70 case 1: \ 70 case 1: \
71 asm volatile ("ldarb %w0, %1" \ 71 asm volatile ("ldarb %w0, %1" \
72 : "=r" (___p1) : "Q" (*p) : "memory"); \ 72 : "=r" (*(__u8 *)__u.__c) \
73 : "Q" (*p) : "memory"); \
73 break; \ 74 break; \
74 case 2: \ 75 case 2: \
75 asm volatile ("ldarh %w0, %1" \ 76 asm volatile ("ldarh %w0, %1" \
76 : "=r" (___p1) : "Q" (*p) : "memory"); \ 77 : "=r" (*(__u16 *)__u.__c) \
78 : "Q" (*p) : "memory"); \
77 break; \ 79 break; \
78 case 4: \ 80 case 4: \
79 asm volatile ("ldar %w0, %1" \ 81 asm volatile ("ldar %w0, %1" \
80 : "=r" (___p1) : "Q" (*p) : "memory"); \ 82 : "=r" (*(__u32 *)__u.__c) \
83 : "Q" (*p) : "memory"); \
81 break; \ 84 break; \
82 case 8: \ 85 case 8: \
83 asm volatile ("ldar %0, %1" \ 86 asm volatile ("ldar %0, %1" \
84 : "=r" (___p1) : "Q" (*p) : "memory"); \ 87 : "=r" (*(__u64 *)__u.__c) \
88 : "Q" (*p) : "memory"); \
85 break; \ 89 break; \
86 } \ 90 } \
87 ___p1; \ 91 __u.__val; \
88}) 92})
89 93
90#define read_barrier_depends() do { } while(0) 94#define read_barrier_depends() do { } while(0)
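
The union { typeof(*p) __val; char __c[1]; } rewrite lets each ldar variant write its result through a pointer of the exact access width (__u8 *, __u16 *, ...) into storage the macro can still hand back as typeof(*p), which the old ___p1 temporary handled less cleanly for non-integer scalars. A simplified stand-alone model of the trick, in GNU C, with plain volatile loads standing in for the acquire asm:

#include <stdint.h>
#include <stdio.h>

/* Model of the smp_load_acquire() union trick: store the loaded bytes
 * through an exactly-sized pointer into __u.__c, return as typeof(*p). */
#define load_model(p) ({                                                  \
        union { __typeof__(*(p)) __val; char __c[1]; } __u;               \
        switch (sizeof(*(p))) {                                           \
        case 1: *(uint8_t *)__u.__c  = *(volatile uint8_t *)(p);  break;  \
        case 2: *(uint16_t *)__u.__c = *(volatile uint16_t *)(p); break;  \
        case 4: *(uint32_t *)__u.__c = *(volatile uint32_t *)(p); break;  \
        case 8: *(uint64_t *)__u.__c = *(volatile uint64_t *)(p); break;  \
        }                                                                 \
        __u.__val;                                                        \
})

int main(void)
{
        uint16_t h = 0xbeef;
        uint64_t q = 0x0123456789abcdefULL;

        printf("%#x\n", (unsigned int)load_model(&h));          /* 0xbeef */
        printf("%#llx\n", (unsigned long long)load_model(&q));
        return 0;
}
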
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 7fbed6919b54..eb8432bb82b8 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -23,7 +23,6 @@
23 */ 23 */
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/ptrace.h>
27 26
28#define COMPAT_USER_HZ 100 27#define COMPAT_USER_HZ 100
29#ifdef __AARCH64EB__ 28#ifdef __AARCH64EB__
@@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
234 return (u32)(unsigned long)uptr; 233 return (u32)(unsigned long)uptr;
235} 234}
236 235
237#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs())) 236#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
238 237
239static inline void __user *arch_compat_alloc_user_space(long len) 238static inline void __user *arch_compat_alloc_user_space(long len)
240{ 239{
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 11d5bb0fdd54..8f271b83f910 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -29,8 +29,9 @@
29#define ARM64_HAS_PAN 4 29#define ARM64_HAS_PAN 4
30#define ARM64_HAS_LSE_ATOMICS 5 30#define ARM64_HAS_LSE_ATOMICS 5
31#define ARM64_WORKAROUND_CAVIUM_23154 6 31#define ARM64_WORKAROUND_CAVIUM_23154 6
32#define ARM64_WORKAROUND_834220 7
32 33
33#define ARM64_NCAPS 7 34#define ARM64_NCAPS 8
34 35
35#ifndef __ASSEMBLY__ 36#ifndef __ASSEMBLY__
36 37
@@ -46,8 +47,12 @@ enum ftr_type {
46#define FTR_STRICT true /* SANITY check strict matching required */ 47#define FTR_STRICT true /* SANITY check strict matching required */
47#define FTR_NONSTRICT false /* SANITY check ignored */ 48#define FTR_NONSTRICT false /* SANITY check ignored */
48 49
50#define FTR_SIGNED true /* Value should be treated as signed */
51#define FTR_UNSIGNED false /* Value should be treated as unsigned */
52
49struct arm64_ftr_bits { 53struct arm64_ftr_bits {
50 bool strict; /* CPU Sanity check: strict matching required ? */ 54 bool sign; /* Value is signed ? */
55 bool strict; /* CPU Sanity check: strict matching required ? */
51 enum ftr_type type; 56 enum ftr_type type;
52 u8 shift; 57 u8 shift;
53 u8 width; 58 u8 width;
@@ -123,6 +128,18 @@ cpuid_feature_extract_field(u64 features, int field)
123 return cpuid_feature_extract_field_width(features, field, 4); 128 return cpuid_feature_extract_field_width(features, field, 4);
124} 129}
125 130
131static inline unsigned int __attribute_const__
132cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
133{
134 return (u64)(features << (64 - width - field)) >> (64 - width);
135}
136
137static inline unsigned int __attribute_const__
138cpuid_feature_extract_unsigned_field(u64 features, int field)
139{
140 return cpuid_feature_extract_unsigned_field_width(features, field, 4);
141}
142
126static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp) 143static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
127{ 144{
128 return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift); 145 return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
@@ -130,7 +147,9 @@ static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
130 147
131static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val) 148static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val)
132{ 149{
133 return cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width); 150 return ftrp->sign ?
151 cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width) :
152 cpuid_feature_extract_unsigned_field_width(val, ftrp->shift, ftrp->width);
134} 153}
135 154
136static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0) 155static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
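
The new unsigned extractor matters because the signed one sign-extends: a 4-bit field holding 0xF comes back as -1, while the shift pair (val << (64 - width - shift)) >> (64 - width) on an unsigned type yields 15. That is exactly what the hw_breakpoint change further down relies on, since BRPS/WRPS are unsigned fields. A stand-alone check of both behaviours (arithmetic right shift of a negative value is what GCC and Clang do, though ISO C leaves it implementation-defined):

#include <stdint.h>
#include <stdio.h>

/* Signed extraction: the arithmetic right shift sign-extends the field. */
static int64_t extract_signed(uint64_t features, int field, int width)
{
        return (int64_t)(features << (64 - width - field)) >> (64 - width);
}

/* Unsigned extraction, as in the helper added above: logical right shift. */
static uint64_t extract_unsigned(uint64_t features, int field, int width)
{
        return (features << (64 - width - field)) >> (64 - width);
}

int main(void)
{
        uint64_t reg = 0xFULL << 12;  /* 4-bit field at bit 12 holding 0xF */

        printf("signed:   %lld\n",
               (long long)extract_signed(reg, 12, 4));            /* -1 */
        printf("unsigned: %llu\n",
               (unsigned long long)extract_unsigned(reg, 12, 4)); /* 15 */
        return 0;
}
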
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 54d0ead41afc..61e08f360e31 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -18,7 +18,6 @@
18 18
19#ifdef __KERNEL__ 19#ifdef __KERNEL__
20 20
21#include <linux/acpi.h>
22#include <linux/types.h> 21#include <linux/types.h>
23#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
24 23
@@ -26,22 +25,16 @@
26#include <asm/xen/hypervisor.h> 25#include <asm/xen/hypervisor.h>
27 26
28#define DMA_ERROR_CODE (~(dma_addr_t)0) 27#define DMA_ERROR_CODE (~(dma_addr_t)0)
29extern struct dma_map_ops *dma_ops;
30extern struct dma_map_ops dummy_dma_ops; 28extern struct dma_map_ops dummy_dma_ops;
31 29
32static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) 30static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
33{ 31{
34 if (unlikely(!dev)) 32 if (dev && dev->archdata.dma_ops)
35 return dma_ops;
36 else if (dev->archdata.dma_ops)
37 return dev->archdata.dma_ops; 33 return dev->archdata.dma_ops;
38 else if (acpi_disabled)
39 return dma_ops;
40 34
41 /* 35 /*
42 * When ACPI is enabled, if arch_set_dma_ops is not called, 36 * We expect no ISA devices, and all other DMA masters are expected to
43 * we will disable device DMA capability by setting it 37 * have someone call arch_setup_dma_ops at device creation time.
44 * to dummy_dma_ops.
45 */ 38 */
46 return &dummy_dma_ops; 39 return &dummy_dma_ops;
47} 40}
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index e54415ec6935..9732908bfc8a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -138,16 +138,18 @@ extern struct pmu perf_ops_bp;
138/* Determine number of BRP registers available. */ 138/* Determine number of BRP registers available. */
139static inline int get_num_brps(void) 139static inline int get_num_brps(void)
140{ 140{
141 u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
141 return 1 + 142 return 1 +
142 cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1), 143 cpuid_feature_extract_unsigned_field(dfr0,
143 ID_AA64DFR0_BRPS_SHIFT); 144 ID_AA64DFR0_BRPS_SHIFT);
144} 145}
145 146
146/* Determine number of WRP registers available. */ 147/* Determine number of WRP registers available. */
147static inline int get_num_wrps(void) 148static inline int get_num_wrps(void)
148{ 149{
150 u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
149 return 1 + 151 return 1 +
150 cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1), 152 cpuid_feature_extract_unsigned_field(dfr0,
151 ID_AA64DFR0_WRPS_SHIFT); 153 ID_AA64DFR0_WRPS_SHIFT);
152} 154}
153 155
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 23eb450b820b..8e8d30684392 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -7,4 +7,9 @@ struct pt_regs;
7 7
8extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); 8extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
9 9
10static inline int nr_legacy_irqs(void)
11{
12 return 0;
13}
14
10#endif 15#endif
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 17e92f05b1fe..25a40213bd9b 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -99,12 +99,22 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
 }
 
-static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+/*
+ * vcpu_get_reg and vcpu_set_reg should always be passed a register number
+ * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
+ * AArch32 with banked registers.
+ */
+static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+					 u8 reg_num)
 {
-	if (vcpu_mode_is_32bit(vcpu))
-		return vcpu_reg32(vcpu, reg_num);
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+}
 
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	if (reg_num != 31)
+		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
 }
 
 /* Get vcpu SPSR for current mode */
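[Note] The rewritten accessors above encode the AArch64 zero-register convention: general-purpose register number 31 is XZR/WZR, which reads as zero and discards writes, so handing callers a raw pointer (as the old vcpu_reg() did) cannot model it. Below is a minimal user-space sketch of that convention; the fake_vcpu structure and all values are invented for illustration and are not kernel code.

/* Sketch: register 31 reads as zero; writes to it are dropped. */
#include <stdio.h>

struct fake_vcpu { unsigned long regs[31]; };

static unsigned long get_reg(const struct fake_vcpu *v, unsigned char n)
{
    return (n == 31) ? 0 : v->regs[n];
}

static void set_reg(struct fake_vcpu *v, unsigned char n, unsigned long val)
{
    if (n != 31)
        v->regs[n] = val;   /* a write to "XZR" is silently discarded */
}

int main(void)
{
    struct fake_vcpu v = { .regs = { 0 } };

    set_reg(&v, 0, 0xdeadbeef);
    set_reg(&v, 31, 0xdeadbeef);    /* dropped */
    printf("x0=%lx xzr=%lx\n", get_reg(&v, 0), get_reg(&v, 31));
    return 0;
}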
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c0e87898ba96..24165784b803 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void)
 #define destroy_context(mm)		do { } while(0)
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 
-#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9819a9426b69..63f52b55defe 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 
 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
@@ -275,10 +276,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * hardware updates of the pte (ptep_set_access_flags safely changes
 	 * valid ptes without going through an invalid entry).
 	 */
-	if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
-	    pte_valid(*ptep)) {
-		BUG_ON(!pte_young(pte));
-		BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
+	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
+	    pte_valid(*ptep) && pte_valid(pte)) {
+		VM_WARN_ONCE(!pte_young(pte),
+			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
+			     __func__, pte_val(*ptep), pte_val(pte));
+		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
+			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
+			     __func__, pte_val(*ptep), pte_val(pte));
 	}
 
 	set_pte(ptep, pte);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 24926f2504f7..feb6b4efa641 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 			   (1 << MIDR_VARIANT_SHIFT) | 2),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+	{
+	/* Cortex-A57 r0p0 - r1p2 */
+		.desc = "ARM erratum 834220",
+		.capability = ARM64_WORKAROUND_834220,
+		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+			   (1 << MIDR_VARIANT_SHIFT) | 2),
+	},
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
 	{
 	/* Cortex-A53 r0p[01234] */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c8cf89223b5a..0669c63281ea 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -44,8 +44,9 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 
-#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 	{						\
+		.sign = SIGNED,				\
 		.strict = STRICT,			\
 		.type = TYPE,				\
 		.shift = SHIFT,				\
@@ -53,6 +54,14 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 		.safe_val = SAFE_VAL,			\
 	}
 
+/* Define a feature with signed values */
+#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+	__ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+
+/* Define a feature with unsigned value */
+#define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+	__ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+
 #define ARM64_FTR_END					\
 	{						\
 		.width = 0,				\
@@ -99,7 +108,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
 	 * Differing PARange is fine as long as all peripherals and memory are mapped
 	 * within the minimum PARange of all CPUs
 	 */
-	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
@@ -115,18 +124,18 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 };
 
 static struct arm64_ftr_bits ftr_ctr[] = {
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
 	/*
 	 * Linux can handle differing I-cache policies. Userspace JITs will
 	 * make use of *minLine
 	 */
-	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),	/* L1Ip */
+	U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),	/* L1Ip */
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),	/* RAZ */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
 	ARM64_FTR_END,
 };
 
@@ -144,12 +153,12 @@ static struct arm64_ftr_bits ftr_id_mmfr0[] = {
 
 static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
 	ARM64_FTR_END,
 };
 
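[Note] The ARM64_FTR_BITS()/U_ARM64_FTR_BITS() split above, and the matching cpuid_feature_extract_unsigned_field() change in get_num_brps()/get_num_wrps(), exist because a 4-bit ID register field holding 0xf is 15 when extracted unsigned but -1 when sign-extended, which would collapse "1 + BRPs" to zero breakpoints. A standalone sketch of the two extraction styles follows; the shift and register value are made up for the example, and the arithmetic right shift of a negative value relies on the usual compiler behaviour.

/* Sketch: signed vs unsigned extraction of a 4-bit ID register field. */
#include <stdint.h>
#include <stdio.h>

static int64_t extract_signed(uint64_t reg, int shift)
{
    /* move the field to the top, then arithmetic-shift it back down */
    return (int64_t)(reg << (64 - shift - 4)) >> 60;
}

static uint64_t extract_unsigned(uint64_t reg, int shift)
{
    return (reg >> shift) & 0xf;
}

int main(void)
{
    uint64_t dfr0 = 0xf000;  /* hypothetical field value 0xf at shift 12 */

    printf("signed:   %lld\n", (long long)extract_signed(dfr0, 12));            /* -1 */
    printf("unsigned: %llu\n", (unsigned long long)extract_unsigned(dfr0, 12)); /* 15 */
    return 0;
}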
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 706679d0a0b4..212ae6361d8b 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -30,6 +30,7 @@
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/delay.h>
 
 /*
  * In case the boot CPU is hotpluggable, we record its initial state and
@@ -112,6 +113,10 @@ static int c_show(struct seq_file *m, void *v)
 	 */
 	seq_printf(m, "processor\t: %d\n", i);
 
+	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+		   loops_per_jiffy / (500000UL/HZ),
+		   loops_per_jiffy / (5000UL/HZ) % 100);
+
 	/*
 	 * Dump out the common processor features in a single line.
 	 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
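[Note] The BogoMIPS line restored above prints the integer part and two decimals of loops_per_jiffy * HZ / 500000 using only integer arithmetic. A self-contained illustration with invented numbers; the HZ and loops_per_jiffy values here are assumptions for the example, not measurements from any board.

/* Sketch of the BogoMIPS formatting arithmetic. */
#include <stdio.h>

#define HZ 100UL

int main(void)
{
    unsigned long loops_per_jiffy = 24000UL;    /* hypothetical */

    printf("BogoMIPS\t: %lu.%02lu\n",
           loops_per_jiffy / (500000UL / HZ),       /* 24000/5000 = 4 */
           loops_per_jiffy / (5000UL / HZ) % 100);  /* 24000/50 %% 100 = 80 */
    /* prints "BogoMIPS        : 4.80" */
    return 0;
}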
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index de46b50f4cdf..4eeb17198cfa 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -127,7 +127,11 @@ static int __init uefi_init(void)
 	table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
 	config_tables = early_memremap(efi_to_phys(efi.systab->tables),
 				       table_size);
-
+	if (config_tables == NULL) {
+		pr_warn("Unable to map EFI config table array.\n");
+		retval = -ENOMEM;
+		goto out;
+	}
 	retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
 					 sizeof(efi_config_table_64_t), NULL);
 
@@ -209,6 +213,14 @@ void __init efi_init(void)
 	      PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
 	memmap.phys_map = params.mmap;
 	memmap.map = early_memremap(params.mmap, params.mmap_size);
+	if (memmap.map == NULL) {
+		/*
+		 * If we are booting via UEFI, the UEFI memory map is the only
+		 * description of memory we have, so there is little point in
+		 * proceeding if we cannot access it.
+		 */
+		panic("Unable to map EFI memory map.\n");
+	}
 	memmap.map_end = memmap.map + params.mmap_size;
 	memmap.desc_size = params.desc_size;
 	memmap.desc_version = params.desc_ver;
@@ -224,8 +236,9 @@ static bool __init efi_virtmap_init(void)
 {
 	efi_memory_desc_t *md;
 
+	init_new_context(NULL, &efi_mm);
+
 	for_each_efi_memory_desc(&memmap, md) {
-		u64 paddr, npages, size;
 		pgprot_t prot;
 
 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
@@ -233,11 +246,6 @@ static bool __init efi_virtmap_init(void)
 		if (md->virt_addr == 0)
 			return false;
 
-		paddr = md->phys_addr;
-		npages = md->num_pages;
-		memrange_efi_to_native(&paddr, &npages);
-		size = npages << PAGE_SHIFT;
-
 		pr_info("  EFI remap 0x%016llx => %p\n",
 			md->phys_addr, (void *)md->virt_addr);
 
@@ -254,7 +262,9 @@ static bool __init efi_virtmap_init(void)
 		else
 			prot = PAGE_KERNEL;
 
-		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+		create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
+				   md->num_pages << EFI_PAGE_SHIFT,
+				   __pgprot(pgprot_val(prot) | PTE_NG));
 	}
 	return true;
 }
@@ -270,12 +280,12 @@ static int __init arm64_enable_runtime_services(void)
 
 	if (!efi_enabled(EFI_BOOT)) {
 		pr_info("EFI services will not be available.\n");
-		return -1;
+		return 0;
 	}
 
 	if (efi_runtime_disabled()) {
 		pr_info("EFI runtime services will be disabled.\n");
-		return -1;
+		return 0;
 	}
 
 	pr_info("Remapping and enabling EFI services.\n");
@@ -285,7 +295,7 @@ static int __init arm64_enable_runtime_services(void)
 			   mapsize);
 	if (!memmap.map) {
 		pr_err("Failed to remap EFI memory map\n");
-		return -1;
+		return -ENOMEM;
 	}
 	memmap.map_end = memmap.map + mapsize;
 	efi.memmap = &memmap;
@@ -294,13 +304,13 @@ static int __init arm64_enable_runtime_services(void)
 			    sizeof(efi_system_table_t));
 	if (!efi.systab) {
 		pr_err("Failed to remap EFI System Table\n");
-		return -1;
+		return -ENOMEM;
 	}
 	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
 	if (!efi_virtmap_init()) {
 		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	/* Set up runtime services function pointers */
@@ -329,14 +339,7 @@ core_initcall(arm64_dmi_init);
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-	if (mm == &init_mm)
-		cpu_set_reserved_ttbr0();
-	else
-		cpu_switch_mm(mm->pgd, mm);
-
-	local_flush_tlb_all();
-	if (icache_is_aivivt())
-		__local_flush_icache_all();
+	switch_mm(NULL, mm, NULL);
 }
 
 void efi_virtmap_load(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index fce95e17cf7f..1095aa483a1c 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,3 +1,4 @@
+#include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
@@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	local_dbg_save(flags);
 
 	/*
+	 * Function graph tracer state gets inconsistent when the kernel
+	 * calls functions that never return (aka suspend finishers) hence
+	 * disable graph tracing during their execution.
+	 */
+	pause_graph_tracing();
+
+	/*
 	 * mm context saved on the stack, it will be restored when
 	 * the cpu comes out of reset through the identity mapped
 	 * page tables, so that the thread address space is properly
@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 		hw_breakpoint_restore(NULL);
 	}
 
+	unpause_graph_tracing();
+
 	/*
 	 * Restore pstate flags. OS lock and mdscr have been already
 	 * restored, so from this point onwards, debugging is fully
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 1ee2c3937d4e..71426a78db12 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -5,6 +5,7 @@
  */
 
 #include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
@@ -140,7 +141,7 @@ SECTIONS
 		ARM_EXIT_KEEP(EXIT_DATA)
 	}
 
-	PERCPU_SECTION(64)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
@@ -158,7 +159,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	_data = .;
 	_sdata = .;
-	RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 	PECOFF_EDATA_PADDING
 	_edata = .;
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 68a0759b1375..15f0477b0d2a 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -37,7 +37,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int ret;
 
-	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
 			    kvm_vcpu_hvc_get_imm(vcpu));
 
 	ret = kvm_psci_call(vcpu);
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 1599701ef044..86c289832272 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context)
 ENDPROC(__kvm_flush_vm_context)
 
 __kvm_hyp_panic:
+	// Stash PAR_EL1 before corrupting it in __restore_sysregs
+	mrs	x0, par_el1
+	push	x0, xzr
+
 	// Guess the context by looking at VTTBR:
 	// If zero, then we're already a host.
 	// Otherwise restore a minimal host context before panicing.
@@ -898,7 +902,7 @@ __kvm_hyp_panic:
 	mrs	x3, esr_el2
 	mrs	x4, far_el2
 	mrs	x5, hpfar_el2
-	mrs	x6, par_el1
+	pop	x6, xzr		// active context PAR_EL1
 	mrs	x7, tpidr_el2
 
 	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -914,7 +918,7 @@ __kvm_hyp_panic:
 ENDPROC(__kvm_hyp_panic)
 
 __hyp_panic_str:
-	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
+	.ascii	"HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
 
 	.align	2
 
@@ -1015,9 +1019,15 @@ el1_trap:
 	b.ne	1f		// Not an abort we care about
 
 	/* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
 	and	x2, x1, #ESR_ELx_FSC_TYPE
 	cmp	x2, #FSC_PERM
 	b.ne	1f		// Not a permission fault
+alternative_else
+	nop			// Use the permission fault path to
+	nop			// check for a valid S1 translation,
+	nop			// regardless of the ESR value.
+alternative_endif
 
 	/*
 	 * Check for Stage-1 page table walk, which is guaranteed
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 85c57158dcd9..648112e90ed5 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
 	/* Note: These now point to the banked copies */
 	*vcpu_spsr(vcpu) = new_spsr_value;
-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
 	/* Branch to exception vector */
 	if (sctlr & (1 << 13))
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 87a64e8db04c..d2650e84faf2 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -78,7 +78,7 @@ static u32 get_ccsidr(u32 csselr)
  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
  */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
 	if (!p->is_write)
@@ -94,21 +94,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
  * sys_regs and leave it in complete control of the caches.
  */
 static bool access_vm_reg(struct kvm_vcpu *vcpu,
-			  const struct sys_reg_params *p,
+			  struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
-	unsigned long val;
 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
 
 	BUG_ON(!p->is_write);
 
-	val = *vcpu_reg(vcpu, p->Rt);
 	if (!p->is_aarch32) {
-		vcpu_sys_reg(vcpu, r->reg) = val;
+		vcpu_sys_reg(vcpu, r->reg) = p->regval;
 	} else {
 		if (!p->is_32bit)
-			vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
-		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
+			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
+		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
 	}
 
 	kvm_toggle_cache(vcpu, was_enabled);
@@ -122,22 +120,19 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
  * for both AArch64 and AArch32 accesses.
  */
 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *p,
+			   struct sys_reg_params *p,
 			   const struct sys_reg_desc *r)
 {
-	u64 val;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	val = *vcpu_reg(vcpu, p->Rt);
-	vgic_v3_dispatch_sgi(vcpu, val);
+	vgic_v3_dispatch_sgi(vcpu, p->regval);
 
 	return true;
 }
 
 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
 	if (p->is_write)
@@ -147,19 +142,19 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 }
 
 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *p,
+			   struct sys_reg_params *p,
 			   const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
 		return ignore_write(vcpu, p);
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = (1 << 3);
+		p->regval = (1 << 3);
 		return true;
 	}
 }
 
 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
-				   const struct sys_reg_params *p,
+				   struct sys_reg_params *p,
 				   const struct sys_reg_desc *r)
 {
@@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
 	} else {
 		u32 val;
 		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
-		*vcpu_reg(vcpu, p->Rt) = val;
+		p->regval = val;
 		return true;
 	}
 }
@@ -200,17 +195,17 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
  * now use the debug registers.
  */
 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
-		vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+		vcpu_sys_reg(vcpu, r->reg) = p->regval;
 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+		p->regval = vcpu_sys_reg(vcpu, r->reg);
 	}
 
-	trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
 
 	return true;
 }
@@ -225,10 +220,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
  * hyp.S code switches between host and guest values in future.
  */
 static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
-			      const struct sys_reg_params *p,
+			      struct sys_reg_params *p,
 			      u64 *dbg_reg)
 {
-	u64 val = *vcpu_reg(vcpu, p->Rt);
+	u64 val = p->regval;
 
 	if (p->is_32bit) {
 		val &= 0xffffffffUL;
@@ -240,19 +235,16 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
 }
 
 static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
-			      const struct sys_reg_params *p,
+			      struct sys_reg_params *p,
 			      u64 *dbg_reg)
 {
-	u64 val = *dbg_reg;
-
+	p->regval = *dbg_reg;
 	if (p->is_32bit)
-		val &= 0xffffffffUL;
-
-	*vcpu_reg(vcpu, p->Rt) = val;
+		p->regval &= 0xffffffffUL;
 }
 
 static inline bool trap_bvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -294,7 +286,7 @@ static inline void reset_bvr(struct kvm_vcpu *vcpu,
 }
 
 static inline bool trap_bcr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
@@ -337,7 +329,7 @@ static inline void reset_bcr(struct kvm_vcpu *vcpu,
 }
 
 static inline bool trap_wvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
@@ -380,7 +372,7 @@ static inline void reset_wvr(struct kvm_vcpu *vcpu,
 }
 
 static inline bool trap_wcr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
@@ -687,7 +679,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 };
 
 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
@@ -697,23 +689,23 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
 		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
 		u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);
 
-		*vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
-					  (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
-					  (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
-					  (6 << 16) | (el3 << 14) | (el3 << 12));
+		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
+			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
+			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
+			     | (6 << 16) | (el3 << 14) | (el3 << 12));
 		return true;
 	}
 }
 
 static bool trap_debug32(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
+			 struct sys_reg_params *p,
 			 const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
-		vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+		vcpu_cp14(vcpu, r->reg) = p->regval;
 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
+		p->regval = vcpu_cp14(vcpu, r->reg);
 	}
 
 	return true;
@@ -731,7 +723,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
  */
 
 static inline bool trap_xvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu,
 		u64 val = *dbg_reg;
 
 		val &= 0xffffffffUL;
-		val |= *vcpu_reg(vcpu, p->Rt) << 32;
+		val |= p->regval << 32;
 		*dbg_reg = val;
 
 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
+		p->regval = *dbg_reg >> 32;
 	}
 
 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
@@ -991,7 +983,7 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
  * Return 0 if the access has been handled, and -1 if not.
  */
 static int emulate_cp(struct kvm_vcpu *vcpu,
-		      const struct sys_reg_params *params,
+		      struct sys_reg_params *params,
 		      const struct sys_reg_desc *table,
 		      size_t num)
 {
@@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (hsr >> 5) & 0xf;
 	int Rt2 = (hsr >> 10) & 0xf;
 
 	params.is_aarch32 = true;
 	params.is_32bit = false;
 	params.CRm = (hsr >> 1) & 0xf;
-	params.Rt = (hsr >> 5) & 0xf;
 	params.is_write = ((hsr & 1) == 0);
 
 	params.Op0 = 0;
@@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 	params.CRn = 0;
 
 	/*
-	 * Massive hack here. Store Rt2 in the top 32bits so we only
-	 * have one register to deal with. As we use the same trap
+	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
 	 * backends between AArch32 and AArch64, we get away with it.
 	 */
 	if (params.is_write) {
-		u64 val = *vcpu_reg(vcpu, params.Rt);
-		val &= 0xffffffff;
-		val |= *vcpu_reg(vcpu, Rt2) << 32;
-		*vcpu_reg(vcpu, params.Rt) = val;
+		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
+		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
 	}
 
 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
@@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 	unhandled_cp_access(vcpu, &params);
 
 out:
-	/* Do the opposite hack for the read side */
+	/* Split up the value between registers for the read side */
 	if (!params.is_write) {
-		u64 val = *vcpu_reg(vcpu, params.Rt);
-		val >>= 32;
-		*vcpu_reg(vcpu, Rt2) = val;
+		vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
+		vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
 	}
 
 	return 1;
@@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt  = (hsr >> 5) & 0xf;
 
 	params.is_aarch32 = true;
 	params.is_32bit = true;
 	params.CRm = (hsr >> 1) & 0xf;
-	params.Rt  = (hsr >> 5) & 0xf;
+	params.regval = vcpu_get_reg(vcpu, Rt);
 	params.is_write = ((hsr & 1) == 0);
 	params.CRn = (hsr >> 10) & 0xf;
 	params.Op0 = 0;
 	params.Op1 = (hsr >> 14) & 0x7;
 	params.Op2 = (hsr >> 17) & 0x7;
 
-	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
-		return 1;
-	if (!emulate_cp(vcpu, &params, global, nr_global))
+	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
+	    !emulate_cp(vcpu, &params, global, nr_global)) {
+		if (!params.is_write)
+			vcpu_set_reg(vcpu, Rt, params.regval);
 		return 1;
+	}
 
 	unhandled_cp_access(vcpu, &params);
 	return 1;
@@ -1175,7 +1166,7 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *params)
+			   struct sys_reg_params *params)
 {
 	size_t num;
 	const struct sys_reg_desc *table, *r;
@@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct sys_reg_params params;
 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (esr >> 5) & 0x1f;
+	int ret;
 
 	trace_kvm_handle_sys_reg(esr);
 
@@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.CRn = (esr >> 10) & 0xf;
 	params.CRm = (esr >> 1) & 0xf;
 	params.Op2 = (esr >> 17) & 0x7;
-	params.Rt = (esr >> 5) & 0x1f;
+	params.regval = vcpu_get_reg(vcpu, Rt);
 	params.is_write = !(esr & 1);
 
-	return emulate_sys_reg(vcpu, &params);
+	ret = emulate_sys_reg(vcpu, &params);
+
+	if (!params.is_write)
+		vcpu_set_reg(vcpu, Rt, params.regval);
+	return ret;
 }
 
 /******************************************************************************
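[Note] The regval field introduced in this series replaces per-register pointer juggling in the trap handlers. For 64-bit AArch32 coprocessor accesses, kvm_handle_cp_64() packs Rt and Rt2 into one 64-bit value on the write path and splits it back on the read path. A small user-space sketch of that round trip follows; the helper names mirror, but are not taken from, the kernel's lower_32_bits()/upper_32_bits(), and the register values are invented.

/* Sketch: combining two 32-bit guest registers into one 64-bit regval. */
#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
    uint64_t rt = 0x11223344, rt2 = 0x55667788;  /* hypothetical guest regs */
    uint64_t regval;

    /* write path: make a 64-bit value out of Rt and Rt2 */
    regval = (rt & 0xffffffff) | (rt2 << 32);

    /* read path: split the value back between the two registers */
    printf("Rt=%08x Rt2=%08x\n", lower_32_bits(regval), upper_32_bits(regval));
    return 0;
}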
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index eaa324e4db4d..dbbb01cfbee9 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -28,7 +28,7 @@ struct sys_reg_params {
 	u8	CRn;
 	u8	CRm;
 	u8	Op2;
-	u8	Rt;
+	u64	regval;
 	bool	is_write;
 	bool	is_aarch32;
 	bool	is_32bit;	/* Only valid if is_aarch32 is true */
@@ -44,7 +44,7 @@ struct sys_reg_desc {
 
 	/* Trapped access from guest, if non-NULL. */
 	bool (*access)(struct kvm_vcpu *,
-		       const struct sys_reg_params *,
+		       struct sys_reg_params *,
 		       const struct sys_reg_desc *);
 
 	/* Initialization for vcpu. */
@@ -77,9 +77,9 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu,
 }
 
 static inline bool read_zero(struct kvm_vcpu *vcpu,
-			     const struct sys_reg_params *p)
+			     struct sys_reg_params *p)
 {
-	*vcpu_reg(vcpu, p->Rt) = 0;
+	p->regval = 0;
 	return true;
 }
 
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 1e4576824165..ed90578fa120 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -31,13 +31,13 @@
 #include "sys_regs.h"
 
 static bool access_actlr(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
+			 struct sys_reg_params *p,
 			 const struct sys_reg_desc *r)
 {
 	if (p->is_write)
 		return ignore_write(vcpu, p);
 
-	*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
+	p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1);
 	return true;
 }
 
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index f636a2639f03..e87f53ff5f58 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -76,13 +76,28 @@ static void flush_context(unsigned int cpu)
 	__flush_icache_all();
 }
 
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
 {
 	int cpu;
-	for_each_possible_cpu(cpu)
-		if (per_cpu(reserved_asids, cpu) == asid)
-			return 1;
-	return 0;
+	bool hit = false;
+
+	/*
+	 * Iterate over the set of reserved ASIDs looking for a match.
+	 * If we find one, then we can update our mm to use newasid
+	 * (i.e. the same ASID in the current generation) but we can't
+	 * exit the loop early, since we need to ensure that all copies
+	 * of the old ASID are updated to reflect the mm. Failure to do
+	 * so could result in us missing the reserved ASID in a future
+	 * generation.
+	 */
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(reserved_asids, cpu) == asid) {
+			hit = true;
+			per_cpu(reserved_asids, cpu) = newasid;
+		}
+	}
+
+	return hit;
 }
 
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -92,12 +107,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0) {
+		u64 newasid = generation | (asid & ~ASID_MASK);
+
 		/*
 		 * If our current ASID was active during a rollover, we
 		 * can continue to use it and this was just a false alarm.
 		 */
-		if (is_reserved_asid(asid))
-			return generation | (asid & ~ASID_MASK);
+		if (check_update_reserved_asid(asid, newasid))
+			return newasid;
 
 		/*
 		 * We had a valid ASID in a previous life, so try to re-use
@@ -105,7 +122,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		 */
 		asid &= ~ASID_MASK;
 		if (!__test_and_set_bit(asid, asid_map))
-			goto bump_gen;
+			return newasid;
 	}
 
111 /* 128 /*
@@ -129,10 +146,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
129set_asid: 146set_asid:
130 __set_bit(asid, asid_map); 147 __set_bit(asid, asid_map);
131 cur_idx = asid; 148 cur_idx = asid;
132 149 return asid | generation;
133bump_gen:
134 asid |= generation;
135 return asid;
136} 150}
137 151
138void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) 152void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
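[Note] The rollover handling above depends on an ASID layout in which the low bits hold the hardware ASID and the high bits a software generation counter, so "generation | (asid & ~ASID_MASK)" re-stamps an old ASID into the current generation. A toy model of that layout follows; the 16-bit ASID width and all values are invented for the example.

/* Sketch: generation bits above, hardware ASID bits below. */
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS 16
#define ASID_MASK (~0ULL << ASID_BITS)  /* the generation (upper) bits */

int main(void)
{
    uint64_t generation = 3ULL << ASID_BITS;     /* hypothetical current generation */
    uint64_t old = (2ULL << ASID_BITS) | 0x42;   /* ASID 0x42 from generation 2 */
    uint64_t newasid = generation | (old & ~ASID_MASK);

    printf("old=0x%llx new=0x%llx asid=0x%llx\n",
           (unsigned long long)old, (unsigned long long)newasid,
           (unsigned long long)(newasid & ~ASID_MASK));
    return 0;
}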
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 131a199114b4..7963aa4b5d28 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/gfp.h>
+#include <linux/acpi.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
@@ -28,9 +29,6 @@
 
 #include <asm/cacheflush.h>
 
-struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
 				 bool coherent)
 {
@@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops);
 
 static int __init arm64_dma_init(void)
 {
-	int ret;
-
-	dma_ops = &swiotlb_dma_ops;
-
-	ret = atomic_pool_init();
-
-	return ret;
+	return atomic_pool_init();
 }
 arch_initcall(arm64_dma_init);
 
@@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 {
 	bool coherent = is_device_dma_coherent(dev);
 	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+	size_t iosize = size;
 	void *addr;
 
 	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
 		return NULL;
+
+	size = PAGE_ALIGN(size);
+
 	/*
 	 * Some drivers rely on this, and we probably don't want the
 	 * possibility of stale kernel data being read by devices anyway.
@@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		struct page **pages;
 		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
 
-		pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
+		pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
 					flush_page);
 		if (!pages)
 			return NULL;
@@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
 					      __builtin_return_address(0));
 		if (!addr)
-			iommu_dma_free(dev, pages, size, handle);
+			iommu_dma_free(dev, pages, iosize, handle);
 	} else {
 		struct page *page;
 		/*
@@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		if (!addr)
 			return NULL;
 
-		*handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
+		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
 		if (iommu_dma_mapping_error(dev, *handle)) {
 			if (coherent)
 				__free_pages(page, get_order(size));
@@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 			       dma_addr_t handle, struct dma_attrs *attrs)
 {
+	size_t iosize = size;
+
+	size = PAGE_ALIGN(size);
 	/*
 	 * @cpu_addr will be one of 3 things depending on how it was allocated:
 	 * - A remapped array of pages from iommu_dma_alloc(), for all
@@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	 * Hence how dodgy the below logic looks...
 	 */
 	if (__in_atomic_pool(cpu_addr, size)) {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_from_pool(cpu_addr, size);
 	} else if (is_vmalloc_addr(cpu_addr)){
 		struct vm_struct *area = find_vm_area(cpu_addr);
 
 		if (WARN_ON(!area || !area->pages))
 			return;
-		iommu_dma_free(dev, area->pages, size, &handle);
+		iommu_dma_free(dev, area->pages, iosize, &handle);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_pages(virt_to_page(cpu_addr), get_order(size));
 	}
 }
@@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			struct iommu_ops *iommu, bool coherent)
 {
-	if (!acpi_disabled && !dev->archdata.dma_ops)
-		dev->archdata.dma_ops = dma_ops;
+	if (!dev->archdata.dma_ops)
+		dev->archdata.dma_ops = &swiotlb_dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
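[Note] The size/iosize split above keeps two lengths in play: the page-aligned size used for the CPU-side allocation and remapping, and the caller's original iosize used for the IOMMU mapping, so each resource is released with the length it was created with. A minimal sketch of the alignment arithmetic follows; the PAGE_SIZE value is an assumption for illustration.

/* Sketch: page-aligning a requested allocation length. */
#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long iosize = 6000;              /* length the IOMMU mapping uses */
    unsigned long size = PAGE_ALIGN(iosize);  /* length the CPU allocation uses */

    printf("iosize=%lu size=%lu\n", iosize, size);  /* 6000 vs 8192 */
    return 0;
}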
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 19211c4a8911..92ddac1e8ca2 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -393,16 +393,16 @@ static struct fault_info {
 	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault"	},
 	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault"	},
 	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault"	},
-	{ do_bad,		SIGBUS,  0,		"reserved access flag fault"	},
+	{ do_bad,		SIGBUS,  0,		"unknown 8"			},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault"	},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault"	},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault"	},
-	{ do_bad,		SIGBUS,  0,		"reserved permission fault"	},
+	{ do_bad,		SIGBUS,  0,		"unknown 12"			},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 permission fault"	},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault"	},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault"	},
 	{ do_bad,		SIGBUS,  0,		"synchronous external abort"	},
-	{ do_bad,		SIGBUS,  0,		"asynchronous external abort"	},
+	{ do_bad,		SIGBUS,  0,		"unknown 17"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 18"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 19"			},
 	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
@@ -410,16 +410,16 @@ static struct fault_info {
 	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
 	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
 	{ do_bad,		SIGBUS,  0,		"synchronous parity error"	},
-	{ do_bad,		SIGBUS,  0,		"asynchronous parity error"	},
+	{ do_bad,		SIGBUS,  0,		"unknown 25"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 26"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 27"			},
-	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk" },
-	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk" },
-	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk" },
-	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk)" },
 	{ do_bad,		SIGBUS,  0,		"unknown 32"			},
 	{ do_bad,		SIGBUS,  BUS_ADRALN,	"alignment fault"		},
-	{ do_bad,		SIGBUS,  0,		"debug event"			},
+	{ do_bad,		SIGBUS,  0,		"unknown 34"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 35"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 36"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 37"			},
@@ -433,21 +433,21 @@ static struct fault_info {
 	{ do_bad,		SIGBUS,  0,		"unknown 45"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 46"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 47"			},
-	{ do_bad,		SIGBUS,  0,		"unknown 48"			},
+	{ do_bad,		SIGBUS,  0,		"TLB conflict abort"		},
 	{ do_bad,		SIGBUS,  0,		"unknown 49"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 50"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 51"			},
 	{ do_bad,		SIGBUS,  0,		"implementation fault (lockdown abort)" },
-	{ do_bad,		SIGBUS,  0,		"unknown 53"			},
+	{ do_bad,		SIGBUS,  0,		"implementation fault (unsupported exclusive)" },
 	{ do_bad,		SIGBUS,  0,		"unknown 54"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 55"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 56"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 57"			},
-	{ do_bad,		SIGBUS,  0,		"implementation fault (coprocessor abort)" },
+	{ do_bad,		SIGBUS,  0,		"unknown 58"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 59"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 60"			},
-	{ do_bad,		SIGBUS,  0,		"unknown 61"			},
-	{ do_bad,		SIGBUS,  0,		"unknown 62"			},
+	{ do_bad,		SIGBUS,  0,		"section domain fault"		},
+	{ do_bad,		SIGBUS,  0,		"page domain fault"		},
 	{ do_bad,		SIGBUS,  0,		"unknown 63"			},
 };
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e3f563c81c48..873e363048c6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -64,8 +64,12 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 
 static void __init *early_alloc(unsigned long sz)
 {
-	void *ptr = __va(memblock_alloc(sz, sz));
-	BUG_ON(!ptr);
+	phys_addr_t phys;
+	void *ptr;
+
+	phys = memblock_alloc(sz, sz);
+	BUG_ON(!phys);
+	ptr = __va(phys);
 	memset(ptr, 0, sz);
 	return ptr;
 }
@@ -81,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
81 do { 85 do {
82 /* 86 /*
83 * Need to have the least restrictive permissions available 87 * Need to have the least restrictive permissions available
84 * permissions will be fixed up later. Default the new page 88 * permissions will be fixed up later
85 * range as contiguous ptes.
86 */ 89 */
87 set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT)); 90 set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
88 pfn++; 91 pfn++;
89 } while (pte++, i++, i < PTRS_PER_PTE); 92 } while (pte++, i++, i < PTRS_PER_PTE);
90} 93}
91 94
92/*
93 * Given a PTE with the CONT bit set, determine where the CONT range
94 * starts, and clear the entire range of PTE CONT bits.
95 */
96static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
97{
98 int i;
99
100 pte -= CONT_RANGE_OFFSET(addr);
101 for (i = 0; i < CONT_PTES; i++) {
102 set_pte(pte, pte_mknoncont(*pte));
103 pte++;
104 }
105 flush_tlb_all();
106}
107
108/*
109 * Given a range of PTEs set the pfn and provided page protection flags
110 */
111static void __populate_init_pte(pte_t *pte, unsigned long addr,
112 unsigned long end, phys_addr_t phys,
113 pgprot_t prot)
114{
115 unsigned long pfn = __phys_to_pfn(phys);
116
117 do {
118 /* clear all the bits except the pfn, then apply the prot */
119 set_pte(pte, pfn_pte(pfn, prot));
120 pte++;
121 pfn++;
122 addr += PAGE_SIZE;
123 } while (addr != end);
124}
125
126static void alloc_init_pte(pmd_t *pmd, unsigned long addr, 95static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
127 unsigned long end, phys_addr_t phys, 96 unsigned long end, unsigned long pfn,
128 pgprot_t prot, 97 pgprot_t prot,
129 void *(*alloc)(unsigned long size)) 98 void *(*alloc)(unsigned long size))
130{ 99{
131 pte_t *pte; 100 pte_t *pte;
132 unsigned long next;
133 101
134 if (pmd_none(*pmd) || pmd_sect(*pmd)) { 102 if (pmd_none(*pmd) || pmd_sect(*pmd)) {
135 pte = alloc(PTRS_PER_PTE * sizeof(pte_t)); 103 pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
@@ -142,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
142 110
143 pte = pte_offset_kernel(pmd, addr); 111 pte = pte_offset_kernel(pmd, addr);
144 do { 112 do {
145 next = min(end, (addr + CONT_SIZE) & CONT_MASK); 113 set_pte(pte, pfn_pte(pfn, prot));
146 if (((addr | next | phys) & ~CONT_MASK) == 0) { 114 pfn++;
147 /* a block of CONT_PTES */ 115 } while (pte++, addr += PAGE_SIZE, addr != end);
148 __populate_init_pte(pte, addr, next, phys,
149 __pgprot(pgprot_val(prot) | PTE_CONT));
150 } else {
151 /*
152 * If the range being split is already inside of a
153 * contiguous range but this PTE isn't going to be
154 * contiguous, then we want to unmark the adjacent
155 * ranges, then update the portion of the range we
156 * are interested in.
157 */
158 clear_cont_pte_range(pte, addr);
159 __populate_init_pte(pte, addr, next, phys, prot);
160 }
161
162 pte += (next - addr) >> PAGE_SHIFT;
163 phys += next - addr;
164 addr = next;
165 } while (addr != end);
166} 116}
167 117
168static void split_pud(pud_t *old_pud, pmd_t *pmd) 118static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -223,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
223 } 173 }
224 } 174 }
225 } else { 175 } else {
226 alloc_init_pte(pmd, addr, next, phys, prot, alloc); 176 alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
177 prot, alloc);
227 } 178 }
228 phys += next - addr; 179 phys += next - addr;
229 } while (pmd++, addr = next, addr != end); 180 } while (pmd++, addr = next, addr != end);
@@ -362,8 +313,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
362 * for now. This will get more fine grained later once all memory 313 * for now. This will get more fine grained later once all memory
363 * is mapped 314 * is mapped
364 */ 315 */
365 unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); 316 unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
366 unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); 317 unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
367 318
368 if (end < kernel_x_start) { 319 if (end < kernel_x_start) {
369 create_mapping(start, __phys_to_virt(start), 320 create_mapping(start, __phys_to_virt(start),
@@ -451,18 +402,18 @@ static void __init fixup_executable(void)
451{ 402{
452#ifdef CONFIG_DEBUG_RODATA 403#ifdef CONFIG_DEBUG_RODATA
453 /* now that we are actually fully mapped, make the start/end more fine grained */ 404 /* now that we are actually fully mapped, make the start/end more fine grained */
454 if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) { 405 if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
455 unsigned long aligned_start = round_down(__pa(_stext), 406 unsigned long aligned_start = round_down(__pa(_stext),
456 SECTION_SIZE); 407 SWAPPER_BLOCK_SIZE);
457 408
458 create_mapping(aligned_start, __phys_to_virt(aligned_start), 409 create_mapping(aligned_start, __phys_to_virt(aligned_start),
459 __pa(_stext) - aligned_start, 410 __pa(_stext) - aligned_start,
460 PAGE_KERNEL); 411 PAGE_KERNEL);
461 } 412 }
462 413
463 if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) { 414 if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
464 unsigned long aligned_end = round_up(__pa(__init_end), 415 unsigned long aligned_end = round_up(__pa(__init_end),
465 SECTION_SIZE); 416 SWAPPER_BLOCK_SIZE);
466 create_mapping(__pa(__init_end), (unsigned long)__init_end, 417 create_mapping(__pa(__init_end), (unsigned long)__init_end,
467 aligned_end - __pa(__init_end), 418 aligned_end - __pa(__init_end),
468 PAGE_KERNEL); 419 PAGE_KERNEL);
@@ -475,7 +426,7 @@ void mark_rodata_ro(void)
475{ 426{
476 create_mapping_late(__pa(_stext), (unsigned long)_stext, 427 create_mapping_late(__pa(_stext), (unsigned long)_stext,
477 (unsigned long)_etext - (unsigned long)_stext, 428 (unsigned long)_etext - (unsigned long)_stext,
478 PAGE_KERNEL_EXEC | PTE_RDONLY); 429 PAGE_KERNEL_ROX);
479 430
480} 431}
481#endif 432#endif
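
The early_alloc() hunk above reorders the failure check: memblock_alloc() reports failure with a physical address of 0, but the old code ran the result through __va() first, so BUG_ON(!ptr) tested an already-translated value that is typically non-NULL even for a failed allocation. A minimal userspace sketch of the corrected pattern, with hypothetical stand-ins for memblock_alloc() and __va():

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    typedef uint64_t phys_addr_t;

    /* Hypothetical stand-ins: memblock_alloc() returns 0 on failure, and
     * __va() is pure arithmetic that cannot report failure -- which is why
     * the physical address must be checked before translation. */
    static phys_addr_t memblock_alloc_stub(size_t size, size_t align)
    {
            void *p = aligned_alloc(align, size);
            return (phys_addr_t)(uintptr_t)p;  /* 0 on failure, like memblock */
    }

    static void *va_stub(phys_addr_t phys)
    {
            return (void *)(uintptr_t)phys;    /* identity map for the sketch */
    }

    static void *early_alloc(size_t sz)
    {
            phys_addr_t phys = memblock_alloc_stub(sz, sz);

            assert(phys);                      /* check before __va(), as in the fix */
            void *ptr = va_stub(phys);
            memset(ptr, 0, sz);
            return ptr;
    }

    int main(void)
    {
            free(early_alloc(4096));
            return 0;
    }
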
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index cf3c7d4a1b58..b162ad70effc 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -50,7 +50,7 @@ static const int bpf2a64[] = {
50 [BPF_REG_8] = A64_R(21), 50 [BPF_REG_8] = A64_R(21),
51 [BPF_REG_9] = A64_R(22), 51 [BPF_REG_9] = A64_R(22),
52 /* read-only frame pointer to access stack */ 52 /* read-only frame pointer to access stack */
53 [BPF_REG_FP] = A64_FP, 53 [BPF_REG_FP] = A64_R(25),
54 /* temporary register for internal BPF JIT */ 54 /* temporary register for internal BPF JIT */
55 [TMP_REG_1] = A64_R(23), 55 [TMP_REG_1] = A64_R(23),
56 [TMP_REG_2] = A64_R(24), 56 [TMP_REG_2] = A64_R(24),
@@ -139,6 +139,12 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
139/* Stack must be a multiple of 16B */ 139/* Stack must be a multiple of 16B */
140#define STACK_ALIGN(sz) (((sz) + 15) & ~15) 140#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
141 141
142#define _STACK_SIZE \
143 (MAX_BPF_STACK \
144 + 4 /* extra for skb_copy_bits buffer */)
145
146#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
147
142static void build_prologue(struct jit_ctx *ctx) 148static void build_prologue(struct jit_ctx *ctx)
143{ 149{
144 const u8 r6 = bpf2a64[BPF_REG_6]; 150 const u8 r6 = bpf2a64[BPF_REG_6];
@@ -150,10 +156,35 @@ static void build_prologue(struct jit_ctx *ctx)
150 const u8 rx = bpf2a64[BPF_REG_X]; 156 const u8 rx = bpf2a64[BPF_REG_X];
151 const u8 tmp1 = bpf2a64[TMP_REG_1]; 157 const u8 tmp1 = bpf2a64[TMP_REG_1];
152 const u8 tmp2 = bpf2a64[TMP_REG_2]; 158 const u8 tmp2 = bpf2a64[TMP_REG_2];
153 int stack_size = MAX_BPF_STACK;
154 159
155 stack_size += 4; /* extra for skb_copy_bits buffer */ 160 /*
156 stack_size = STACK_ALIGN(stack_size); 161 * BPF prog stack layout
162 *
163 * high
164 * original A64_SP => 0:+-----+ BPF prologue
165 * |FP/LR|
166 * current A64_FP => -16:+-----+
167 * | ... | callee saved registers
168 * +-----+
169 * | | x25/x26
170 * BPF fp register => -80:+-----+ <= (BPF_FP)
171 * | |
172 * | ... | BPF prog stack
173 * | |
174 * +-----+ <= (BPF_FP - MAX_BPF_STACK)
175 * |RSVD | JIT scratchpad
176 * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
177 * | |
178 * | ... | Function call stack
179 * | |
180 * +-----+
181 * low
182 *
183 */
184
185 /* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
186 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
187 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
157 188
158 /* Save callee-saved register */ 189 /* Save callee-saved register */
159 emit(A64_PUSH(r6, r7, A64_SP), ctx); 190 emit(A64_PUSH(r6, r7, A64_SP), ctx);
@@ -161,12 +192,15 @@ static void build_prologue(struct jit_ctx *ctx)
161 if (ctx->tmp_used) 192 if (ctx->tmp_used)
162 emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx); 193 emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
163 194
164 /* Set up BPF stack */ 195 /* Save fp (x25) and x26. SP requires 16-byte alignment */
165 emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); 196 emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);
166 197
167 /* Set up frame pointer */ 198 /* Set up BPF prog stack base register (x25) */
168 emit(A64_MOV(1, fp, A64_SP), ctx); 199 emit(A64_MOV(1, fp, A64_SP), ctx);
169 200
201 /* Set up function call stack */
202 emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
203
170 /* Clear registers A and X */ 204 /* Clear registers A and X */
171 emit_a64_mov_i64(ra, 0, ctx); 205 emit_a64_mov_i64(ra, 0, ctx);
172 emit_a64_mov_i64(rx, 0, ctx); 206 emit_a64_mov_i64(rx, 0, ctx);
@@ -182,13 +216,12 @@ static void build_epilogue(struct jit_ctx *ctx)
182 const u8 fp = bpf2a64[BPF_REG_FP]; 216 const u8 fp = bpf2a64[BPF_REG_FP];
183 const u8 tmp1 = bpf2a64[TMP_REG_1]; 217 const u8 tmp1 = bpf2a64[TMP_REG_1];
184 const u8 tmp2 = bpf2a64[TMP_REG_2]; 218 const u8 tmp2 = bpf2a64[TMP_REG_2];
185 int stack_size = MAX_BPF_STACK;
186
187 stack_size += 4; /* extra for skb_copy_bits buffer */
188 stack_size = STACK_ALIGN(stack_size);
189 219
190 /* We're done with BPF stack */ 220 /* We're done with BPF stack */
191 emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); 221 emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
222
223 /* Restore fp (x25) and x26 */
224 emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
192 225
193 /* Restore callee-saved register */ 226 /* Restore callee-saved register */
194 if (ctx->tmp_used) 227 if (ctx->tmp_used)
@@ -196,8 +229,8 @@ static void build_epilogue(struct jit_ctx *ctx)
196 emit(A64_POP(r8, r9, A64_SP), ctx); 229 emit(A64_POP(r8, r9, A64_SP), ctx);
197 emit(A64_POP(r6, r7, A64_SP), ctx); 230 emit(A64_POP(r6, r7, A64_SP), ctx);
198 231
199 /* Restore frame pointer */ 232 /* Restore FP/LR registers */
200 emit(A64_MOV(1, fp, A64_SP), ctx); 233 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
201 234
202 /* Set return value */ 235 /* Set return value */
203 emit(A64_MOV(1, A64_R(0), r0), ctx); 236 emit(A64_MOV(1, A64_R(0), r0), ctx);
@@ -557,7 +590,25 @@ emit_cond_jmp:
557 case BPF_ST | BPF_MEM | BPF_H: 590 case BPF_ST | BPF_MEM | BPF_H:
558 case BPF_ST | BPF_MEM | BPF_B: 591 case BPF_ST | BPF_MEM | BPF_B:
559 case BPF_ST | BPF_MEM | BPF_DW: 592 case BPF_ST | BPF_MEM | BPF_DW:
560 goto notyet; 593 /* Load imm to a register then store it */
594 ctx->tmp_used = 1;
595 emit_a64_mov_i(1, tmp2, off, ctx);
596 emit_a64_mov_i(1, tmp, imm, ctx);
597 switch (BPF_SIZE(code)) {
598 case BPF_W:
599 emit(A64_STR32(tmp, dst, tmp2), ctx);
600 break;
601 case BPF_H:
602 emit(A64_STRH(tmp, dst, tmp2), ctx);
603 break;
604 case BPF_B:
605 emit(A64_STRB(tmp, dst, tmp2), ctx);
606 break;
607 case BPF_DW:
608 emit(A64_STR64(tmp, dst, tmp2), ctx);
609 break;
610 }
611 break;
561 612
562 /* STX: *(size *)(dst + off) = src */ 613 /* STX: *(size *)(dst + off) = src */
563 case BPF_STX | BPF_MEM | BPF_W: 614 case BPF_STX | BPF_MEM | BPF_W:
@@ -624,7 +675,7 @@ emit_cond_jmp:
624 return -EINVAL; 675 return -EINVAL;
625 } 676 }
626 emit_a64_mov_i64(r3, size, ctx); 677 emit_a64_mov_i64(r3, size, ctx);
627 emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx); 678 emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
628 emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx); 679 emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
629 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); 680 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
630 emit(A64_MOV(1, A64_FP, A64_SP), ctx); 681 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
@@ -758,7 +809,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
758 if (bpf_jit_enable > 1) 809 if (bpf_jit_enable > 1)
759 bpf_jit_dump(prog->len, image_size, 2, ctx.image); 810 bpf_jit_dump(prog->len, image_size, 2, ctx.image);
760 811
761 bpf_flush_icache(ctx.image, ctx.image + ctx.idx); 812 bpf_flush_icache(header, ctx.image + ctx.idx);
762 813
763 set_memory_ro((unsigned long)header, header->pages); 814 set_memory_ro((unsigned long)header, header->pages);
764 prog->bpf_func = (void *)ctx.image; 815 prog->bpf_func = (void *)ctx.image;
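
The prologue rework above moves the BPF frame pointer off the AAPCS frame pointer (x29) onto the callee-saved x25 and computes the JIT scratch area once through the STACK_SIZE macro. A quick arithmetic check of that macro, assuming the MAX_BPF_STACK value of this era (512 bytes):

    #include <stdio.h>

    #define MAX_BPF_STACK   512     /* the v4.4-era value; an assumption here */
    #define STACK_ALIGN(sz) (((sz) + 15) & ~15)
    #define _STACK_SIZE     (MAX_BPF_STACK + 4 /* skb_copy_bits buffer */)
    #define STACK_SIZE      STACK_ALIGN(_STACK_SIZE)

    int main(void)
    {
            /* 512 + 4 = 516, rounded up to the next multiple of 16. */
            printf("STACK_SIZE = %d\n", STACK_SIZE);    /* prints 528 */
            return 0;
    }
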
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index 1e9c8b0bf486..170d786807c4 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -14,7 +14,7 @@
14 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 14 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
15 * Copyright (C) 2009 Jaswinder Singh Rajput 15 * Copyright (C) 2009 Jaswinder Singh Rajput
16 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 16 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
17 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 17 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
18 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> 18 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
19 * 19 *
20 * ppc: 20 * ppc:
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index db73390568c8..74c132d901bd 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
11 11
12 12
13 13
14#define NR_syscalls 322 /* length of syscall table */ 14#define NR_syscalls 323 /* length of syscall table */
15 15
16/* 16/*
17 * The following defines stop scripts/checksyscalls.sh from complaining about 17 * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 9038726e7d26..762edce7572e 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -335,5 +335,6 @@
335#define __NR_userfaultfd 1343 335#define __NR_userfaultfd 1343
336#define __NR_membarrier 1344 336#define __NR_membarrier 1344
337#define __NR_kcmp 1345 337#define __NR_kcmp 1345
338#define __NR_mlock2 1346
338 339
339#endif /* _UAPI_ASM_IA64_UNISTD_H */ 340#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index dcd97f84d065..534a74acb849 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1771,5 +1771,6 @@ sys_call_table:
1771 data8 sys_userfaultfd 1771 data8 sys_userfaultfd
1772 data8 sys_membarrier 1772 data8 sys_membarrier
1773 data8 sys_kcmp // 1345 1773 data8 sys_kcmp // 1345
1774 data8 sys_mlock2
1774 1775
1775 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1776 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
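
Wiring up a syscall here (and on m68k and parisc below) touches three places that must stay in sync: the __NR_ constant, the table entry, and the NR_syscalls count; the trailing .org directive turns any mismatch into a build error. A C analogue of that guard, using hypothetical names and a C11 static assertion:

    typedef long (*syscall_fn)(void);

    #define NR_SYSCALLS 3   /* must track the table below, like NR_syscalls */

    static long sys_a(void)           { return 0; }
    static long sys_b(void)           { return 0; }
    static long sys_mlock2_stub(void) { return 0; }   /* hypothetical entry */

    static syscall_fn sys_call_table[] = { sys_a, sys_b, sys_mlock2_stub };

    /* Compile-time guard, the C analogue of the assembler's
     * ".org sys_call_table + 8*NR_syscalls" trick. */
    _Static_assert(sizeof(sys_call_table) / sizeof(sys_call_table[0])
                   == NR_SYSCALLS, "syscall table out of sync");

    int main(void)
    {
            return (int)sys_call_table[NR_SYSCALLS - 1]();
    }
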
diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c
index f7836c6a6b60..c32f76791f48 100644
--- a/arch/m68k/coldfire/m54xx.c
+++ b/arch/m68k/coldfire/m54xx.c
@@ -98,7 +98,7 @@ static void __init mcf54xx_bootmem_alloc(void)
98 memstart = PAGE_ALIGN(_ramstart); 98 memstart = PAGE_ALIGN(_ramstart);
99 min_low_pfn = PFN_DOWN(_rambase); 99 min_low_pfn = PFN_DOWN(_rambase);
100 start_pfn = PFN_DOWN(memstart); 100 start_pfn = PFN_DOWN(memstart);
101 max_low_pfn = PFN_DOWN(_ramend); 101 max_pfn = max_low_pfn = PFN_DOWN(_ramend);
102 high_memory = (void *)_ramend; 102 high_memory = (void *)_ramend;
103 103
104 m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6; 104 m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 0793a7f17417..f9d96bf86910 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6
7#define NR_syscalls 375 7#define NR_syscalls 376
8 8
9#define __ARCH_WANT_OLD_READDIR 9#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_OLD_STAT 10#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 5e6fae6c275f..36cf129de663 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -380,5 +380,6 @@
380#define __NR_sendmmsg 372 380#define __NR_sendmmsg 372
381#define __NR_userfaultfd 373 381#define __NR_userfaultfd 373
382#define __NR_membarrier 374 382#define __NR_membarrier 374
383#define __NR_mlock2 375
383 384
384#endif /* _UAPI_ASM_M68K_UNISTD_H_ */ 385#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 88c27d94a721..76b9113f3092 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -238,11 +238,14 @@ void __init setup_arch(char **cmdline_p)
238 * Give all the memory to the bootmap allocator, tell it to put the 238 * Give all the memory to the bootmap allocator, tell it to put the
239 * boot mem_map at the start of memory. 239 * boot mem_map at the start of memory.
240 */ 240 */
241 min_low_pfn = PFN_DOWN(memory_start);
242 max_pfn = max_low_pfn = PFN_DOWN(memory_end);
243
241 bootmap_size = init_bootmem_node( 244 bootmap_size = init_bootmem_node(
242 NODE_DATA(0), 245 NODE_DATA(0),
243 memory_start >> PAGE_SHIFT, /* map goes here */ 246 min_low_pfn, /* map goes here */
244 PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */ 247 PFN_DOWN(PAGE_OFFSET),
245 memory_end >> PAGE_SHIFT); 248 max_pfn);
246 /* 249 /*
247 * Free the usable memory, we have to make sure we do not free 250 * Free the usable memory, we have to make sure we do not free
248 * the bootmem bitmap so we then reserve it after freeing it :-) 251 * the bootmem bitmap so we then reserve it after freeing it :-)
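
The nommu setup above now derives the bootmem arguments from the PFN helpers and, importantly, also initializes max_pfn, which generic code consults later. A small sketch of the PFN conversion, assuming 4 KiB pages and a hypothetical memory layout:

    #include <stdio.h>

    #define PAGE_SHIFT 12           /* 4 KiB pages; an assumption for the sketch */
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    int main(void)
    {
            /* memory_start is page-aligned by the caller, so rounding down
             * with PFN_DOWN is safe for the lower bound too. */
            unsigned long memory_start = 0x00110000;    /* hypothetical */
            unsigned long memory_end   = 0x04000000;    /* 64 MiB */

            printf("min_low_pfn=%lu max_pfn=%lu\n",
                   PFN_DOWN(memory_start), PFN_DOWN(memory_end));
            return 0;
    }
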
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 5dd0e80042f5..282cd903f4c4 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -395,3 +395,4 @@ ENTRY(sys_call_table)
395 .long sys_sendmmsg 395 .long sys_sendmmsg
396 .long sys_userfaultfd 396 .long sys_userfaultfd
397 .long sys_membarrier 397 .long sys_membarrier
398 .long sys_mlock2 /* 375 */
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index b958916e5eac..8f37fdd80be9 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -250,7 +250,7 @@ void __init paging_init(void)
250 high_memory = phys_to_virt(max_addr); 250 high_memory = phys_to_virt(max_addr);
251 251
252 min_low_pfn = availmem >> PAGE_SHIFT; 252 min_low_pfn = availmem >> PAGE_SHIFT;
253 max_low_pfn = max_addr >> PAGE_SHIFT; 253 max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
254 254
255 for (i = 0; i < m68k_num_memory; i++) { 255 for (i = 0; i < m68k_num_memory; i++) {
256 addr = m68k_memory[i].addr; 256 addr = m68k_memory[i].addr;
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index a8b942bf7163..2a5f43a68ae3 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -118,13 +118,13 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start,
118 memory_end = memory_end & PAGE_MASK; 118 memory_end = memory_end & PAGE_MASK;
119 119
120 start_page = __pa(memory_start) >> PAGE_SHIFT; 120 start_page = __pa(memory_start) >> PAGE_SHIFT;
121 num_pages = __pa(memory_end) >> PAGE_SHIFT; 121 max_pfn = num_pages = __pa(memory_end) >> PAGE_SHIFT;
122 122
123 high_memory = (void *)memory_end; 123 high_memory = (void *)memory_end;
124 availmem = memory_start; 124 availmem = memory_start;
125 125
126 m68k_setup_node(0); 126 m68k_setup_node(0);
127 availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages); 127 availmem += init_bootmem(start_page, num_pages);
128 availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK; 128 availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
129 129
130 free_bootmem(__pa(availmem), memory_end - (availmem)); 130 free_bootmem(__pa(availmem), memory_end - (availmem));
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index c89da6312954..bf4dec229437 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
61 /* FIXME this part of code is untested */ 61 /* FIXME this part of code is untested */
62 for_each_sg(sgl, sg, nents, i) { 62 for_each_sg(sgl, sg, nents, i) {
63 sg->dma_address = sg_phys(sg); 63 sg->dma_address = sg_phys(sg);
64 __dma_sync(sg_phys(sg), sg->length, direction); 64 __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
65 sg->length, direction);
65 } 66 }
66 67
67 return nents; 68 return nents;
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 1ba21204ebe0..8755d618e116 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -216,9 +216,9 @@ void __init plat_mem_setup(void)
216 AR71XX_RESET_SIZE); 216 AR71XX_RESET_SIZE);
217 ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE, 217 ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
218 AR71XX_PLL_SIZE); 218 AR71XX_PLL_SIZE);
219 ath79_detect_sys_type();
219 ath79_ddr_ctrl_init(); 220 ath79_ddr_ctrl_init();
220 221
221 ath79_detect_sys_type();
222 if (mips_machtype != ATH79_MACH_GENERIC_OF) 222 if (mips_machtype != ATH79_MACH_GENERIC_OF)
223 detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); 223 detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
224 224
@@ -281,3 +281,8 @@ MIPS_MACHINE(ATH79_MACH_GENERIC,
281 "Generic", 281 "Generic",
282 "Generic AR71XX/AR724X/AR913X based board", 282 "Generic AR71XX/AR724X/AR913X based board",
283 ath79_generic_init); 283 ath79_generic_init);
284
285MIPS_MACHINE(ATH79_MACH_GENERIC_OF,
286 "DTB",
287 "Generic AR71XX/AR724X/AR913X based board (DT)",
288 NULL);
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index fb7734eadbf0..13d0439496a9 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -107,7 +107,7 @@
107 miscintc: interrupt-controller@18060010 { 107 miscintc: interrupt-controller@18060010 {
108 compatible = "qca,ar9132-misc-intc", 108 compatible = "qca,ar9132-misc-intc",
109 "qca,ar7100-misc-intc"; 109 "qca,ar7100-misc-intc";
110 reg = <0x18060010 0x4>; 110 reg = <0x18060010 0x8>;
111 111
112 interrupt-parent = <&cpuintc>; 112 interrupt-parent = <&cpuintc>;
113 interrupts = <6>; 113 interrupts = <6>;
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index ad1fccdb8d13..2046c0230224 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -200,8 +200,9 @@ static inline int pfn_valid(unsigned long pfn)
200{ 200{
201 /* avoid <linux/mm.h> include hell */ 201 /* avoid <linux/mm.h> include hell */
202 extern unsigned long max_mapnr; 202 extern unsigned long max_mapnr;
203 unsigned long pfn_offset = ARCH_PFN_OFFSET;
203 204
204 return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr; 205 return pfn >= pfn_offset && pfn < max_mapnr;
205} 206}
206 207
207#elif defined(CONFIG_SPARSEMEM) 208#elif defined(CONFIG_SPARSEMEM)
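
Hoisting ARCH_PFN_OFFSET into a local changes nothing at run time; it exists to silence a "comparison is always true" warning on platforms where the offset expands to 0 and pfn is unsigned. A sketch of the pattern:

    #include <stdbool.h>
    #include <stdio.h>

    #define ARCH_PFN_OFFSET 0UL     /* 0 on many platforms; the warning case */

    static unsigned long max_mapnr = 1024;

    static bool pfn_valid(unsigned long pfn)
    {
            unsigned long pfn_offset = ARCH_PFN_OFFSET;

            /* Comparing through a local silences "comparison is always
             * true" warnings when the offset is 0, yet compiles to the
             * same code. */
            return pfn >= pfn_offset && pfn < max_mapnr;
    }

    int main(void)
    {
            printf("%d %d\n", pfn_valid(0), pfn_valid(4096));   /* 1 0 */
            return 0;
    }
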
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index d5fa3eaf39a1..41b1b090f56f 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
1581 1581
1582 base = (inst >> 21) & 0x1f; 1582 base = (inst >> 21) & 0x1f;
1583 op_inst = (inst >> 16) & 0x1f; 1583 op_inst = (inst >> 16) & 0x1f;
1584 offset = inst & 0xffff; 1584 offset = (int16_t)inst;
1585 cache = (inst >> 16) & 0x3; 1585 cache = (inst >> 16) & 0x3;
1586 op = (inst >> 18) & 0x7; 1586 op = (inst >> 18) & 0x7;
1587 1587
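
Masking with 0xffff zero-extends, so a negative 16-bit offset in the guest cache instruction decoded to a large positive value; casting through int16_t sign-extends it. A short demonstration, with a hypothetical instruction word whose low 16 bits encode -4:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t inst = 0xbc90fffcu;        /* hypothetical encoding */

            int32_t zero_ext = inst & 0xffff;   /* old decode: 65532 */
            int32_t sign_ext = (int16_t)inst;   /* new decode: -4 */

            /* The int16_t conversion relies on the usual two's-complement
             * wrap, as the kernel code does. */
            assert(zero_ext == 65532);
            assert(sign_ext == -4);
            return 0;
    }
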
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 7bab3a4e8f7d..7e2210846b8b 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -157,9 +157,11 @@ FEXPORT(__kvm_mips_vcpu_run)
157 157
158FEXPORT(__kvm_mips_load_asid) 158FEXPORT(__kvm_mips_load_asid)
159 /* Set the ASID for the Guest Kernel */ 159 /* Set the ASID for the Guest Kernel */
160 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 160 PTR_L t0, VCPU_COP0(k1)
161 /* addresses shift to 0x80000000 */ 161 LONG_L t0, COP0_STATUS(t0)
162 bltz t0, 1f /* If kernel */ 162 andi t0, KSU_USER | ST0_ERL | ST0_EXL
163 xori t0, KSU_USER
164 bnez t0, 1f /* If kernel */
163 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 165 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
164 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 166 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
1651: 1671:
@@ -474,9 +476,11 @@ __kvm_mips_return_to_guest:
474 mtc0 t0, CP0_EPC 476 mtc0 t0, CP0_EPC
475 477
476 /* Set the ASID for the Guest Kernel */ 478 /* Set the ASID for the Guest Kernel */
477 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 479 PTR_L t0, VCPU_COP0(k1)
478 /* addresses shift to 0x80000000 */ 480 LONG_L t0, COP0_STATUS(t0)
479 bltz t0, 1f /* If kernel */ 481 andi t0, KSU_USER | ST0_ERL | ST0_EXL
482 xori t0, KSU_USER
483 bnez t0, 1f /* If kernel */
480 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 484 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
481 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 485 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
4821: 4861:
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 49ff3bfc007e..b9b803facdbf 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -279,7 +279,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
279 279
280 if (!gebase) { 280 if (!gebase) {
281 err = -ENOMEM; 281 err = -ENOMEM;
282 goto out_free_cpu; 282 goto out_uninit_cpu;
283 } 283 }
284 kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", 284 kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
285 ALIGN(size, PAGE_SIZE), gebase); 285 ALIGN(size, PAGE_SIZE), gebase);
@@ -343,6 +343,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
343out_free_gebase: 343out_free_gebase:
344 kfree(gebase); 344 kfree(gebase);
345 345
346out_uninit_cpu:
347 kvm_vcpu_uninit(vcpu);
348
346out_free_cpu: 349out_free_cpu:
347 kfree(vcpu); 350 kfree(vcpu);
348 351
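
The new out_uninit_cpu label keeps the error path symmetric with setup: once kvm_vcpu_init() has run, a later failure must call kvm_vcpu_uninit() before freeing the structure. A generic sketch of that goto-unwinding idiom, with hypothetical helpers standing in for the KVM calls:

    #include <stdlib.h>

    struct vcpu { void *gebase; };

    static int vcpu_init(struct vcpu *v)    { (void)v; return 0; }
    static void vcpu_uninit(struct vcpu *v) { (void)v; }

    static struct vcpu *vcpu_create(void)
    {
            struct vcpu *v = calloc(1, sizeof(*v));

            if (!v)
                    return NULL;
            if (vcpu_init(v))
                    goto out_free_cpu;

            v->gebase = malloc(4096);
            if (!v->gebase)
                    goto out_uninit_cpu;    /* undo vcpu_init(), not just free */

            return v;

    out_uninit_cpu:
            vcpu_uninit(v);                 /* tear down in reverse setup order */
    out_free_cpu:
            free(v);
            return NULL;
    }

    int main(void)
    {
            struct vcpu *v = vcpu_create();

            if (v) {
                    free(v->gebase);
                    free(v);
            }
            return 0;
    }
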
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index d8117be729a2..730d394ce5f0 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
145 145
146 gfp = massage_gfp_flags(dev, gfp); 146 gfp = massage_gfp_flags(dev, gfp);
147 147
148 if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC)) 148 if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
149 page = dma_alloc_from_contiguous(dev, 149 page = dma_alloc_from_contiguous(dev,
150 count, get_order(size)); 150 count, get_order(size));
151 if (!page) 151 if (!page)
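
GFP_ATOMIC became a multi-bit mask in this kernel generation, so !(gfp & GFP_ATOMIC) misclassifies GFP_KERNEL (the two share a reclaim bit) and the CMA path was skipped; gfpflags_allow_blocking() tests the single bit that actually decides. A sketch with illustrative flag values (the real bits live in <linux/gfp.h>):

    #include <assert.h>
    #include <stdbool.h>

    /* Illustrative bit assignments only. */
    #define __GFP_HIGH            0x01u
    #define __GFP_DIRECT_RECLAIM  0x02u
    #define __GFP_KSWAPD_RECLAIM  0x04u

    #define GFP_ATOMIC  (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)
    #define GFP_KERNEL  (__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

    static bool gfpflags_allow_blocking(unsigned flags)
    {
            return flags & __GFP_DIRECT_RECLAIM;
    }

    int main(void)
    {
            /* The old test !(gfp & GFP_ATOMIC) wrongly reports GFP_KERNEL
             * as atomic, because the masks share __GFP_KSWAPD_RECLAIM. */
            assert((GFP_KERNEL & GFP_ATOMIC) != 0);
            assert(gfpflags_allow_blocking(GFP_KERNEL));
            assert(!gfpflags_allow_blocking(GFP_ATOMIC));
            return 0;
    }
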
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
index 8a978022630b..dbbeccc3d714 100644
--- a/arch/mips/pci/pci-rt2880.c
+++ b/arch/mips/pci/pci-rt2880.c
@@ -11,6 +11,7 @@
11 * by the Free Software Foundation. 11 * by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/delay.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <linux/pci.h> 16#include <linux/pci.h>
16#include <linux/io.h> 17#include <linux/io.h>
@@ -232,8 +233,7 @@ static int rt288x_pci_probe(struct platform_device *pdev)
232 ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1; 233 ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1;
233 234
234 rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR); 235 rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR);
235 for (i = 0; i < 0xfffff; i++) 236 udelay(1);
236 ;
237 237
238 rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL); 238 rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL);
239 rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR); 239 rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR);
diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c
index 4f925e06c414..78b2ef49dbc7 100644
--- a/arch/mips/pmcs-msp71xx/msp_setup.c
+++ b/arch/mips/pmcs-msp71xx/msp_setup.c
@@ -10,6 +10,8 @@
10 * option) any later version. 10 * option) any later version.
11 */ 11 */
12 12
13#include <linux/delay.h>
14
13#include <asm/bootinfo.h> 15#include <asm/bootinfo.h>
14#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
15#include <asm/idle.h> 17#include <asm/idle.h>
@@ -77,7 +79,7 @@ void msp7120_reset(void)
77 */ 79 */
78 80
79 /* Wait a bit for the DDRC to settle */ 81 /* Wait a bit for the DDRC to settle */
80 for (i = 0; i < 100000000; i++); 82 mdelay(125);
81 83
82#if defined(CONFIG_PMC_MSP7120_GW) 84#if defined(CONFIG_PMC_MSP7120_GW)
83 /* 85 /*
diff --git a/arch/mips/sni/reset.c b/arch/mips/sni/reset.c
index 244f9427625b..db8f88b6a3af 100644
--- a/arch/mips/sni/reset.c
+++ b/arch/mips/sni/reset.c
@@ -3,6 +3,8 @@
3 * 3 *
4 * Reset a SNI machine. 4 * Reset a SNI machine.
5 */ 5 */
6#include <linux/delay.h>
7
6#include <asm/io.h> 8#include <asm/io.h>
7#include <asm/reboot.h> 9#include <asm/reboot.h>
8#include <asm/sni.h> 10#include <asm/sni.h>
@@ -32,9 +34,9 @@ void sni_machine_restart(char *command)
32 for (;;) { 34 for (;;) {
33 for (i = 0; i < 100; i++) { 35 for (i = 0; i < 100; i++) {
34 kb_wait(); 36 kb_wait();
35 for (j = 0; j < 100000 ; j++) 37 udelay(50);
36 /* nothing */;
37 outb_p(0xfe, 0x64); /* pulse reset low */ 38 outb_p(0xfe, 0x64); /* pulse reset low */
39 udelay(50);
38 } 40 }
39 } 41 }
40} 42}
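
This change and the two before it (rt2880 PCI and msp71xx reset) replace counted empty loops with udelay()/mdelay(): an empty loop may be deleted outright by the compiler, and its duration scales with clock speed anyway. A userspace analogue of the time-based replacement:

    #include <time.h>

    /* The portable counterpart of the kernel's udelay()/mdelay(); the
     * kernel variants busy-wait instead of sleeping. */
    static void udelay_stub(unsigned long usecs)
    {
            struct timespec ts = {
                    .tv_sec  = usecs / 1000000,
                    .tv_nsec = (long)(usecs % 1000000) * 1000,
            };
            nanosleep(&ts, NULL);
    }

    int main(void)
    {
            udelay_stub(50);        /* matches the reset pulse timing above */
            return 0;
    }
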
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 4434b54e1d87..78ae5552fdb8 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -1,6 +1,7 @@
1config MN10300 1config MN10300
2 def_bool y 2 def_bool y
3 select HAVE_OPROFILE 3 select HAVE_OPROFILE
4 select HAVE_UID16
4 select GENERIC_IRQ_SHOW 5 select GENERIC_IRQ_SHOW
5 select ARCH_WANT_IPC_PARSE_VERSION 6 select ARCH_WANT_IPC_PARSE_VERSION
6 select HAVE_ARCH_TRACEHOOK 7 select HAVE_ARCH_TRACEHOOK
@@ -37,9 +38,6 @@ config HIGHMEM
37config NUMA 38config NUMA
38 def_bool n 39 def_bool n
39 40
40config UID16
41 def_bool y
42
43config RWSEM_GENERIC_SPINLOCK 41config RWSEM_GENERIC_SPINLOCK
44 def_bool y 42 def_bool y
45 43
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 223cdcc8203f..87bf88ed04c6 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -23,22 +23,6 @@ static void __flush_dcache(unsigned long start, unsigned long end)
23 end += (cpuinfo.dcache_line_size - 1); 23 end += (cpuinfo.dcache_line_size - 1);
24 end &= ~(cpuinfo.dcache_line_size - 1); 24 end &= ~(cpuinfo.dcache_line_size - 1);
25 25
26 for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
27 __asm__ __volatile__ (" flushda 0(%0)\n"
28 : /* Outputs */
29 : /* Inputs */ "r"(addr)
30 /* : No clobber */);
31 }
32}
33
34static void __flush_dcache_all(unsigned long start, unsigned long end)
35{
36 unsigned long addr;
37
38 start &= ~(cpuinfo.dcache_line_size - 1);
39 end += (cpuinfo.dcache_line_size - 1);
40 end &= ~(cpuinfo.dcache_line_size - 1);
41
42 if (end > start + cpuinfo.dcache_size) 26 if (end > start + cpuinfo.dcache_size)
43 end = start + cpuinfo.dcache_size; 27 end = start + cpuinfo.dcache_size;
44 28
@@ -112,7 +96,7 @@ static void flush_aliases(struct address_space *mapping, struct page *page)
112 96
113void flush_cache_all(void) 97void flush_cache_all(void)
114{ 98{
115 __flush_dcache_all(0, cpuinfo.dcache_size); 99 __flush_dcache(0, cpuinfo.dcache_size);
116 __flush_icache(0, cpuinfo.icache_size); 100 __flush_icache(0, cpuinfo.icache_size);
117} 101}
118 102
@@ -182,7 +166,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
182 */ 166 */
183 unsigned long start = (unsigned long)page_address(page); 167 unsigned long start = (unsigned long)page_address(page);
184 168
185 __flush_dcache_all(start, start + PAGE_SIZE); 169 __flush_dcache(start, start + PAGE_SIZE);
186} 170}
187 171
188void flush_dcache_page(struct page *page) 172void flush_dcache_page(struct page *page)
@@ -268,7 +252,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
268{ 252{
269 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); 253 flush_cache_page(vma, user_vaddr, page_to_pfn(page));
270 memcpy(dst, src, len); 254 memcpy(dst, src, len);
271 __flush_dcache_all((unsigned long)src, (unsigned long)src + len); 255 __flush_dcache((unsigned long)src, (unsigned long)src + len);
272 if (vma->vm_flags & VM_EXEC) 256 if (vma->vm_flags & VM_EXEC)
273 __flush_icache((unsigned long)src, (unsigned long)src + len); 257 __flush_icache((unsigned long)src, (unsigned long)src + len);
274} 258}
@@ -279,7 +263,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
279{ 263{
280 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); 264 flush_cache_page(vma, user_vaddr, page_to_pfn(page));
281 memcpy(dst, src, len); 265 memcpy(dst, src, len);
282 __flush_dcache_all((unsigned long)dst, (unsigned long)dst + len); 266 __flush_dcache((unsigned long)dst, (unsigned long)dst + len);
283 if (vma->vm_flags & VM_EXEC) 267 if (vma->vm_flags & VM_EXEC)
284 __flush_icache((unsigned long)dst, (unsigned long)dst + len); 268 __flush_icache((unsigned long)dst, (unsigned long)dst + len);
285} 269}
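
The deleted __flush_dcache_all() differed from __flush_dcache() only in clamping the range to the cache size, and the clamp is harmless for the remaining per-page callers, so the two collapse into one helper. A sketch of the clamping logic, with hypothetical cache geometry:

    #include <stdio.h>

    /* Hypothetical cache geometry for the sketch. */
    #define DCACHE_LINE_SIZE 32UL
    #define DCACHE_SIZE      (32UL * 1024)

    static unsigned long flush_dcache_range(unsigned long start, unsigned long end)
    {
            unsigned long lines = 0;

            start &= ~(DCACHE_LINE_SIZE - 1);
            end = (end + DCACHE_LINE_SIZE - 1) & ~(DCACHE_LINE_SIZE - 1);

            /* Flushing more than one cache's worth of lines is wasted
             * work: every line of the cache has been covered already. */
            if (end > start + DCACHE_SIZE)
                    end = start + DCACHE_SIZE;

            for (unsigned long addr = start; addr < end; addr += DCACHE_LINE_SIZE)
                    lines++;        /* the flushda instruction would go here */

            return lines;
    }

    int main(void)
    {
            /* A huge range collapses to at most DCACHE_SIZE / line lines. */
            printf("%lu\n", flush_dcache_range(0, 1UL << 20));  /* prints 1024 */
            return 0;
    }
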
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index c36546959e86..729f89163bc3 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -108,6 +108,9 @@ config PGTABLE_LEVELS
108 default 3 if 64BIT && PARISC_PAGE_SIZE_4KB 108 default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
109 default 2 109 default 2
110 110
111config SYS_SUPPORTS_HUGETLBFS
112 def_bool y if PA20
113
111source "init/Kconfig" 114source "init/Kconfig"
112 115
113source "kernel/Kconfig.freezer" 116source "kernel/Kconfig.freezer"
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
new file mode 100644
index 000000000000..7d56a9ccb752
--- /dev/null
+++ b/arch/parisc/include/asm/hugetlb.h
@@ -0,0 +1,85 @@
1#ifndef _ASM_PARISC64_HUGETLB_H
2#define _ASM_PARISC64_HUGETLB_H
3
4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
6
7
8void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
9 pte_t *ptep, pte_t pte);
10
11pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
12 pte_t *ptep);
13
14static inline int is_hugepage_only_range(struct mm_struct *mm,
15 unsigned long addr,
16 unsigned long len) {
17 return 0;
18}
19
20/*
21 * If the arch doesn't supply something else, assume that hugepage
22 * size aligned regions are ok without further preparation.
23 */
24static inline int prepare_hugepage_range(struct file *file,
25 unsigned long addr, unsigned long len)
26{
27 if (len & ~HPAGE_MASK)
28 return -EINVAL;
29 if (addr & ~HPAGE_MASK)
30 return -EINVAL;
31 return 0;
32}
33
34static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
35 unsigned long addr, unsigned long end,
36 unsigned long floor,
37 unsigned long ceiling)
38{
39 free_pgd_range(tlb, addr, end, floor, ceiling);
40}
41
42static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
43 unsigned long addr, pte_t *ptep)
44{
45}
46
47static inline int huge_pte_none(pte_t pte)
48{
49 return pte_none(pte);
50}
51
52static inline pte_t huge_pte_wrprotect(pte_t pte)
53{
54 return pte_wrprotect(pte);
55}
56
57static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
58 unsigned long addr, pte_t *ptep)
59{
60 pte_t old_pte = *ptep;
61 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
62}
63
64static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
65 unsigned long addr, pte_t *ptep,
66 pte_t pte, int dirty)
67{
68 int changed = !pte_same(*ptep, pte);
69 if (changed) {
70 set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
71 flush_tlb_page(vma, addr);
72 }
73 return changed;
74}
75
76static inline pte_t huge_ptep_get(pte_t *ptep)
77{
78 return *ptep;
79}
80
81static inline void arch_clear_hugepage_flags(struct page *page)
82{
83}
84
85#endif /* _ASM_PARISC64_HUGETLB_H */
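
A point worth noting in the new header: huge_ptep_set_access_flags() writes the PTE and flushes the TLB only when the entry actually changed. A compact sketch of that compare-then-update idiom, with a stub flush counter in place of flush_tlb_page():

    #include <assert.h>
    #include <stdbool.h>

    typedef unsigned long pte_t;

    static int tlb_flushes;
    static void flush_tlb_stub(void) { tlb_flushes++; }

    static bool set_access_flags(pte_t *ptep, pte_t pte)
    {
            bool changed = (*ptep != pte);

            if (changed) {
                    *ptep = pte;
                    flush_tlb_stub();       /* flush only on a real change */
            }
            return changed;
    }

    int main(void)
    {
            pte_t pte = 0;

            assert(set_access_flags(&pte, 1) && tlb_flushes == 1);
            assert(!set_access_flags(&pte, 1) && tlb_flushes == 1);
            return 0;
    }
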
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 60d5d174dfe4..80e742a1c162 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -145,11 +145,22 @@ extern int npmem_ranges;
145#endif /* CONFIG_DISCONTIGMEM */ 145#endif /* CONFIG_DISCONTIGMEM */
146 146
147#ifdef CONFIG_HUGETLB_PAGE 147#ifdef CONFIG_HUGETLB_PAGE
148#define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */ 148#define HPAGE_SHIFT PMD_SHIFT /* fixed for transparent huge pages */
149#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) 149#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
150#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 150#define HPAGE_MASK (~(HPAGE_SIZE - 1))
151#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 151#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
152
153#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
154# define REAL_HPAGE_SHIFT 20 /* 20 = 1MB */
155# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M
156#elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
157# define REAL_HPAGE_SHIFT 22 /* 22 = 4MB */
158# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M
159#else
160# define REAL_HPAGE_SHIFT 24 /* 24 = 16MB */
161# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M
152#endif 162#endif
163#endif /* CONFIG_HUGETLB_PAGE */
153 164
154#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 165#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
155 166
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 3edbb9fc91b4..f2fd327dce2e 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -35,7 +35,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
35 PxD_FLAG_VALID | 35 PxD_FLAG_VALID |
36 PxD_FLAG_ATTACHED) 36 PxD_FLAG_ATTACHED)
37 + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)); 37 + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
38 /* The first pmd entry also is marked with _PAGE_GATEWAY as 38 /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
39 * a signal that this pmd may not be freed */ 39 * a signal that this pmd may not be freed */
40 __pgd_val_set(*pgd, PxD_FLAG_ATTACHED); 40 __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
41#endif 41#endif
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index f93c4a4e6580..291cee28ccb6 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -83,7 +83,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
83 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) 83 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
84 84
85/* This is the size of the initially mapped kernel memory */ 85/* This is the size of the initially mapped kernel memory */
86#define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */ 86#ifdef CONFIG_64BIT
87#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
88#else
89#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
90#endif
87#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) 91#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
88 92
89#if CONFIG_PGTABLE_LEVELS == 3 93#if CONFIG_PGTABLE_LEVELS == 3
@@ -167,7 +171,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
167#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */ 171#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */
168#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */ 172#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */
169#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */ 173#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */
170/* bit 21 was formerly the FLUSH bit but is now unused */ 174#define _PAGE_HPAGE_BIT 21 /* (0x400) Software: Huge Page */
171#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */ 175#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */
172 176
173/* N.B. The bits are defined in terms of a 32 bit word above, so the */ 177/* N.B. The bits are defined in terms of a 32 bit word above, so the */
@@ -194,6 +198,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
194#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT)) 198#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
195#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT)) 199#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
196#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT)) 200#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
201#define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT))
197#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) 202#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
198 203
199#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) 204#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
@@ -217,7 +222,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
217#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) 222#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))
218#define PxD_FLAG_MASK (0xf) 223#define PxD_FLAG_MASK (0xf)
219#define PxD_FLAG_SHIFT (4) 224#define PxD_FLAG_SHIFT (4)
220#define PxD_VALUE_SHIFT (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */ 225#define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
221 226
222#ifndef __ASSEMBLY__ 227#ifndef __ASSEMBLY__
223 228
@@ -363,6 +368,19 @@ static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return
363static inline pte_t pte_mkspecial(pte_t pte) { return pte; } 368static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
364 369
365/* 370/*
371 * Huge pte definitions.
372 */
373#ifdef CONFIG_HUGETLB_PAGE
374#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE)
375#define pte_mkhuge(pte) (__pte(pte_val(pte) | \
376 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
377#else
378#define pte_huge(pte) (0)
379#define pte_mkhuge(pte) (pte)
380#endif
381
382
383/*
366 * Conversion functions: convert a page and protection to a page entry, 384 * Conversion functions: convert a page and protection to a page entry,
367 * and a page entry and page directory to the page they refer to. 385 * and a page entry and page directory to the page they refer to.
368 */ 386 */
@@ -410,8 +428,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
410/* Find an entry in the second-level page table.. */ 428/* Find an entry in the second-level page table.. */
411 429
412#if CONFIG_PGTABLE_LEVELS == 3 430#if CONFIG_PGTABLE_LEVELS == 3
431#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
413#define pmd_offset(dir,address) \ 432#define pmd_offset(dir,address) \
414((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1))) 433((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
415#else 434#else
416#define pmd_offset(dir,addr) ((pmd_t *) dir) 435#define pmd_offset(dir,addr) ((pmd_t *) dir)
417#endif 436#endif
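
pte_mkhuge() above sets _PAGE_HUGE only when parisc_requires_coherency() is false, so machines that require strict coherency quietly fall back to base pages, and pte_huge() reports whether the bit actually stuck. A sketch of that conditional-flag pattern, with an illustrative bit value and a hypothetical coherency predicate:

    #include <assert.h>
    #include <stdbool.h>

    typedef unsigned long pte_t;
    #define _PAGE_HUGE (1UL << 21)  /* illustrative bit position */

    /* Hypothetical stand-in for parisc_requires_coherency(). */
    static bool requires_coherency(void) { return false; }

    static pte_t pte_mkhuge(pte_t pte)
    {
            /* The flag is simply withheld where it must not be used. */
            return pte | (requires_coherency() ? 0 : _PAGE_HUGE);
    }

    static bool pte_huge(pte_t pte) { return pte & _PAGE_HUGE; }

    int main(void)
    {
            pte_t pte = pte_mkhuge(0);

            assert(pte_huge(pte));  /* set, since coherency is not required */
            return 0;
    }
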
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 54adb60c0a42..7e759ecb1343 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -192,33 +192,6 @@ void show_trace(struct task_struct *task, unsigned long *stack);
192 */ 192 */
193typedef unsigned int elf_caddr_t; 193typedef unsigned int elf_caddr_t;
194 194
195#define start_thread_som(regs, new_pc, new_sp) do { \
196 unsigned long *sp = (unsigned long *)new_sp; \
197 __u32 spaceid = (__u32)current->mm->context; \
198 unsigned long pc = (unsigned long)new_pc; \
199 /* offset pc for priv. level */ \
200 pc |= 3; \
201 \
202 regs->iasq[0] = spaceid; \
203 regs->iasq[1] = spaceid; \
204 regs->iaoq[0] = pc; \
205 regs->iaoq[1] = pc + 4; \
206 regs->sr[2] = LINUX_GATEWAY_SPACE; \
207 regs->sr[3] = 0xffff; \
208 regs->sr[4] = spaceid; \
209 regs->sr[5] = spaceid; \
210 regs->sr[6] = spaceid; \
211 regs->sr[7] = spaceid; \
212 regs->gr[ 0] = USER_PSW; \
213 regs->gr[30] = ((new_sp)+63)&~63; \
214 regs->gr[31] = pc; \
215 \
216 get_user(regs->gr[26],&sp[0]); \
217 get_user(regs->gr[25],&sp[-1]); \
218 get_user(regs->gr[24],&sp[-2]); \
219 get_user(regs->gr[23],&sp[-3]); \
220} while(0)
221
222/* The ELF abi wants things done a "wee bit" differently than 195/* The ELF abi wants things done a "wee bit" differently than
223 * som does. Supporting this behavior here avoids 196 * som does. Supporting this behavior here avoids
224 * having our own version of create_elf_tables. 197 * having our own version of create_elf_tables.
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index ecc3ae1ca28e..dd4d1876a020 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -49,16 +49,6 @@
49#define MADV_DONTFORK 10 /* don't inherit across fork */ 49#define MADV_DONTFORK 10 /* don't inherit across fork */
50#define MADV_DOFORK 11 /* do inherit across fork */ 50#define MADV_DOFORK 11 /* do inherit across fork */
51 51
52/* The range 12-64 is reserved for page size specification. */
53#define MADV_4K_PAGES 12 /* Use 4K pages */
54#define MADV_16K_PAGES 14 /* Use 16K pages */
55#define MADV_64K_PAGES 16 /* Use 64K pages */
56#define MADV_256K_PAGES 18 /* Use 256K pages */
57#define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */
58#define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */
59#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */
60#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */
61
62#define MADV_MERGEABLE 65 /* KSM may merge identical pages */ 52#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
63#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ 53#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
64 54
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 33170384d3ac..35bdccbb2036 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -360,8 +360,9 @@
360#define __NR_execveat (__NR_Linux + 342) 360#define __NR_execveat (__NR_Linux + 342)
361#define __NR_membarrier (__NR_Linux + 343) 361#define __NR_membarrier (__NR_Linux + 343)
362#define __NR_userfaultfd (__NR_Linux + 344) 362#define __NR_userfaultfd (__NR_Linux + 344)
363#define __NR_mlock2 (__NR_Linux + 345)
363 364
364#define __NR_Linux_syscalls (__NR_userfaultfd + 1) 365#define __NR_Linux_syscalls (__NR_mlock2 + 1)
365 366
366 367
367#define __IGNORE_select /* newselect */ 368#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 59001cea13f9..d2f62570a7b1 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -290,6 +290,14 @@ int main(void)
290 DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT); 290 DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
291 DEFINE(ASM_PT_INITIAL, PT_INITIAL); 291 DEFINE(ASM_PT_INITIAL, PT_INITIAL);
292 BLANK(); 292 BLANK();
293 /* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
294 * and kernel data on physical huge pages */
295#ifdef CONFIG_HUGETLB_PAGE
296 DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
297#else
298 DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
299#endif
300 BLANK();
293 DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); 301 DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
294 DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); 302 DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
295 DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); 303 DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index c5ef4081b01d..623496c11756 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -502,21 +502,38 @@
502 STREG \pte,0(\ptp) 502 STREG \pte,0(\ptp)
503 .endm 503 .endm
504 504
505 /* We have (depending on the page size):
506 * - 38 to 52-bit Physical Page Number
507 * - 12 to 26-bit page offset
508 */
505 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) 509 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
506 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */ 510 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
507 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) 511 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
512 #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
508 513
509 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ 514 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
510 .macro convert_for_tlb_insert20 pte 515 .macro convert_for_tlb_insert20 pte,tmp
516#ifdef CONFIG_HUGETLB_PAGE
517 copy \pte,\tmp
518 extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
519 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
520
521 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
522 (63-58)+PAGE_ADD_SHIFT,\pte
523 extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0
524 depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
525 (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
526#else /* Huge pages disabled */
511 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ 527 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
512 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte 528 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
513 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ 529 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
514 (63-58)+PAGE_ADD_SHIFT,\pte 530 (63-58)+PAGE_ADD_SHIFT,\pte
531#endif
515 .endm 532 .endm
516 533
517 /* Convert the pte and prot to tlb insertion values. How 534 /* Convert the pte and prot to tlb insertion values. How
518 * this happens is quite subtle, read below */ 535 * this happens is quite subtle, read below */
519 .macro make_insert_tlb spc,pte,prot 536 .macro make_insert_tlb spc,pte,prot,tmp
520 space_to_prot \spc \prot /* create prot id from space */ 537 space_to_prot \spc \prot /* create prot id from space */
521 /* The following is the real subtlety. This is depositing 538 /* The following is the real subtlety. This is depositing
522 * T <-> _PAGE_REFTRAP 539 * T <-> _PAGE_REFTRAP
@@ -553,7 +570,7 @@
553 depdi 1,12,1,\prot 570 depdi 1,12,1,\prot
554 571
555 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ 572 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
556 convert_for_tlb_insert20 \pte 573 convert_for_tlb_insert20 \pte \tmp
557 .endm 574 .endm
558 575
559 /* Identical macro to make_insert_tlb above, except it 576 /* Identical macro to make_insert_tlb above, except it
@@ -646,17 +663,12 @@
646 663
647 664
648 /* 665 /*
649 * Align fault_vector_20 on 4K boundary so that both 666 * Fault_vectors are architecturally required to be aligned on a 2K
650 * fault_vector_11 and fault_vector_20 are on the 667 * boundary
651 * same page. This is only necessary as long as we
652 * write protect the kernel text, which we may stop
653 * doing once we use large page translations to cover
654 * the static part of the kernel address space.
655 */ 668 */
656 669
657 .text 670 .text
658 671 .align 2048
659 .align 4096
660 672
661ENTRY(fault_vector_20) 673ENTRY(fault_vector_20)
662 /* First vector is invalid (0) */ 674 /* First vector is invalid (0) */
@@ -1147,7 +1159,7 @@ dtlb_miss_20w:
1147 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w 1159 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1148 update_accessed ptp,pte,t0,t1 1160 update_accessed ptp,pte,t0,t1
1149 1161
1150 make_insert_tlb spc,pte,prot 1162 make_insert_tlb spc,pte,prot,t1
1151 1163
1152 idtlbt pte,prot 1164 idtlbt pte,prot
1153 1165
@@ -1173,7 +1185,7 @@ nadtlb_miss_20w:
1173 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w 1185 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1174 update_accessed ptp,pte,t0,t1 1186 update_accessed ptp,pte,t0,t1
1175 1187
1176 make_insert_tlb spc,pte,prot 1188 make_insert_tlb spc,pte,prot,t1
1177 1189
1178 idtlbt pte,prot 1190 idtlbt pte,prot
1179 1191
@@ -1267,7 +1279,7 @@ dtlb_miss_20:
1267 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 1279 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
1268 update_accessed ptp,pte,t0,t1 1280 update_accessed ptp,pte,t0,t1
1269 1281
1270 make_insert_tlb spc,pte,prot 1282 make_insert_tlb spc,pte,prot,t1
1271 1283
1272 f_extend pte,t1 1284 f_extend pte,t1
1273 1285
@@ -1295,7 +1307,7 @@ nadtlb_miss_20:
1295 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 1307 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1296 update_accessed ptp,pte,t0,t1 1308 update_accessed ptp,pte,t0,t1
1297 1309
1298 make_insert_tlb spc,pte,prot 1310 make_insert_tlb spc,pte,prot,t1
1299 1311
1300 f_extend pte,t1 1312 f_extend pte,t1
1301 1313
@@ -1404,7 +1416,7 @@ itlb_miss_20w:
1404 tlb_lock spc,ptp,pte,t0,t1,itlb_fault 1416 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1405 update_accessed ptp,pte,t0,t1 1417 update_accessed ptp,pte,t0,t1
1406 1418
1407 make_insert_tlb spc,pte,prot 1419 make_insert_tlb spc,pte,prot,t1
1408 1420
1409 iitlbt pte,prot 1421 iitlbt pte,prot
1410 1422
@@ -1428,7 +1440,7 @@ naitlb_miss_20w:
1428 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w 1440 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1429 update_accessed ptp,pte,t0,t1 1441 update_accessed ptp,pte,t0,t1
1430 1442
1431 make_insert_tlb spc,pte,prot 1443 make_insert_tlb spc,pte,prot,t1
1432 1444
1433 iitlbt pte,prot 1445 iitlbt pte,prot
1434 1446
@@ -1514,7 +1526,7 @@ itlb_miss_20:
1514 tlb_lock spc,ptp,pte,t0,t1,itlb_fault 1526 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1515 update_accessed ptp,pte,t0,t1 1527 update_accessed ptp,pte,t0,t1
1516 1528
1517 make_insert_tlb spc,pte,prot 1529 make_insert_tlb spc,pte,prot,t1
1518 1530
1519 f_extend pte,t1 1531 f_extend pte,t1
1520 1532
@@ -1534,7 +1546,7 @@ naitlb_miss_20:
1534 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 1546 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1535 update_accessed ptp,pte,t0,t1 1547 update_accessed ptp,pte,t0,t1
1536 1548
1537 make_insert_tlb spc,pte,prot 1549 make_insert_tlb spc,pte,prot,t1
1538 1550
1539 f_extend pte,t1 1551 f_extend pte,t1
1540 1552
@@ -1566,7 +1578,7 @@ dbit_trap_20w:
1566 tlb_lock spc,ptp,pte,t0,t1,dbit_fault 1578 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1567 update_dirty ptp,pte,t1 1579 update_dirty ptp,pte,t1
1568 1580
1569 make_insert_tlb spc,pte,prot 1581 make_insert_tlb spc,pte,prot,t1
1570 1582
1571 idtlbt pte,prot 1583 idtlbt pte,prot
1572 1584
@@ -1610,7 +1622,7 @@ dbit_trap_20:
1610 tlb_lock spc,ptp,pte,t0,t1,dbit_fault 1622 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1611 update_dirty ptp,pte,t1 1623 update_dirty ptp,pte,t1
1612 1624
1613 make_insert_tlb spc,pte,prot 1625 make_insert_tlb spc,pte,prot,t1
1614 1626
1615 f_extend pte,t1 1627 f_extend pte,t1
1616 1628
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index e7d64527aff9..75aa0db9f69e 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -69,7 +69,7 @@ $bss_loop:
69 stw,ma %arg2,4(%r1) 69 stw,ma %arg2,4(%r1)
70 stw,ma %arg3,4(%r1) 70 stw,ma %arg3,4(%r1)
71 71
72 /* Initialize startup VM. Just map first 8/16 MB of memory */ 72 /* Initialize startup VM. Just map first 16/32 MB of memory */
73 load32 PA(swapper_pg_dir),%r4 73 load32 PA(swapper_pg_dir),%r4
74 mtctl %r4,%cr24 /* Initialize kernel root pointer */ 74 mtctl %r4,%cr24 /* Initialize kernel root pointer */
75 mtctl %r4,%cr25 /* Initialize user root pointer */ 75 mtctl %r4,%cr25 /* Initialize user root pointer */
@@ -107,7 +107,7 @@ $bss_loop:
107 /* Now initialize the PTEs themselves. We use RWX for 107 /* Now initialize the PTEs themselves. We use RWX for
108 * everything ... it will get remapped correctly later */ 108 * everything ... it will get remapped correctly later */
109 ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */ 109 ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
110 ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */ 110 load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
111 load32 PA(pg0),%r1 111 load32 PA(pg0),%r1
112 112
113$pgt_fill_loop: 113$pgt_fill_loop:
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 64f2764a8cef..c99f3dde455c 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -171,24 +171,6 @@ void pcibios_set_master(struct pci_dev *dev)
171} 171}
172 172
173 173
174void __init pcibios_init_bus(struct pci_bus *bus)
175{
176 struct pci_dev *dev = bus->self;
177 unsigned short bridge_ctl;
178
179 /* We deal only with pci controllers and pci-pci bridges. */
180 if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
181 return;
182
183 /* PCI-PCI bridge - set the cache line and default latency
184 (32) for primary and secondary buses. */
185 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
186
187 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
188 bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
189 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
190}
191
192/* 174/*
193 * pcibios align resources() is called every time generic PCI code 175 * pcibios align resources() is called every time generic PCI code
194 * wants to generate a new address. The process of looking for 176 * wants to generate a new address. The process of looking for
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 72a3c658ad7b..f7ea626e29c9 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -130,7 +130,16 @@ void __init setup_arch(char **cmdline_p)
130 printk(KERN_INFO "The 32-bit Kernel has started...\n"); 130 printk(KERN_INFO "The 32-bit Kernel has started...\n");
131#endif 131#endif
132 132
133 printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024)); 133 printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ",
134 (int)(PAGE_SIZE / 1024));
135#ifdef CONFIG_HUGETLB_PAGE
136 printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size",
137 1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20));
138#else
139 printk(KERN_CONT "disabled");
140#endif
141 printk(KERN_CONT ".\n");
142
134 143
135 pdc_console_init(); 144 pdc_console_init();
136 145
@@ -377,6 +386,7 @@ arch_initcall(parisc_init);
377void start_parisc(void) 386void start_parisc(void)
378{ 387{
379 extern void start_kernel(void); 388 extern void start_kernel(void);
389 extern void early_trap_init(void);
380 390
381 int ret, cpunum; 391 int ret, cpunum;
382 struct pdc_coproc_cfg coproc_cfg; 392 struct pdc_coproc_cfg coproc_cfg;
@@ -397,6 +407,8 @@ void start_parisc(void)
397 panic("must have an fpu to boot linux"); 407 panic("must have an fpu to boot linux");
398 } 408 }
399 409
410 early_trap_init(); /* initialize checksum of fault_vector */
411
400 start_kernel(); 412 start_kernel();
401 // not reached 413 // not reached
402} 414}
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 0b8d26d3ba43..3fbd7252a4b2 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -369,7 +369,7 @@ tracesys_exit:
369 ldo -16(%r30),%r29 /* Reference param save area */ 369 ldo -16(%r30),%r29 /* Reference param save area */
370#endif 370#endif
371 ldo TASK_REGS(%r1),%r26 371 ldo TASK_REGS(%r1),%r26
372 bl do_syscall_trace_exit,%r2 372 BL do_syscall_trace_exit,%r2
373 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ 373 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
374 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 374 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
375 LDREG TI_TASK(%r1), %r1 375 LDREG TI_TASK(%r1), %r1
@@ -390,7 +390,7 @@ tracesys_sigexit:
390#ifdef CONFIG_64BIT 390#ifdef CONFIG_64BIT
391 ldo -16(%r30),%r29 /* Reference param save area */ 391 ldo -16(%r30),%r29 /* Reference param save area */
392#endif 392#endif
393 bl do_syscall_trace_exit,%r2 393 BL do_syscall_trace_exit,%r2
394 ldo TASK_REGS(%r1),%r26 394 ldo TASK_REGS(%r1),%r26
395 395
396 ldil L%syscall_exit_rfi,%r1 396 ldil L%syscall_exit_rfi,%r1
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 78c3ef8c348d..d4ffcfbc9885 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -440,6 +440,7 @@
440 ENTRY_COMP(execveat) 440 ENTRY_COMP(execveat)
441 ENTRY_SAME(membarrier) 441 ENTRY_SAME(membarrier)
442 ENTRY_SAME(userfaultfd) 442 ENTRY_SAME(userfaultfd)
443 ENTRY_SAME(mlock2) /* 345 */
443 444
444 445
445.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) 446.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
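
The trailing .ifne directive is a build-time size check: the file fails to assemble if the number of table entries (including the new mlock2 slot) no longer matches __NR_Linux_syscalls. A minimal C analogue of the same guard, with hypothetical names and counts:

#include <assert.h>
#include <stddef.h>

#define NR_SYSCALLS 3				/* hypothetical count */

static const void *sys_call_table[] = {		/* unsized: length = entries listed */
	NULL, NULL, NULL,
};

/* Fails to compile if an entry is added or removed without updating the count. */
static_assert(sizeof(sys_call_table) / sizeof(sys_call_table[0]) == NR_SYSCALLS,
	      "syscall table size mismatch");

int main(void) { return 0; }
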
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index b99b39f1da02..553b09855cfd 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -807,7 +807,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
807} 807}
808 808
809 809
810int __init check_ivt(void *iva) 810void __init initialize_ivt(const void *iva)
811{ 811{
812 extern u32 os_hpmc_size; 812 extern u32 os_hpmc_size;
813 extern const u32 os_hpmc[]; 813 extern const u32 os_hpmc[];
@@ -818,8 +818,8 @@ int __init check_ivt(void *iva)
818 u32 *hpmcp; 818 u32 *hpmcp;
819 u32 length; 819 u32 length;
820 820
821 if (strcmp((char *)iva, "cows can fly")) 821 if (strcmp((const char *)iva, "cows can fly"))
822 return -1; 822 panic("IVT invalid");
823 823
824 ivap = (u32 *)iva; 824 ivap = (u32 *)iva;
825 825
@@ -839,28 +839,23 @@ int __init check_ivt(void *iva)
839 check += ivap[i]; 839 check += ivap[i];
840 840
841 ivap[5] = -check; 841 ivap[5] = -check;
842
843 return 0;
844} 842}
845 843
846#ifndef CONFIG_64BIT
847extern const void fault_vector_11;
848#endif
849extern const void fault_vector_20;
850 844
851void __init trap_init(void) 845/* early_trap_init() is called before we set up kernel mappings and
846 * write-protect the kernel */
847void __init early_trap_init(void)
852{ 848{
853 void *iva; 849 extern const void fault_vector_20;
854 850
855 if (boot_cpu_data.cpu_type >= pcxu) 851#ifndef CONFIG_64BIT
856 iva = (void *) &fault_vector_20; 852 extern const void fault_vector_11;
857 else 853 initialize_ivt(&fault_vector_11);
858#ifdef CONFIG_64BIT
859 panic("Can't boot 64-bit OS on PA1.1 processor!");
860#else
861 iva = (void *) &fault_vector_11;
862#endif 854#endif
863 855
864 if (check_ivt(iva)) 856 initialize_ivt(&fault_vector_20);
865 panic("IVT invalid"); 857}
858
859void __init trap_init(void)
860{
866} 861}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 0dacc5ca555a..308f29081d46 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -60,7 +60,7 @@ SECTIONS
60 EXIT_DATA 60 EXIT_DATA
61 } 61 }
62 PERCPU_SECTION(8) 62 PERCPU_SECTION(8)
63 . = ALIGN(PAGE_SIZE); 63 . = ALIGN(HUGEPAGE_SIZE);
64 __init_end = .; 64 __init_end = .;
65 /* freed after init ends here */ 65 /* freed after init ends here */
66 66
@@ -116,7 +116,7 @@ SECTIONS
116 * that we can properly leave these 116 * that we can properly leave these
117 * as writable 117 * as writable
118 */ 118 */
119 . = ALIGN(PAGE_SIZE); 119 . = ALIGN(HUGEPAGE_SIZE);
120 data_start = .; 120 data_start = .;
121 121
122 EXCEPTION_TABLE(8) 122 EXCEPTION_TABLE(8)
@@ -135,8 +135,11 @@ SECTIONS
135 _edata = .; 135 _edata = .;
136 136
137 /* BSS */ 137 /* BSS */
138 BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8) 138 BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
139
140 /* bootmap is allocated in setup_bootmem() directly behind bss. */
139 141
142 . = ALIGN(HUGEPAGE_SIZE);
140 _end = . ; 143 _end = . ;
141 144
142 STABS_DEBUG 145 STABS_DEBUG
diff --git a/arch/parisc/mm/Makefile b/arch/parisc/mm/Makefile
index 758ceefb373a..134393de69d2 100644
--- a/arch/parisc/mm/Makefile
+++ b/arch/parisc/mm/Makefile
@@ -3,3 +3,4 @@
3# 3#
4 4
5obj-y := init.o fault.o ioremap.o 5obj-y := init.o fault.o ioremap.o
6obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
new file mode 100644
index 000000000000..f6fdc77a72bd
--- /dev/null
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -0,0 +1,161 @@
1/*
2 * PARISC64 Huge TLB page support.
3 *
4 * This parisc implementation is heavily based on the SPARC and x86 code.
5 *
6 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
7 */
8
9#include <linux/fs.h>
10#include <linux/mm.h>
11#include <linux/hugetlb.h>
12#include <linux/pagemap.h>
13#include <linux/sysctl.h>
14
15#include <asm/mman.h>
16#include <asm/pgalloc.h>
17#include <asm/tlb.h>
18#include <asm/tlbflush.h>
19#include <asm/cacheflush.h>
20#include <asm/mmu_context.h>
21
22
23unsigned long
24hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25 unsigned long len, unsigned long pgoff, unsigned long flags)
26{
27 struct hstate *h = hstate_file(file);
28
29 if (len & ~huge_page_mask(h))
30 return -EINVAL;
31 if (len > TASK_SIZE)
32 return -ENOMEM;
33
34 if (flags & MAP_FIXED)
35 if (prepare_hugepage_range(file, addr, len))
36 return -EINVAL;
37
38 if (addr)
39 addr = ALIGN(addr, huge_page_size(h));
40
41 /* we need to make sure the colouring is OK */
42 return arch_get_unmapped_area(file, addr, len, pgoff, flags);
43}
44
45
46pte_t *huge_pte_alloc(struct mm_struct *mm,
47 unsigned long addr, unsigned long sz)
48{
49 pgd_t *pgd;
50 pud_t *pud;
51 pmd_t *pmd;
52 pte_t *pte = NULL;
53
54 /* We must align the address, because our caller will run
55 * set_huge_pte_at() on whatever we return, which writes out
56 * all of the sub-ptes for the hugepage range. So we have
57 * to give it the first such sub-pte.
58 */
59 addr &= HPAGE_MASK;
60
61 pgd = pgd_offset(mm, addr);
62 pud = pud_alloc(mm, pgd, addr);
63 if (pud) {
64 pmd = pmd_alloc(mm, pud, addr);
65 if (pmd)
66 pte = pte_alloc_map(mm, NULL, pmd, addr);
67 }
68 return pte;
69}
70
71pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
72{
73 pgd_t *pgd;
74 pud_t *pud;
75 pmd_t *pmd;
76 pte_t *pte = NULL;
77
78 addr &= HPAGE_MASK;
79
80 pgd = pgd_offset(mm, addr);
81 if (!pgd_none(*pgd)) {
82 pud = pud_offset(pgd, addr);
83 if (!pud_none(*pud)) {
84 pmd = pmd_offset(pud, addr);
85 if (!pmd_none(*pmd))
86 pte = pte_offset_map(pmd, addr);
87 }
88 }
89 return pte;
90}
91
92/* Purge data and instruction TLB entries. Must be called holding
93 * the pa_tlb_lock. The TLB purge instructions are slow on SMP
94 * machines since the purge must be broadcast to all CPUs.
95 */
96static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
97{
98 int i;
99
100 /* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
101 * Linux standard huge pages (e.g. 2 MB) */
102 BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);
103
104 addr &= HPAGE_MASK;
105 addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
106
107 for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
108 mtsp(mm->context, 1);
109 pdtlb(addr);
110 if (unlikely(split_tlb))
111 pitlb(addr);
112 addr += (1UL << REAL_HPAGE_SHIFT);
113 }
114}
115
116void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
117 pte_t *ptep, pte_t entry)
118{
119 unsigned long addr_start;
120 int i;
121
122 addr &= HPAGE_MASK;
123 addr_start = addr;
124
125 for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
126 /* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry)
127 * instead, but then we get double locking on pa_tlb_lock. */
128 *ptep = entry;
129 ptep++;
130
131 /* Drop the PAGE_SIZE/non-huge tlb entry */
132 purge_tlb_entries(mm, addr);
133
134 addr += PAGE_SIZE;
135 pte_val(entry) += PAGE_SIZE;
136 }
137
138 purge_tlb_entries_huge(mm, addr_start);
139}
140
141
142pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
143 pte_t *ptep)
144{
145 pte_t entry;
146
147 entry = *ptep;
148 set_huge_pte_at(mm, addr, ptep, __pte(0));
149
150 return entry;
151}
152
153int pmd_huge(pmd_t pmd)
154{
155 return 0;
156}
157
158int pud_huge(pud_t pud)
159{
160 return 0;
161}
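
For reference, a hypothetical userspace snippet that would exercise these new paths on a kernel built with CONFIG_HUGETLB_PAGE, assuming huge pages have been reserved beforehand (e.g. via /proc/sys/vm/nr_hugepages):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* one 2 MB huge page */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0, len);	/* faulting in goes through huge_pte_alloc()/set_huge_pte_at() */
	munmap(p, len);
	return 0;
}
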
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index c5fec4890fdf..1b366c477687 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -409,15 +409,11 @@ static void __init map_pages(unsigned long start_vaddr,
409 unsigned long vaddr; 409 unsigned long vaddr;
410 unsigned long ro_start; 410 unsigned long ro_start;
411 unsigned long ro_end; 411 unsigned long ro_end;
412 unsigned long fv_addr; 412 unsigned long kernel_end;
413 unsigned long gw_addr;
414 extern const unsigned long fault_vector_20;
415 extern void * const linux_gateway_page;
416 413
417 ro_start = __pa((unsigned long)_text); 414 ro_start = __pa((unsigned long)_text);
418 ro_end = __pa((unsigned long)&data_start); 415 ro_end = __pa((unsigned long)&data_start);
419 fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK; 416 kernel_end = __pa((unsigned long)&_end);
420 gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
421 417
422 end_paddr = start_paddr + size; 418 end_paddr = start_paddr + size;
423 419
@@ -475,24 +471,25 @@ static void __init map_pages(unsigned long start_vaddr,
475 for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) { 471 for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
476 pte_t pte; 472 pte_t pte;
477 473
478 /*
479 * Map the fault vector writable so we can
480 * write the HPMC checksum.
481 */
482 if (force) 474 if (force)
483 pte = __mk_pte(address, pgprot); 475 pte = __mk_pte(address, pgprot);
484 else if (parisc_text_address(vaddr) && 476 else if (parisc_text_address(vaddr)) {
485 address != fv_addr)
486 pte = __mk_pte(address, PAGE_KERNEL_EXEC); 477 pte = __mk_pte(address, PAGE_KERNEL_EXEC);
478 if (address >= ro_start && address < kernel_end)
479 pte = pte_mkhuge(pte);
480 }
487 else 481 else
488#if defined(CONFIG_PARISC_PAGE_SIZE_4KB) 482#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
489 if (address >= ro_start && address < ro_end 483 if (address >= ro_start && address < ro_end) {
490 && address != fv_addr 484 pte = __mk_pte(address, PAGE_KERNEL_EXEC);
491 && address != gw_addr) 485 pte = pte_mkhuge(pte);
492 pte = __mk_pte(address, PAGE_KERNEL_RO); 486 } else
493 else
494#endif 487#endif
488 {
495 pte = __mk_pte(address, pgprot); 489 pte = __mk_pte(address, pgprot);
490 if (address >= ro_start && address < kernel_end)
491 pte = pte_mkhuge(pte);
492 }
496 493
497 if (address >= end_paddr) { 494 if (address >= end_paddr) {
498 if (force) 495 if (force)
@@ -536,15 +533,12 @@ void free_initmem(void)
536 533
537 /* force the kernel to see the new TLB entries */ 534 /* force the kernel to see the new TLB entries */
538 __flush_tlb_range(0, init_begin, init_end); 535 __flush_tlb_range(0, init_begin, init_end);
539 /* Attempt to catch anyone trying to execute code here 536
540 * by filling the page with BRK insns.
541 */
542 memset((void *)init_begin, 0x00, init_end - init_begin);
543 /* finally dump all the instructions which were cached, since the 537 /* finally dump all the instructions which were cached, since the
544 * pages are no longer executable */ 538
545 flush_icache_range(init_begin, init_end); 539 flush_icache_range(init_begin, init_end);
546 540
547 free_initmem_default(-1); 541 free_initmem_default(POISON_FREE_INITMEM);
548 542
549 /* set up a new led state on systems shipped with a LED State panel */ 543
550 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); 544 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
@@ -728,8 +722,8 @@ static void __init pagetable_init(void)
728 unsigned long size; 722 unsigned long size;
729 723
730 start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT; 724 start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
731 end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
732 size = pmem_ranges[range].pages << PAGE_SHIFT; 725 size = pmem_ranges[range].pages << PAGE_SHIFT;
726 end_paddr = start_paddr + size;
733 727
734 map_pages((unsigned long)__va(start_paddr), start_paddr, 728 map_pages((unsigned long)__va(start_paddr), start_paddr,
735 size, PAGE_KERNEL, 0); 729 size, PAGE_KERNEL, 0);
diff --git a/arch/powerpc/boot/dts/sbc8641d.dts b/arch/powerpc/boot/dts/sbc8641d.dts
index 631ede72e226..68f0ed7626bd 100644
--- a/arch/powerpc/boot/dts/sbc8641d.dts
+++ b/arch/powerpc/boot/dts/sbc8641d.dts
@@ -227,23 +227,15 @@
227 reg = <0x520 0x20>; 227 reg = <0x520 0x20>;
228 228
229 phy0: ethernet-phy@1f { 229 phy0: ethernet-phy@1f {
230 interrupt-parent = <&mpic>;
231 interrupts = <10 1>;
232 reg = <0x1f>; 230 reg = <0x1f>;
233 }; 231 };
234 phy1: ethernet-phy@0 { 232 phy1: ethernet-phy@0 {
235 interrupt-parent = <&mpic>;
236 interrupts = <10 1>;
237 reg = <0>; 233 reg = <0>;
238 }; 234 };
239 phy2: ethernet-phy@1 { 235 phy2: ethernet-phy@1 {
240 interrupt-parent = <&mpic>;
241 interrupts = <10 1>;
242 reg = <1>; 236 reg = <1>;
243 }; 237 };
244 phy3: ethernet-phy@2 { 238 phy3: ethernet-phy@2 {
245 interrupt-parent = <&mpic>;
246 interrupts = <10 1>;
247 reg = <2>; 239 reg = <2>;
248 }; 240 };
249 tbi0: tbi-phy@11 { 241 tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a908ada8e0a5..2220f7a60def 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -108,6 +108,7 @@
108#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */ 108#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
109#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */ 109#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
110#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */ 110#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
111#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
111#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) 112#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
112#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) 113#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
113 114
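
The TS field is two bits wide: 00 means non-transactional, 01 suspended, 10 transactional, and 11 is architecturally reserved, which is exactly the case the new MSR_TM_RESV() catches. A standalone sketch, assuming the bit positions match reg.h's MSR_TS_S_LG/MSR_TS_T_LG values:

#include <stdint.h>
#include <stdio.h>

#define MSR_TS_S	(1ULL << 33)	/* assumed position: suspended */
#define MSR_TS_T	(1ULL << 34)	/* assumed position: transactional */
#define MSR_TS_MASK	(MSR_TS_T | MSR_TS_S)
#define MSR_TM_RESV(x)	(((x) & MSR_TS_MASK) == MSR_TS_MASK)

int main(void)
{
	uint64_t msr = MSR_TS_T | MSR_TS_S;	/* both TS bits set: reserved encoding */

	printf("reserved: %d\n", MSR_TM_RESV(msr) != 0);	/* prints 1 */
	return 0;
}
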
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index c9e26cb264f4..5654ece02c0d 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -370,15 +370,16 @@ COMPAT_SYS(execveat)
370PPC64ONLY(switch_endian) 370PPC64ONLY(switch_endian)
371SYSCALL_SPU(userfaultfd) 371SYSCALL_SPU(userfaultfd)
372SYSCALL_SPU(membarrier) 372SYSCALL_SPU(membarrier)
373SYSCALL(semop) 373SYSCALL(ni_syscall)
374SYSCALL(semget) 374SYSCALL(ni_syscall)
375COMPAT_SYS(semctl) 375SYSCALL(ni_syscall)
376COMPAT_SYS(semtimedop) 376SYSCALL(ni_syscall)
377COMPAT_SYS(msgsnd) 377SYSCALL(ni_syscall)
378COMPAT_SYS(msgrcv) 378SYSCALL(ni_syscall)
379SYSCALL(msgget) 379SYSCALL(ni_syscall)
380COMPAT_SYS(msgctl) 380SYSCALL(ni_syscall)
381COMPAT_SYS(shmat) 381SYSCALL(ni_syscall)
382SYSCALL(shmdt) 382SYSCALL(ni_syscall)
383SYSCALL(shmget) 383SYSCALL(ni_syscall)
384COMPAT_SYS(shmctl) 384SYSCALL(ni_syscall)
385SYSCALL(mlock2)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 6d8f8023ac27..4b6b8ace18e0 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define __NR_syscalls 378 15#define __NR_syscalls 379
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18#define NR_syscalls __NR_syscalls 18#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 81579e93c659..12a05652377a 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -388,17 +388,6 @@
388#define __NR_switch_endian 363 388#define __NR_switch_endian 363
389#define __NR_userfaultfd 364 389#define __NR_userfaultfd 364
390#define __NR_membarrier 365 390#define __NR_membarrier 365
391#define __NR_semop 366 391#define __NR_mlock2 378
392#define __NR_semget 367
393#define __NR_semctl 368
394#define __NR_semtimedop 369
395#define __NR_msgsnd 370
396#define __NR_msgrcv 371
397#define __NR_msgget 372
398#define __NR_msgctl 373
399#define __NR_shmat 374
400#define __NR_shmdt 375
401#define __NR_shmget 376
402#define __NR_shmctl 377
403 392
404#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 393#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
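
A hypothetical userspace sketch of calling the newly wired-up syscall through its raw number, for toolchains whose libc does not yet provide an mlock2() wrapper:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_mlock2
#define __NR_mlock2 378		/* powerpc value from this header */
#endif
#ifndef MLOCK_ONFAULT
#define MLOCK_ONFAULT 0x01	/* lock pages as they are faulted in */
#endif

int main(void)
{
	static char buf[4096];

	if (syscall(__NR_mlock2, buf, sizeof(buf), MLOCK_ONFAULT) != 0)
		perror("mlock2");
	return 0;
}
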
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 80dfe8965df9..8d14feb40f12 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -590,16 +590,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
590 eeh_ops->configure_bridge(pe); 590 eeh_ops->configure_bridge(pe);
591 eeh_pe_restore_bars(pe); 591 eeh_pe_restore_bars(pe);
592 592
593 /* 593 /* Clear frozen state */
594 * If it's PHB PE, the frozen state on all available PEs should have 594 rc = eeh_clear_pe_frozen_state(pe, false);
595 * been cleared by the PHB reset. Otherwise, we unfreeze the PE and its 595 if (rc)
596 * child PEs because they might be in frozen state. 596 return rc;
597 */
598 if (!(pe->type & EEH_PE_PHB)) {
599 rc = eeh_clear_pe_frozen_state(pe, false);
600 if (rc)
601 return rc;
602 }
603 597
604 /* Give the system 5 seconds to finish running the user-space 598 /* Give the system 5 seconds to finish running the user-space
605 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes, 599 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 75b6676c1a0b..646bf4d222c1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
551 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1; 551 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
552 } 552 }
553 553
554 /*
555 * Use the current MSR TM suspended bit to track if we have
556 * checkpointed state outstanding.
557 * On signal delivery, we'd normally reclaim the checkpointed
558 * state to obtain the stack pointer (see: get_tm_stackpointer()).
559 * This will then directly return to userspace without going
560 * through __switch_to(). However, if the stack frame is bad,
561 * we need to exit this thread which calls __switch_to() which
562 * will again attempt to reclaim the already saved tm state.
563 * Hence we need to check that we've not already reclaimed
564 * this state.
565 * We do this using the current MSR, rather than tracking it in
566 * some specific thread_struct bit, as it has the additional
567 * benefit of checking for a potential TM bad thing exception.
568 */
569 if (!MSR_TM_SUSPENDED(mfmsr()))
570 return;
571
554 tm_reclaim(thr, thr->regs->msr, cause); 572 tm_reclaim(thr, thr->regs->msr, cause);
555 573
556 /* Having done the reclaim, we now have the checkpointed 574 /* Having done the reclaim, we now have the checkpointed
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 0dbee465af7a..ef7c24e84a62 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct pt_regs *regs,
875 return 1; 875 return 1;
876#endif /* CONFIG_SPE */ 876#endif /* CONFIG_SPE */
877 877
878 /* Get the top half of the MSR from the user context */
879 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
880 return 1;
881 msr_hi <<= 32;
882 /* If TM bits are set to the reserved value, it's an invalid context */
883 if (MSR_TM_RESV(msr_hi))
884 return 1;
885 /* Pull in the MSR TM bits from the user context */
886 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
878 /* Now, recheckpoint. This loads up all of the checkpointed (older) 887 /* Now, recheckpoint. This loads up all of the checkpointed (older)
879 * registers, including FP and V[S]Rs. After recheckpointing, the 888 * registers, including FP and V[S]Rs. After recheckpointing, the
880 * transactional versions should be loaded. 889 * transactional versions should be loaded.
@@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
884 current->thread.tm_texasr |= TEXASR_FS; 893 current->thread.tm_texasr |= TEXASR_FS;
885 /* This loads the checkpointed FP/VEC state, if used */ 894 /* This loads the checkpointed FP/VEC state, if used */
886 tm_recheckpoint(&current->thread, msr); 895 tm_recheckpoint(&current->thread, msr);
887 /* Get the top half of the MSR */
888 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
889 return 1;
890 /* Pull in MSR TM from user context */
891 regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
892 896
893 /* This loads the speculative FP/VEC state, if used */ 897 /* This loads the speculative FP/VEC state, if used */
894 if (msr & MSR_FP) { 898 if (msr & MSR_FP) {
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 20756dfb9f34..c676ecec0869 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -438,6 +438,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
438 438
439 /* get MSR separately, transfer the LE bit if doing signal return */ 439 /* get MSR separately, transfer the LE bit if doing signal return */
440 err |= __get_user(msr, &sc->gp_regs[PT_MSR]); 440 err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
441 /* Don't allow reserved mode. */
442 if (MSR_TM_RESV(msr))
443 return -EINVAL;
444
441 /* pull in MSR TM from user context */ 445 /* pull in MSR TM from user context */
442 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); 446 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
443 447
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index 6ccfb6c1c707..e505223b4ec5 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -43,11 +43,34 @@ static unsigned int opal_irq_count;
43static unsigned int *opal_irqs; 43static unsigned int *opal_irqs;
44 44
45static void opal_handle_irq_work(struct irq_work *work); 45static void opal_handle_irq_work(struct irq_work *work);
46static __be64 last_outstanding_events; 46static u64 last_outstanding_events;
47static struct irq_work opal_event_irq_work = { 47static struct irq_work opal_event_irq_work = {
48 .func = opal_handle_irq_work, 48 .func = opal_handle_irq_work,
49}; 49};
50 50
51void opal_handle_events(uint64_t events)
52{
53 int virq, hwirq = 0;
54 u64 mask = opal_event_irqchip.mask;
55
56 if (!in_irq() && (events & mask)) {
57 last_outstanding_events = events;
58 irq_work_queue(&opal_event_irq_work);
59 return;
60 }
61
62 while (events & mask) {
63 hwirq = fls64(events) - 1;
64 if (BIT_ULL(hwirq) & mask) {
65 virq = irq_find_mapping(opal_event_irqchip.domain,
66 hwirq);
67 if (virq)
68 generic_handle_irq(virq);
69 }
70 events &= ~BIT_ULL(hwirq);
71 }
72}
73
51static void opal_event_mask(struct irq_data *d) 74static void opal_event_mask(struct irq_data *d)
52{ 75{
53 clear_bit(d->hwirq, &opal_event_irqchip.mask); 76 clear_bit(d->hwirq, &opal_event_irqchip.mask);
@@ -55,9 +78,21 @@ static void opal_event_mask(struct irq_data *d)
55 78
56static void opal_event_unmask(struct irq_data *d) 79static void opal_event_unmask(struct irq_data *d)
57{ 80{
81 __be64 events;
82
58 set_bit(d->hwirq, &opal_event_irqchip.mask); 83 set_bit(d->hwirq, &opal_event_irqchip.mask);
59 84
60 opal_poll_events(&last_outstanding_events); 85 opal_poll_events(&events);
86 last_outstanding_events = be64_to_cpu(events);
87
88 /*
89 * We can't just handle the events now with opal_handle_events().
90 * If we did we would deadlock when opal_event_unmask() is called from
91 * handle_level_irq() with the irq descriptor lock held, because
92 * calling opal_handle_events() would call generic_handle_irq() and
93 * then handle_level_irq() which would try to take the descriptor lock
94 * again. Instead queue the events for later.
95 */
61 if (last_outstanding_events & opal_event_irqchip.mask) 96 if (last_outstanding_events & opal_event_irqchip.mask)
62 /* Need to retrigger the interrupt */ 97 /* Need to retrigger the interrupt */
63 irq_work_queue(&opal_event_irq_work); 98 irq_work_queue(&opal_event_irq_work);
@@ -96,29 +131,6 @@ static int opal_event_map(struct irq_domain *d, unsigned int irq,
96 return 0; 131 return 0;
97} 132}
98 133
99void opal_handle_events(uint64_t events)
100{
101 int virq, hwirq = 0;
102 u64 mask = opal_event_irqchip.mask;
103
104 if (!in_irq() && (events & mask)) {
105 last_outstanding_events = events;
106 irq_work_queue(&opal_event_irq_work);
107 return;
108 }
109
110 while (events & mask) {
111 hwirq = fls64(events) - 1;
112 if (BIT_ULL(hwirq) & mask) {
113 virq = irq_find_mapping(opal_event_irqchip.domain,
114 hwirq);
115 if (virq)
116 generic_handle_irq(virq);
117 }
118 events &= ~BIT_ULL(hwirq);
119 }
120}
121
122static irqreturn_t opal_interrupt(int irq, void *data) 134static irqreturn_t opal_interrupt(int irq, void *data)
123{ 135{
124 __be64 events; 136 __be64 events;
@@ -131,7 +143,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
131 143
132static void opal_handle_irq_work(struct irq_work *work) 144static void opal_handle_irq_work(struct irq_work *work)
133{ 145{
134 opal_handle_events(be64_to_cpu(last_outstanding_events)); 146 opal_handle_events(last_outstanding_events);
135} 147}
136 148
137static int opal_event_match(struct irq_domain *h, struct device_node *node, 149static int opal_event_match(struct irq_domain *h, struct device_node *node,
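
The dispatch loop in opal_handle_events() walks the set bits of the event word from the most significant downwards, clearing each as it is handled. A standalone sketch of the same pattern (fls64() here is a portable stand-in for the kernel helper):

#include <stdint.h>
#include <stdio.h>

static int fls64(uint64_t x)	/* 1-based index of the highest set bit, 0 if none */
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

int main(void)
{
	uint64_t events = (1ULL << 40) | (1ULL << 3);
	uint64_t mask = ~0ULL;

	while (events & mask) {
		int hwirq = fls64(events) - 1;

		if ((1ULL << hwirq) & mask)
			printf("handle hwirq %d\n", hwirq);	/* 40, then 3 */
		events &= ~(1ULL << hwirq);
	}
	return 0;
}
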
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 4296d55e88f3..57cffb80bc36 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -278,7 +278,7 @@ static void opal_handle_message(void)
278 278
279 /* Sanity check */ 279 /* Sanity check */
280 if (type >= OPAL_MSG_TYPE_MAX) { 280 if (type >= OPAL_MSG_TYPE_MAX) {
281 pr_warning("%s: Unknown message type: %u\n", __func__, type); 281 pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
282 return; 282 return;
283 } 283 }
284 opal_message_do_notify(type, (void *)&msg); 284 opal_message_do_notify(type, (void *)&msg);
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 0c5d8ee657f0..d1e7b0a0feeb 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -312,6 +312,7 @@ extern void css_schedule_reprobe(void);
312extern void reipl_ccw_dev(struct ccw_dev_id *id); 312extern void reipl_ccw_dev(struct ccw_dev_id *id);
313 313
314struct cio_iplinfo { 314struct cio_iplinfo {
315 u8 ssid;
315 u16 devno; 316 u16 devno;
316 int is_qdio; 317 int is_qdio;
317}; 318};
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 3ad48f22de78..bab6739a1154 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -206,9 +206,16 @@ do { \
206} while (0) 206} while (0)
207#endif /* CONFIG_COMPAT */ 207#endif /* CONFIG_COMPAT */
208 208
209extern unsigned long mmap_rnd_mask; 209/*
210 210 * Cache aliasing on the latest machines calls for a mapping granularity
211#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask) 211 * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
212 * of up to 1GB. For 31-bit processes the virtual address space is limited,
213 * use no alignment and limit the randomization to 8MB.
214 */
215#define BRK_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ffffUL)
216#define MMAP_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ff80UL)
217#define MMAP_ALIGN_MASK (is_32bit_task() ? 0 : 0x7fUL)
218#define STACK_RND_MASK MMAP_RND_MASK
212 219
213#define ARCH_DLINFO \ 220#define ARCH_DLINFO \
214do { \ 221do { \
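
The masks are expressed in pages, so shifting by PAGE_SHIFT recovers the byte figures quoted in the comment. A quick arithmetic check, assuming the usual s390 PAGE_SHIFT of 12:

#include <stdio.h>

#define PAGE_SHIFT	12
#define RND_MASK_64	0x3ff80UL	/* 64-bit: low 7 bits clear -> 512KB alignment */
#define RND_MASK_31	0x7ffUL		/* 31-bit */

int main(void)
{
	/* 0x3ff80 + 0x80 = 0x40000 pages = 2^18 * 4KB = 1GB of range */
	printf("64-bit mmap randomization range: %lu MB\n",
	       ((RND_MASK_64 + 0x80) << PAGE_SHIFT) >> 20);	/* 1024 */
	/* 0x7ff + 1 = 0x800 pages = 2^11 * 4KB = 8MB of range */
	printf("31-bit mmap randomization range: %lu MB\n",
	       ((RND_MASK_31 + 1) << PAGE_SHIFT) >> 20);	/* 8 */
	return 0;
}
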
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 39ae6a359747..86634e71b69f 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -64,7 +64,8 @@ struct ipl_block_fcp {
64 64
65struct ipl_block_ccw { 65struct ipl_block_ccw {
66 u8 reserved1[84]; 66 u8 reserved1[84];
67 u8 reserved2[2]; 67 u16 reserved2 : 13;
68 u8 ssid : 3;
68 u16 devno; 69 u16 devno;
69 u8 vm_flags; 70 u8 vm_flags;
70 u8 reserved3[3]; 71 u8 reserved3[3];
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index 7a7abf1a5537..1aac41e83ea1 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -195,5 +195,7 @@ void zpci_dma_exit_device(struct zpci_dev *);
195void dma_free_seg_table(unsigned long); 195void dma_free_seg_table(unsigned long);
196unsigned long *dma_alloc_cpu_table(void); 196unsigned long *dma_alloc_cpu_table(void);
197void dma_cleanup_tables(unsigned long *); 197void dma_cleanup_tables(unsigned long *);
198void dma_update_cpu_trans(unsigned long *, void *, dma_addr_t, int); 198unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
199void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
200
199#endif 201#endif
diff --git a/arch/s390/include/asm/trace/diag.h b/arch/s390/include/asm/trace/diag.h
index 776f307960cc..cc6cfe7889da 100644
--- a/arch/s390/include/asm/trace/diag.h
+++ b/arch/s390/include/asm/trace/diag.h
@@ -19,7 +19,7 @@
19#define TRACE_INCLUDE_PATH asm/trace 19#define TRACE_INCLUDE_PATH asm/trace
20#define TRACE_INCLUDE_FILE diag 20#define TRACE_INCLUDE_FILE diag
21 21
22TRACE_EVENT(diagnose, 22TRACE_EVENT(s390_diagnose,
23 TP_PROTO(unsigned short nr), 23 TP_PROTO(unsigned short nr),
24 TP_ARGS(nr), 24 TP_ARGS(nr),
25 TP_STRUCT__entry( 25 TP_STRUCT__entry(
@@ -32,9 +32,9 @@ TRACE_EVENT(diagnose,
32); 32);
33 33
34#ifdef CONFIG_TRACEPOINTS 34#ifdef CONFIG_TRACEPOINTS
35void trace_diagnose_norecursion(int diag_nr); 35void trace_s390_diagnose_norecursion(int diag_nr);
36#else 36#else
37static inline void trace_diagnose_norecursion(int diag_nr) { } 37static inline void trace_s390_diagnose_norecursion(int diag_nr) { }
38#endif 38#endif
39 39
40#endif /* _TRACE_S390_DIAG_H */ 40#endif /* _TRACE_S390_DIAG_H */
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index a848adba1504..34ec202472c6 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -192,14 +192,14 @@
192#define __NR_set_tid_address 252 192#define __NR_set_tid_address 252
193#define __NR_fadvise64 253 193#define __NR_fadvise64 253
194#define __NR_timer_create 254 194#define __NR_timer_create 254
195#define __NR_timer_settime (__NR_timer_create+1) 195#define __NR_timer_settime 255
196#define __NR_timer_gettime (__NR_timer_create+2) 196#define __NR_timer_gettime 256
197#define __NR_timer_getoverrun (__NR_timer_create+3) 197#define __NR_timer_getoverrun 257
198#define __NR_timer_delete (__NR_timer_create+4) 198#define __NR_timer_delete 258
199#define __NR_clock_settime (__NR_timer_create+5) 199#define __NR_clock_settime 259
200#define __NR_clock_gettime (__NR_timer_create+6) 200#define __NR_clock_gettime 260
201#define __NR_clock_getres (__NR_timer_create+7) 201#define __NR_clock_getres 261
202#define __NR_clock_nanosleep (__NR_timer_create+8) 202#define __NR_clock_nanosleep 262
203/* Number 263 is reserved for vserver */ 203/* Number 263 is reserved for vserver */
204#define __NR_statfs64 265 204#define __NR_statfs64 265
205#define __NR_fstatfs64 266 205#define __NR_fstatfs64 266
@@ -309,7 +309,8 @@
309#define __NR_recvfrom 371 309#define __NR_recvfrom 371
310#define __NR_recvmsg 372 310#define __NR_recvmsg 372
311#define __NR_shutdown 373 311#define __NR_shutdown 373
312#define NR_syscalls 374 312#define __NR_mlock2 374
313#define NR_syscalls 375
313 314
314/* 315/*
315 * There are some system calls that are not present on 64 bit, some 316 * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index 09f194052df3..fac4eeddef91 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -176,3 +176,4 @@ COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
176COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); 176COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
177COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); 177COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
178COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); 178COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
179COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index f98766ede4e1..48b37b8357e6 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -121,14 +121,14 @@ device_initcall(show_diag_stat_init);
121void diag_stat_inc(enum diag_stat_enum nr) 121void diag_stat_inc(enum diag_stat_enum nr)
122{ 122{
123 this_cpu_inc(diag_stat.counter[nr]); 123 this_cpu_inc(diag_stat.counter[nr]);
124 trace_diagnose(diag_map[nr].code); 124 trace_s390_diagnose(diag_map[nr].code);
125} 125}
126EXPORT_SYMBOL(diag_stat_inc); 126EXPORT_SYMBOL(diag_stat_inc);
127 127
128void diag_stat_inc_norecursion(enum diag_stat_enum nr) 128void diag_stat_inc_norecursion(enum diag_stat_enum nr)
129{ 129{
130 this_cpu_inc(diag_stat.counter[nr]); 130 this_cpu_inc(diag_stat.counter[nr]);
131 trace_diagnose_norecursion(diag_map[nr].code); 131 trace_s390_diagnose_norecursion(diag_map[nr].code);
132} 132}
133EXPORT_SYMBOL(diag_stat_inc_norecursion); 133EXPORT_SYMBOL(diag_stat_inc_norecursion);
134 134
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 1255c6c5353e..301ee9c70688 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -26,6 +26,7 @@
26#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
27#include <asm/thread_info.h> 27#include <asm/thread_info.h>
28#include <asm/page.h> 28#include <asm/page.h>
29#include <asm/ptrace.h>
29 30
30#define ARCH_OFFSET 4 31#define ARCH_OFFSET 4
31 32
@@ -59,19 +60,6 @@ __HEAD
59 .long 0x020006e0,0x20000050 60 .long 0x020006e0,0x20000050
60 61
61 .org 0x200 62 .org 0x200
62#
63# subroutine to set architecture mode
64#
65.Lsetmode:
66 mvi __LC_AR_MODE_ID,1 # set esame flag
67 slr %r0,%r0 # set cpuid to zero
68 lhi %r1,2 # mode 2 = esame (dump)
69 sigp %r1,%r0,0x12 # switch to esame mode
70 bras %r13,0f
71 .fill 16,4,0x0
720: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
73 sam31 # switch to 31 bit addressing mode
74 br %r14
75 63
76# 64#
77# subroutine to wait for end I/O 65# subroutine to wait for end I/O
@@ -159,7 +147,14 @@ __HEAD
159 .long 0x02200050,0x00000000 147 .long 0x02200050,0x00000000
160 148
161iplstart: 149iplstart:
162 bas %r14,.Lsetmode # Immediately switch to 64 bit mode 150 mvi __LC_AR_MODE_ID,1 # set esame flag
151 slr %r0,%r0 # set cpuid to zero
152 lhi %r1,2 # mode 2 = esame (dump)
153 sigp %r1,%r0,0x12 # switch to esame mode
154 bras %r13,0f
155 .fill 16,4,0x0
1560: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
157 sam31 # switch to 31 bit addressing mode
163 lh %r1,0xb8 # test if subchannel number 158 lh %r1,0xb8 # test if subchannel number
164 bct %r1,.Lnoload # is valid 159 bct %r1,.Lnoload # is valid
165 l %r1,0xb8 # load ipl subchannel number 160 l %r1,0xb8 # load ipl subchannel number
@@ -269,71 +264,6 @@ iplstart:
269.Lcpuid:.fill 8,1,0 264.Lcpuid:.fill 8,1,0
270 265
271# 266#
272# SALIPL loader support. Based on a patch by Rob van der Heij.
273# This entry point is called directly from the SALIPL loader and
274# doesn't need a builtin ipl record.
275#
276 .org 0x800
277ENTRY(start)
278 stm %r0,%r15,0x07b0 # store registers
279 bas %r14,.Lsetmode # Immediately switch to 64 bit mode
280 basr %r12,%r0
281.base:
282 l %r11,.parm
283 l %r8,.cmd # pointer to command buffer
284
285 ltr %r9,%r9 # do we have SALIPL parameters?
286 bp .sk8x8
287
288 mvc 0(64,%r8),0x00b0 # copy saved registers
289 xc 64(240-64,%r8),0(%r8) # remainder of buffer
290 tr 0(64,%r8),.lowcase
291 b .gotr
292.sk8x8:
293 mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
294.gotr:
295 slr %r0,%r0
296 st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
297 st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
298 j startup # continue with startup
299.cmd: .long COMMAND_LINE # address of command line buffer
300.parm: .long PARMAREA
301.lowcase:
302 .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
303 .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
304 .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
305 .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
306 .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
307 .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
308 .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
309 .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
310 .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
311 .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
312 .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
313 .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
314 .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
315 .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
316 .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
317 .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
318
319 .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
320 .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
321 .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
322 .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
323 .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
324 .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
325 .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
326 .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
327 .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg
328 .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi
329 .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop
330 .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr
331 .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx
332 .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz
333 .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
334 .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
335
336#
337# startup-code at 0x10000, running in absolute addressing mode 267# startup-code at 0x10000, running in absolute addressing mode
338# this is called either by the ipl loader or directly by PSW restart 268# this is called either by the ipl loader or directly by PSW restart
339# or linload or SALIPL 269# or linload or SALIPL
@@ -364,7 +294,7 @@ ENTRY(startup_kdump)
364 bras %r13,0f 294 bras %r13,0f
365 .fill 16,4,0x0 295 .fill 16,4,0x0
3660: lmh %r0,%r15,0(%r13) # clear high-order half of gprs 2960: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
367 sam31 # switch to 31 bit addressing mode 297 sam64 # switch to 64 bit addressing mode
368 basr %r13,0 # get base 298 basr %r13,0 # get base
369.LPG0: 299.LPG0:
370 xc 0x200(256),0x200 # partially clear lowcore 300 xc 0x200(256),0x200 # partially clear lowcore
@@ -395,7 +325,7 @@ ENTRY(startup_kdump)
395 jnz 1b 325 jnz 1b
396 j 4f 326 j 4f
3972: l %r15,.Lstack-.LPG0(%r13) 3272: l %r15,.Lstack-.LPG0(%r13)
398 ahi %r15,-96 328 ahi %r15,-STACK_FRAME_OVERHEAD
399 la %r2,.Lals_string-.LPG0(%r13) 329 la %r2,.Lals_string-.LPG0(%r13)
400 l %r3,.Lsclp_print-.LPG0(%r13) 330 l %r3,.Lsclp_print-.LPG0(%r13)
401 basr %r14,%r3 331 basr %r14,%r3
@@ -429,8 +359,7 @@ ENTRY(startup_kdump)
429 .long 1, 0xc0000000 359 .long 1, 0xc0000000
430#endif 360#endif
4314: 3614:
432 /* Continue with 64bit startup code in head64.S */ 362 /* Continue with startup code in head64.S */
433 sam64 # switch to 64 bit mode
434 jg startup_continue 363 jg startup_continue
435 364
436 .align 8 365 .align 8
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index f6d8acd7e136..b1f0a90f933b 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -121,6 +121,7 @@ static char *dump_type_str(enum dump_type type)
121 * Must be in data section since the bss section 121 * Must be in data section since the bss section
122 * is not cleared when these are accessed. 122 * is not cleared when these are accessed.
123 */ 123 */
124static u8 ipl_ssid __attribute__((__section__(".data"))) = 0;
124static u16 ipl_devno __attribute__((__section__(".data"))) = 0; 125static u16 ipl_devno __attribute__((__section__(".data"))) = 0;
125u32 ipl_flags __attribute__((__section__(".data"))) = 0; 126u32 ipl_flags __attribute__((__section__(".data"))) = 0;
126 127
@@ -197,6 +198,33 @@ static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
197 return snprintf(page, PAGE_SIZE, _format, ##args); \ 198 return snprintf(page, PAGE_SIZE, _format, ##args); \
198} 199}
199 200
201#define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk) \
202static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
203 struct kobj_attribute *attr, \
204 const char *buf, size_t len) \
205{ \
206 unsigned long long ssid, devno; \
207 \
208 if (sscanf(buf, "0.%llx.%llx\n", &ssid, &devno) != 2) \
209 return -EINVAL; \
210 \
211 if (ssid > __MAX_SSID || devno > __MAX_SUBCHANNEL) \
212 return -EINVAL; \
213 \
214 _ipl_blk.ssid = ssid; \
215 _ipl_blk.devno = devno; \
216 return len; \
217}
218
219#define DEFINE_IPL_CCW_ATTR_RW(_prefix, _name, _ipl_blk) \
220IPL_ATTR_SHOW_FN(_prefix, _name, "0.%x.%04x\n", \
221 _ipl_blk.ssid, _ipl_blk.devno); \
222IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk); \
223static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
224 __ATTR(_name, (S_IRUGO | S_IWUSR), \
225 sys_##_prefix##_##_name##_show, \
226 sys_##_prefix##_##_name##_store) \
227
200#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ 228#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
201IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \ 229IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \
202static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ 230static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
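
The new store function accepts a full bus id of the form 0.<ssid>.<devno> instead of assuming subchannel set 0. A standalone sketch of the parse-and-range-check it performs (the limits stand in for __MAX_SSID and __MAX_SUBCHANNEL):

#include <stdio.h>

int main(void)
{
	unsigned long long ssid, devno;
	const char *buf = "0.1.4711\n";		/* example sysfs write */

	if (sscanf(buf, "0.%llx.%llx\n", &ssid, &devno) != 2 ||
	    ssid > 3 || devno > 0xffff)		/* assumed limits */
		return 1;

	printf("ssid=%llx devno=%04llx\n", ssid, devno);	/* ssid=1 devno=4711 */
	return 0;
}
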
@@ -395,7 +423,7 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
395 423
396 switch (ipl_info.type) { 424 switch (ipl_info.type) {
397 case IPL_TYPE_CCW: 425 case IPL_TYPE_CCW:
398 return sprintf(page, "0.0.%04x\n", ipl_devno); 426 return sprintf(page, "0.%x.%04x\n", ipl_ssid, ipl_devno);
399 case IPL_TYPE_FCP: 427 case IPL_TYPE_FCP:
400 case IPL_TYPE_FCP_DUMP: 428 case IPL_TYPE_FCP_DUMP:
401 return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); 429 return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
@@ -687,21 +715,14 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
687 struct bin_attribute *attr, 715 struct bin_attribute *attr,
688 char *buf, loff_t off, size_t count) 716 char *buf, loff_t off, size_t count)
689{ 717{
718 size_t scpdata_len = count;
690 size_t padding; 719 size_t padding;
691 size_t scpdata_len;
692
693 if (off < 0)
694 return -EINVAL;
695 720
696 if (off >= DIAG308_SCPDATA_SIZE)
697 return -ENOSPC;
698 721
699 if (count > DIAG308_SCPDATA_SIZE - off) 722 if (off)
700 count = DIAG308_SCPDATA_SIZE - off; 723 return -EINVAL;
701
702 memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count);
703 scpdata_len = off + count;
704 724
725 memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf, count);
705 if (scpdata_len % 8) { 726 if (scpdata_len % 8) {
706 padding = 8 - (scpdata_len % 8); 727 padding = 8 - (scpdata_len % 8);
707 memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len, 728 memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
@@ -717,7 +738,7 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
717} 738}
718static struct bin_attribute sys_reipl_fcp_scp_data_attr = 739static struct bin_attribute sys_reipl_fcp_scp_data_attr =
719 __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read, 740 __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read,
720 reipl_fcp_scpdata_write, PAGE_SIZE); 741 reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE);
721 742
722static struct bin_attribute *reipl_fcp_bin_attrs[] = { 743static struct bin_attribute *reipl_fcp_bin_attrs[] = {
723 &sys_reipl_fcp_scp_data_attr, 744 &sys_reipl_fcp_scp_data_attr,
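
The rewritten write handler rejects non-zero offsets and pads the SCP data out to an 8-byte boundary. A standalone sketch of just the padding arithmetic:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char scp_data[32];
	size_t scpdata_len = 13;

	memset(scp_data, 0xaa, sizeof(scp_data));
	if (scpdata_len % 8) {
		size_t padding = 8 - (scpdata_len % 8);

		memset(scp_data + scpdata_len, 0, padding);	/* zero-fill the tail */
		scpdata_len += padding;
	}
	printf("padded length: %zu\n", scpdata_len);	/* 16 */
	return 0;
}
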
@@ -814,9 +835,7 @@ static struct attribute_group reipl_fcp_attr_group = {
814}; 835};
815 836
816/* CCW reipl device attributes */ 837/* CCW reipl device attributes */
817 838DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ipl_info.ccw);
818DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
819 reipl_block_ccw->ipl_info.ccw.devno);
820 839
821/* NSS wrapper */ 840/* NSS wrapper */
822static ssize_t reipl_nss_loadparm_show(struct kobject *kobj, 841static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
@@ -1056,8 +1075,8 @@ static void __reipl_run(void *unused)
1056 1075
1057 switch (reipl_method) { 1076 switch (reipl_method) {
1058 case REIPL_METHOD_CCW_CIO: 1077 case REIPL_METHOD_CCW_CIO:
1078 devid.ssid = reipl_block_ccw->ipl_info.ccw.ssid;
1059 devid.devno = reipl_block_ccw->ipl_info.ccw.devno; 1079 devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
1060 devid.ssid = 0;
1061 reipl_ccw_dev(&devid); 1080 reipl_ccw_dev(&devid);
1062 break; 1081 break;
1063 case REIPL_METHOD_CCW_VM: 1082 case REIPL_METHOD_CCW_VM:
@@ -1192,6 +1211,7 @@ static int __init reipl_ccw_init(void)
1192 1211
1193 reipl_block_ccw_init(reipl_block_ccw); 1212 reipl_block_ccw_init(reipl_block_ccw);
1194 if (ipl_info.type == IPL_TYPE_CCW) { 1213 if (ipl_info.type == IPL_TYPE_CCW) {
1214 reipl_block_ccw->ipl_info.ccw.ssid = ipl_ssid;
1195 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; 1215 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
1196 reipl_block_ccw_fill_parms(reipl_block_ccw); 1216 reipl_block_ccw_fill_parms(reipl_block_ccw);
1197 } 1217 }
@@ -1336,9 +1356,7 @@ static struct attribute_group dump_fcp_attr_group = {
1336}; 1356};
1337 1357
1338/* CCW dump device attributes */ 1358/* CCW dump device attributes */
1339 1359DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ipl_info.ccw);
1340DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
1341 dump_block_ccw->ipl_info.ccw.devno);
1342 1360
1343static struct attribute *dump_ccw_attrs[] = { 1361static struct attribute *dump_ccw_attrs[] = {
1344 &sys_dump_ccw_device_attr.attr, 1362 &sys_dump_ccw_device_attr.attr,
@@ -1418,8 +1436,8 @@ static void __dump_run(void *unused)
1418 1436
1419 switch (dump_method) { 1437 switch (dump_method) {
1420 case DUMP_METHOD_CCW_CIO: 1438 case DUMP_METHOD_CCW_CIO:
1439 devid.ssid = dump_block_ccw->ipl_info.ccw.ssid;
1421 devid.devno = dump_block_ccw->ipl_info.ccw.devno; 1440 devid.devno = dump_block_ccw->ipl_info.ccw.devno;
1422 devid.ssid = 0;
1423 reipl_ccw_dev(&devid); 1441 reipl_ccw_dev(&devid);
1424 break; 1442 break;
1425 case DUMP_METHOD_CCW_VM: 1443 case DUMP_METHOD_CCW_VM:
@@ -1939,14 +1957,14 @@ void __init setup_ipl(void)
1939 ipl_info.type = get_ipl_type(); 1957 ipl_info.type = get_ipl_type();
1940 switch (ipl_info.type) { 1958 switch (ipl_info.type) {
1941 case IPL_TYPE_CCW: 1959 case IPL_TYPE_CCW:
1960 ipl_info.data.ccw.dev_id.ssid = ipl_ssid;
1942 ipl_info.data.ccw.dev_id.devno = ipl_devno; 1961 ipl_info.data.ccw.dev_id.devno = ipl_devno;
1943 ipl_info.data.ccw.dev_id.ssid = 0;
1944 break; 1962 break;
1945 case IPL_TYPE_FCP: 1963 case IPL_TYPE_FCP:
1946 case IPL_TYPE_FCP_DUMP: 1964 case IPL_TYPE_FCP_DUMP:
1965 ipl_info.data.fcp.dev_id.ssid = 0;
1947 ipl_info.data.fcp.dev_id.devno = 1966 ipl_info.data.fcp.dev_id.devno =
1948 IPL_PARMBLOCK_START->ipl_info.fcp.devno; 1967 IPL_PARMBLOCK_START->ipl_info.fcp.devno;
1949 ipl_info.data.fcp.dev_id.ssid = 0;
1950 ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn; 1968 ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
1951 ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun; 1969 ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
1952 break; 1970 break;
@@ -1978,6 +1996,7 @@ void __init ipl_save_parameters(void)
1978 if (cio_get_iplinfo(&iplinfo)) 1996 if (cio_get_iplinfo(&iplinfo))
1979 return; 1997 return;
1980 1998
1999 ipl_ssid = iplinfo.ssid;
1981 ipl_devno = iplinfo.devno; 2000 ipl_devno = iplinfo.devno;
1982 ipl_flags |= IPL_DEVNO_VALID; 2001 ipl_flags |= IPL_DEVNO_VALID;
1983 if (!iplinfo.is_qdio) 2002 if (!iplinfo.is_qdio)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 688a3aad9c79..114ee8b96f17 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -243,11 +243,7 @@ unsigned long arch_align_stack(unsigned long sp)
243 243
244static inline unsigned long brk_rnd(void) 244static inline unsigned long brk_rnd(void)
245{ 245{
246 /* 8MB for 32bit, 1GB for 64bit */ 246 return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
247 if (is_32bit_task())
248 return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
249 else
250 return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
251} 247}
252 248
253unsigned long arch_randomize_brk(struct mm_struct *mm) 249unsigned long arch_randomize_brk(struct mm_struct *mm)
diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c
index fa0bdff1d413..9fe7781a45cd 100644
--- a/arch/s390/kernel/sclp.c
+++ b/arch/s390/kernel/sclp.c
@@ -21,7 +21,7 @@ static void _sclp_wait_int(void)
21 __ctl_load(cr0_new, 0, 0); 21 __ctl_load(cr0_new, 0, 0);
22 22
23 psw_ext_save = S390_lowcore.external_new_psw; 23 psw_ext_save = S390_lowcore.external_new_psw;
24 psw_mask = __extract_psw() & (PSW_MASK_EA | PSW_MASK_BA); 24 psw_mask = __extract_psw();
25 S390_lowcore.external_new_psw.mask = psw_mask; 25 S390_lowcore.external_new_psw.mask = psw_mask;
26 psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT; 26 psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
27 S390_lowcore.ext_int_code = 0; 27 S390_lowcore.ext_int_code = 0;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index ce0cbd6ba7ca..c837bcacf218 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -764,9 +764,6 @@ static int __init setup_hwcaps(void)
764 get_cpu_id(&cpu_id); 764 get_cpu_id(&cpu_id);
765 add_device_randomness(&cpu_id, sizeof(cpu_id)); 765 add_device_randomness(&cpu_id, sizeof(cpu_id));
766 switch (cpu_id.machine) { 766 switch (cpu_id.machine) {
767 case 0x9672:
768 strcpy(elf_platform, "g5");
769 break;
770 case 0x2064: 767 case 0x2064:
771 case 0x2066: 768 case 0x2066:
772 default: /* Use "z900" as default for 64 bit kernels. */ 769 default: /* Use "z900" as default for 64 bit kernels. */
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 8c56929c8d82..5378c3ea1b98 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -382,3 +382,4 @@ SYSCALL(sys_sendmsg,compat_sys_sendmsg) /* 370 */
382SYSCALL(sys_recvfrom,compat_sys_recvfrom) 382SYSCALL(sys_recvfrom,compat_sys_recvfrom)
383SYSCALL(sys_recvmsg,compat_sys_recvmsg) 383SYSCALL(sys_recvmsg,compat_sys_recvmsg)
384SYSCALL(sys_shutdown,sys_shutdown) 384SYSCALL(sys_shutdown,sys_shutdown)
385SYSCALL(sys_mlock2,compat_sys_mlock2)
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
index 73239bb576c4..21a5df99552b 100644
--- a/arch/s390/kernel/trace.c
+++ b/arch/s390/kernel/trace.c
@@ -9,11 +9,11 @@
9#define CREATE_TRACE_POINTS 9#define CREATE_TRACE_POINTS
10#include <asm/trace/diag.h> 10#include <asm/trace/diag.h>
11 11
12EXPORT_TRACEPOINT_SYMBOL(diagnose); 12EXPORT_TRACEPOINT_SYMBOL(s390_diagnose);
13 13
14static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth); 14static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth);
15 15
16void trace_diagnose_norecursion(int diag_nr) 16void trace_s390_diagnose_norecursion(int diag_nr)
17{ 17{
18 unsigned long flags; 18 unsigned long flags;
19 unsigned int *depth; 19 unsigned int *depth;
@@ -22,7 +22,7 @@ void trace_diagnose_norecursion(int diag_nr)
22 depth = this_cpu_ptr(&diagnose_trace_depth); 22 depth = this_cpu_ptr(&diagnose_trace_depth);
23 if (*depth == 0) { 23 if (*depth == 0) {
24 (*depth)++; 24 (*depth)++;
25 trace_diagnose(diag_nr); 25 trace_s390_diagnose(diag_nr);
26 (*depth)--; 26 (*depth)--;
27 } 27 }
28 local_irq_restore(flags); 28 local_irq_restore(flags);
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 373e32346d68..6a75352f453c 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1030,8 +1030,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1030 src_id, 0); 1030 src_id, 0);
1031 1031
1032 /* sending vcpu invalid */ 1032 /* sending vcpu invalid */
1033 if (src_id >= KVM_MAX_VCPUS || 1033 if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
1034 kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
1035 return -EINVAL; 1034 return -EINVAL;
1036 1035
1037 if (sclp.has_sigpif) 1036 if (sclp.has_sigpif)
@@ -1110,6 +1109,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
 				   irq->u.emerg.code, 0);
 
+	/* sending vcpu invalid */
+	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
+		return -EINVAL;
+
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
 	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
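
Both hunks swap an open-coded bounds check plus kvm_get_vcpu() array lookup for kvm_get_vcpu_by_id(), since a vcpu's user-assigned id need not equal its position in the vcpu array. A hedged sketch of the lookup-by-id idea (simplified data structures, not the KVM implementation):

    #include <stddef.h>

    struct vcpu { int vcpu_id; };

    /* Illustrative only: search by id instead of trusting id == array index. */
    static struct vcpu *get_vcpu_by_id(struct vcpu *vcpus, size_t n, int id)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    if (vcpus[i].vcpu_id == id)
                            return &vcpus[i];
            return NULL;
    }

    int main(void)
    {
            struct vcpu v[2] = { { .vcpu_id = 4 }, { .vcpu_id = 7 } };
            return get_vcpu_by_id(v, 2, 7) ? 0 : 1;
    }
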
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 8fe2f1c722dc..846589281b04 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -342,12 +342,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		r = 0;
 		break;
 	case KVM_CAP_S390_VECTOR_REGISTERS:
-		if (MACHINE_HAS_VX) {
+		mutex_lock(&kvm->lock);
+		if (atomic_read(&kvm->online_vcpus)) {
+			r = -EBUSY;
+		} else if (MACHINE_HAS_VX) {
 			set_kvm_facility(kvm->arch.model.fac->mask, 129);
 			set_kvm_facility(kvm->arch.model.fac->list, 129);
 			r = 0;
 		} else
 			r = -EINVAL;
+		mutex_unlock(&kvm->lock);
 		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
 			 r ? "(not available)" : "(success)");
 		break;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 77191b85ea7a..d76b51cb4b62 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -660,7 +660,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
 
 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
 
-	if (!MACHINE_HAS_PFMF)
+	if (!test_kvm_facility(vcpu->kvm, 8))
 		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index da690b69f9fe..77c22d685c7a 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -291,12 +291,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
 		   u16 cpu_addr, u32 parameter, u64 *status_reg)
 {
 	int rc;
-	struct kvm_vcpu *dst_vcpu;
+	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
 
-	if (cpu_addr >= KVM_MAX_VCPUS)
-		return SIGP_CC_NOT_OPERATIONAL;
-
-	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
 	if (!dst_vcpu)
 		return SIGP_CC_NOT_OPERATIONAL;
 
@@ -478,7 +474,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
 	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
 
 	if (order_code == SIGP_EXTERNAL_CALL) {
-		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+		dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
 		BUG_ON(dest_vcpu == NULL);
 
 		kvm_s390_vcpu_wakeup(dest_vcpu);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c3c07d3505ba..c722400c7697 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -48,37 +48,13 @@ EXPORT_SYMBOL(zero_page_mask);
 
 static void __init setup_zero_pages(void)
 {
-	struct cpuid cpu_id;
 	unsigned int order;
 	struct page *page;
 	int i;
 
-	get_cpu_id(&cpu_id);
-	switch (cpu_id.machine) {
-	case 0x9672:	/* g5 */
-	case 0x2064:	/* z900 */
-	case 0x2066:	/* z900 */
-	case 0x2084:	/* z990 */
-	case 0x2086:	/* z990 */
-	case 0x2094:	/* z9-109 */
-	case 0x2096:	/* z9-109 */
-		order = 0;
-		break;
-	case 0x2097:	/* z10 */
-	case 0x2098:	/* z10 */
-	case 0x2817:	/* z196 */
-	case 0x2818:	/* z196 */
-		order = 2;
-		break;
-	case 0x2827:	/* zEC12 */
-	case 0x2828:	/* zEC12 */
-		order = 5;
-		break;
-	case 0x2964:	/* z13 */
-	default:
-		order = 7;
-		break;
-	}
+	/* Latest machines require a mapping granularity of 512KB */
+	order = 7;
+
 	/* Limit number of empty zero pages for small memory sizes */
 	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
 		order--;
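
For reference, the hard-coded order matches the 512KB granularity named in the new comment: with 4KB pages, 2^7 pages are 128 * 4KB = 512KB. A throwaway check of that arithmetic (the 4KB PAGE_SIZE is assumed):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            unsigned int order = 7;                      /* 2^7 pages */
            unsigned long bytes = (1UL << order) * PAGE_SIZE;

            /* 128 * 4KB = 512KB, matching the comment in the patch */
            printf("order %u -> %lu KB\n", order, bytes >> 10);

            /* the follow-on loop shrinks this when totalram_pages/1024 < 2^order */
            return 0;
    }
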
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 6e552af08c76..ea01477b4aa6 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -31,9 +31,6 @@
 #include <linux/security.h>
 #include <asm/pgalloc.h>
 
-unsigned long mmap_rnd_mask;
-static unsigned long mmap_align_mask;
-
 static unsigned long stack_maxrandom_size(void)
 {
 	if (!(current->flags & PF_RANDOMIZE))
@@ -62,10 +59,7 @@ static inline int mmap_is_legacy(void)
 
 unsigned long arch_mmap_rnd(void)
 {
-	if (is_32bit_task())
-		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
-	else
-		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
+	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
 }
 
 static unsigned long mmap_base_legacy(unsigned long rnd)
@@ -92,7 +86,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	struct vm_unmapped_area_info info;
-	int do_color_align;
 
 	if (len > TASK_SIZE - mmap_min_addr)
 		return -ENOMEM;
@@ -108,15 +101,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		return addr;
 	}
 
-	do_color_align = 0;
-	if (filp || (flags & MAP_SHARED))
-		do_color_align = !is_32bit_task();
-
 	info.flags = 0;
 	info.length = len;
 	info.low_limit = mm->mmap_base;
 	info.high_limit = TASK_SIZE;
-	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	if (filp || (flags & MAP_SHARED))
+		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
+	else
+		info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
 	return vm_unmapped_area(&info);
 }
@@ -130,7 +122,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
-	int do_color_align;
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE - mmap_min_addr)
@@ -148,15 +139,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
-	do_color_align = 0;
-	if (filp || (flags & MAP_SHARED))
-		do_color_align = !is_32bit_task();
-
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
 	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
 	info.high_limit = mm->mmap_base;
-	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	if (filp || (flags & MAP_SHARED))
+		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
+	else
+		info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
 	addr = vm_unmapped_area(&info);
 
@@ -254,35 +244,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
 	}
 }
-
-static int __init setup_mmap_rnd(void)
-{
-	struct cpuid cpu_id;
-
-	get_cpu_id(&cpu_id);
-	switch (cpu_id.machine) {
-	case 0x9672:
-	case 0x2064:
-	case 0x2066:
-	case 0x2084:
-	case 0x2086:
-	case 0x2094:
-	case 0x2096:
-	case 0x2097:
-	case 0x2098:
-	case 0x2817:
-	case 0x2818:
-	case 0x2827:
-	case 0x2828:
-		mmap_rnd_mask = 0x7ffUL;
-		mmap_align_mask = 0UL;
-		break;
-	case 0x2964:	/* z13 */
-	default:
-		mmap_rnd_mask = 0x3ff80UL;
-		mmap_align_mask = 0x7fUL;
-		break;
-	}
-	return 0;
-}
-early_initcall(setup_mmap_rnd);
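
Assuming the new MMAP_RND_MASK keeps the 0x3ff80 value deleted here (the constant now lives in a header not shown in this diff, so treat that as an assumption), the randomization arithmetic works out to roughly a 1GB window in 512KB-aligned steps; a quick sketch:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    /* assumed: the patch moves the old z13 value into a header as MMAP_RND_MASK */
    #define MMAP_RND_MASK 0x3ff80UL

    int main(void)
    {
            unsigned long max_off = MMAP_RND_MASK << PAGE_SHIFT;
            unsigned long step = (MMAP_RND_MASK & -MMAP_RND_MASK) << PAGE_SHIFT;

            /* ~1 GB of spread, in 512 KB-aligned steps */
            printf("max offset: %lu MB, step: %lu KB\n", max_off >> 20, step >> 10);
            return 0;
    }
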
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 37d10f74425a..d348f2c09a1e 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -33,7 +33,7 @@ unsigned long *dma_alloc_cpu_table(void)
 		return NULL;
 
 	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
-		*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
+		*entry = ZPCI_TABLE_INVALID;
 	return table;
 }
 
39 39
@@ -51,7 +51,7 @@ static unsigned long *dma_alloc_page_table(void)
 		return NULL;
 
 	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
-		*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
+		*entry = ZPCI_PTE_INVALID;
 	return table;
 }
 
@@ -95,7 +95,7 @@ static unsigned long *dma_get_page_table_origin(unsigned long *entry)
 	return pto;
 }
 
-static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
 {
 	unsigned long *sto, *pto;
 	unsigned int rtx, sx, px;
@@ -114,20 +114,10 @@ static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr
 	return &pto[px];
 }
 
-void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr,
-			  dma_addr_t dma_addr, int flags)
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
 {
-	unsigned long *entry;
-
-	entry = dma_walk_cpu_trans(dma_table, dma_addr);
-	if (!entry) {
-		WARN_ON_ONCE(1);
-		return;
-	}
-
 	if (flags & ZPCI_PTE_INVALID) {
 		invalidate_pt_entry(entry);
-		return;
 	} else {
 		set_pt_pfaa(entry, page_addr);
 		validate_pt_entry(entry);
@@ -146,18 +136,25 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
 	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
 	dma_addr_t start_dma_addr = dma_addr;
 	unsigned long irq_flags;
+	unsigned long *entry;
 	int i, rc = 0;
 
 	if (!nr_pages)
 		return -EINVAL;
 
 	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
-	if (!zdev->dma_table)
+	if (!zdev->dma_table) {
+		rc = -EINVAL;
 		goto no_refresh;
+	}
 
 	for (i = 0; i < nr_pages; i++) {
-		dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr,
-				     flags);
+		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+		if (!entry) {
+			rc = -ENOMEM;
+			goto undo_cpu_trans;
+		}
+		dma_update_cpu_trans(entry, page_addr, flags);
 		page_addr += PAGE_SIZE;
 		dma_addr += PAGE_SIZE;
 	}
@@ -176,6 +173,18 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
 
 	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
 				nr_pages * PAGE_SIZE);
+undo_cpu_trans:
+	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+		flags = ZPCI_PTE_INVALID;
+		while (i-- > 0) {
+			page_addr -= PAGE_SIZE;
+			dma_addr -= PAGE_SIZE;
+			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+			if (!entry)
+				break;
+			dma_update_cpu_trans(entry, page_addr, flags);
+		}
+	}
 
 no_refresh:
 	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@@ -260,6 +269,16 @@ out:
 	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
 }
 
+static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
+{
+	struct {
+		unsigned long rc;
+		unsigned long addr;
+	} __packed data = {rc, addr};
+
+	zpci_err_hex(&data, sizeof(data));
+}
+
 static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 				     unsigned long offset, size_t size,
 				     enum dma_data_direction direction,
@@ -270,33 +289,40 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 	unsigned long pa = page_to_phys(page) + offset;
 	int flags = ZPCI_PTE_VALID;
 	dma_addr_t dma_addr;
+	int ret;
 
 	/* This rounds up number of pages based on size and offset */
 	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
 	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
-	if (iommu_page_index == -1)
+	if (iommu_page_index == -1) {
+		ret = -ENOSPC;
 		goto out_err;
+	}
 
 	/* Use rounded up size */
 	size = nr_pages * PAGE_SIZE;
 
 	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
-	if (dma_addr + size > zdev->end_dma)
+	if (dma_addr + size > zdev->end_dma) {
+		ret = -ERANGE;
 		goto out_free;
+	}
 
 	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
 		flags |= ZPCI_TABLE_PROTECTED;
 
-	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
-		atomic64_add(nr_pages, &zdev->mapped_pages);
-		return dma_addr + (offset & ~PAGE_MASK);
-	}
+	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
+	if (ret)
+		goto out_free;
+
+	atomic64_add(nr_pages, &zdev->mapped_pages);
+	return dma_addr + (offset & ~PAGE_MASK);
 
 out_free:
 	dma_free_iommu(zdev, iommu_page_index, nr_pages);
 out_err:
 	zpci_err("map error:\n");
-	zpci_err_hex(&pa, sizeof(pa));
+	zpci_err_dma(ret, pa);
 	return DMA_ERROR_CODE;
 }
 
@@ -306,14 +332,16 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long iommu_page_index;
-	int npages;
+	int npages, ret;
 
 	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	dma_addr = dma_addr & PAGE_MASK;
-	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
-			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
+	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
+			       ZPCI_PTE_INVALID);
+	if (ret) {
 		zpci_err("unmap error:\n");
-		zpci_err_hex(&dma_addr, sizeof(dma_addr));
+		zpci_err_dma(ret, dma_addr);
+		return;
 	}
 
 	atomic64_add(npages, &zdev->unmapped_pages);
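
The undo_cpu_trans path added above is a standard partial-failure rollback: on error, walk the loop counter back over the entries that were already mapped and invalidate them. The shape of that pattern, stripped to a runnable sketch (map_one/unmap_one are stand-ins, not the zPCI functions):

    #include <stdio.h>

    #define NR 4

    static int map_one(int i)    { return i == 2 ? -1 : 0; } /* fail on page 2 */
    static void unmap_one(int i) { printf("undo %d\n", i); }

    /* Map pages 0..NR-1; on failure, walk i back and undo what succeeded. */
    static int map_range(void)
    {
            int i, rc = 0;

            for (i = 0; i < NR; i++) {
                    rc = map_one(i);
                    if (rc)
                            break;
            }
            if (rc)
                    while (i-- > 0)
                            unmap_one(i);
            return rc;
    }

    int main(void)
    {
            map_range();
            return 0;
    }
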
diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h
index e6820c86e8c7..47ebd5b5ed55 100644
--- a/arch/sh/include/uapi/asm/unistd_64.h
+++ b/arch/sh/include/uapi/asm/unistd_64.h
@@ -278,7 +278,7 @@
 #define __NR_fsetxattr		256
 #define __NR_getxattr		257
 #define __NR_lgetxattr		258
-#define __NR_fgetxattr		269
+#define __NR_fgetxattr		259
 #define __NR_listxattr		260
 #define __NR_llistxattr		261
 #define __NR_flistxattr		262
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 7cfd7f153966..4dca18347ee9 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -10,7 +10,7 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *
  * ppc:
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index b0da5aedb336..3091267c5cc3 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -9,7 +9,7 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/perf_event.h>
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index bb509cee3b59..8767060d70fb 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -21,7 +21,7 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  * Copyright (C) 2009 Google, Inc., Stephane Eranian
  */
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 25ed4098640e..e3abe6f3156d 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -131,7 +131,7 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
 # The wrappers will select whether using "malloc" or the kernel allocator.
 LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
 
-LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt)) -lrt
+LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
 
 # Used by link-vmlinux.sh which has special support for um link
 export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
diff --git a/arch/um/drivers/net_user.c b/arch/um/drivers/net_user.c
index e697a4136707..e9f8445861dc 100644
--- a/arch/um/drivers/net_user.c
+++ b/arch/um/drivers/net_user.c
@@ -249,21 +249,23 @@ void close_addr(unsigned char *addr, unsigned char *netmask, void *arg)
 
 char *split_if_spec(char *str, ...)
 {
-	char **arg, *end;
+	char **arg, *end, *ret = NULL;
 	va_list ap;
 
 	va_start(ap, str);
 	while ((arg = va_arg(ap, char **)) != NULL) {
 		if (*str == '\0')
-			return NULL;
+			goto out;
 		end = strchr(str, ',');
 		if (end != str)
 			*arg = str;
 		if (end == NULL)
-			return NULL;
+			goto out;
 		*end++ = '\0';
 		str = end;
 	}
+	ret = str;
+out:
 	va_end(ap);
-	return str;
+	return ret;
 }
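
The split_if_spec() fix routes every exit through a single label so va_end() is always reached, and the parsed tail is returned only when all arguments were consumed. A self-contained version of the same control flow (illustrative, compiled as plain C):

    #include <stdarg.h>
    #include <stdio.h>
    #include <string.h>

    /* Same shape as split_if_spec(): every return path must pass va_end(). */
    static char *split_spec(char *str, ...)
    {
            char **arg, *end, *ret = NULL;
            va_list ap;

            va_start(ap, str);
            while ((arg = va_arg(ap, char **)) != NULL) {
                    if (*str == '\0')
                            goto out;
                    end = strchr(str, ',');
                    if (end != str)
                            *arg = str;
                    if (end == NULL)
                            goto out;
                    *end++ = '\0';
                    str = end;
            }
            ret = str;
    out:
            va_end(ap);
            return ret;
    }

    int main(void)
    {
            char spec[] = "eth0,10.0.0.1,rest";
            char *name = NULL, *addr = NULL, *tail;

            tail = split_spec(spec, &name, &addr, (char **)NULL);
            printf("%s %s %s\n", name, addr, tail);
            return 0;
    }
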
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 57acbd67d85d..fc8be0e3a4ff 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
 	struct ksignal ksig;
 	int handled_sig = 0;
 
-	while (get_signal(&ksig)) {
+	if (get_signal(&ksig)) {
 		handled_sig = 1;
 		/* Whee!  Actually deliver the signal.  */
 		handle_signal(&ksig, regs);
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 0033e96c3f09..9011a88353de 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -23,7 +23,6 @@
 #include <stdarg.h>
 #include <linux/types.h>
 #include <linux/edd.h>
-#include <asm/boot.h>
 #include <asm/setup.h>
 #include "bitops.h"
 #include "ctype.h"
diff --git a/arch/x86/boot/video-mode.c b/arch/x86/boot/video-mode.c
index aa8a96b052e3..95c7a818c0ed 100644
--- a/arch/x86/boot/video-mode.c
+++ b/arch/x86/boot/video-mode.c
@@ -19,6 +19,8 @@
 #include "video.h"
 #include "vesa.h"
 
+#include <uapi/asm/boot.h>
+
 /*
  * Common variables
  */
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
index 05111bb8d018..77780e386e9b 100644
--- a/arch/x86/boot/video.c
+++ b/arch/x86/boot/video.c
@@ -13,6 +13,8 @@
  * Select video mode
  */
 
+#include <uapi/asm/boot.h>
+
 #include "boot.h"
 #include "video.h"
 #include "vesa.h"
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 53616ca03244..a55697d19824 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -509,6 +509,17 @@ END(irq_entries_start)
 	 * tracking that we're in kernel mode.
 	 */
 	SWAPGS
+
+	/*
+	 * We need to tell lockdep that IRQs are off.  We can't do this until
+	 * we fix gsbase, and we should do it before enter_from_user_mode
+	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
+	 * the simplest way to handle it is to just call it twice if
+	 * we enter from user mode.  There's no reason to optimize this since
+	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
+	 */
+	TRACE_IRQS_OFF
+
 #ifdef CONFIG_CONTEXT_TRACKING
 	call enter_from_user_mode
 #endif
@@ -1049,12 +1060,18 @@ ENTRY(error_entry)
 	SWAPGS
 
 .Lerror_entry_from_usermode_after_swapgs:
+	/*
+	 * We need to tell lockdep that IRQs are off.  We can't do this until
+	 * we fix gsbase, and we should do it before enter_from_user_mode
+	 * (which can take locks).
+	 */
+	TRACE_IRQS_OFF
 #ifdef CONFIG_CONTEXT_TRACKING
 	call enter_from_user_mode
 #endif
+	ret
 
 .Lerror_entry_done:
-
 	TRACE_IRQS_OFF
 	ret
 
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 9f3905697f12..690b4027e17c 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -35,7 +35,7 @@
 #define MSR_IA32_PERFCTR0		0x000000c1
 #define MSR_IA32_PERFCTR1		0x000000c2
 #define MSR_FSB_FREQ			0x000000cd
-#define MSR_NHM_PLATFORM_INFO		0x000000ce
+#define MSR_PLATFORM_INFO		0x000000ce
 
 #define MSR_NHM_SNB_PKG_CST_CFG_CTL	0x000000e2
 #define NHM_C3_AUTO_DEMOTE		(1UL << 25)
@@ -44,7 +44,6 @@
 #define SNB_C1_AUTO_UNDEMOTE		(1UL << 27)
 #define SNB_C3_AUTO_UNDEMOTE		(1UL << 28)
 
-#define MSR_PLATFORM_INFO		0x000000ce
 #define MSR_MTRRcap			0x000000fe
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 #define MSR_IA32_BBL_CR_CTL3		0x0000011e
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index c5b7fb2774d0..cc071c6f7d4d 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -9,19 +9,21 @@
 #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
+#define PMD_PAGE_SIZE		(_AC(1, UL) << PMD_SHIFT)
+#define PMD_PAGE_MASK		(~(PMD_PAGE_SIZE-1))
+
+#define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
+#define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))
+
 #define __PHYSICAL_MASK		((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
 #define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
 
-/* Cast PAGE_MASK to a signed type so that it is sign-extended if
+/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
    virtual addresses are 32-bits but physical addresses are larger
    (ie, 32-bit PAE). */
 #define PHYSICAL_PAGE_MASK	(((signed long)PAGE_MASK) & __PHYSICAL_MASK)
-
-#define PMD_PAGE_SIZE		(_AC(1, UL) << PMD_SHIFT)
-#define PMD_PAGE_MASK		(~(PMD_PAGE_SIZE-1))
-
-#define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
-#define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))
+#define PHYSICAL_PMD_PAGE_MASK	(((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
+#define PHYSICAL_PUD_PAGE_MASK	(((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
 
 #define HPAGE_SHIFT		PMD_SHIFT
 #define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
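
The point of casting through (signed long) is that the *PAGE_MASK macros are long-sized, so on 32-bit PAE they must sign-extend before being anded with the wider physical mask, or the physical bits above bit 31 get cleared. A worked example with an assumed 36-bit physical address width:

    #include <stdio.h>
    #include <stdint.h>

    /* 32-bit PAE-like setup: 32-bit longs, 36-bit physical addresses. */
    #define PMD_SHIFT 21
    #define PHYSICAL_MASK ((1ULL << 36) - 1)

    int main(void)
    {
            uint32_t mask32 = ~((1UL << PMD_SHIFT) - 1);     /* 0xffe00000 */
            /* unsigned: zero-extends, clearing physical bits 32..35 */
            uint64_t wrong = (uint64_t)mask32 & PHYSICAL_MASK;
            /* signed cast first: sign-extends, keeping bits 32..35 */
            uint64_t right = (uint64_t)(int64_t)(int32_t)mask32 & PHYSICAL_MASK;

            printf("zero-extended: %#llx\n", (unsigned long long)wrong);
            printf("sign-extended: %#llx\n", (unsigned long long)right);
            return 0;
    }
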
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index dd5b0aa9dd2f..a471cadb9630 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -279,17 +279,14 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
 static inline pudval_t pud_pfn_mask(pud_t pud)
 {
 	if (native_pud_val(pud) & _PAGE_PSE)
-		return PUD_PAGE_MASK & PHYSICAL_PAGE_MASK;
+		return PHYSICAL_PUD_PAGE_MASK;
 	else
 		return PTE_PFN_MASK;
 }
 
 static inline pudval_t pud_flags_mask(pud_t pud)
 {
-	if (native_pud_val(pud) & _PAGE_PSE)
-		return ~(PUD_PAGE_MASK & (pudval_t)PHYSICAL_PAGE_MASK);
-	else
-		return ~PTE_PFN_MASK;
+	return ~pud_pfn_mask(pud);
 }
 
 static inline pudval_t pud_flags(pud_t pud)
@@ -300,17 +297,14 @@ static inline pudval_t pud_flags(pud_t pud)
 static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
 {
 	if (native_pmd_val(pmd) & _PAGE_PSE)
-		return PMD_PAGE_MASK & PHYSICAL_PAGE_MASK;
+		return PHYSICAL_PMD_PAGE_MASK;
 	else
 		return PTE_PFN_MASK;
 }
 
 static inline pmdval_t pmd_flags_mask(pmd_t pmd)
 {
-	if (native_pmd_val(pmd) & _PAGE_PSE)
-		return ~(PMD_PAGE_MASK & (pmdval_t)PHYSICAL_PAGE_MASK);
-	else
-		return ~PTE_PFN_MASK;
+	return ~pmd_pfn_mask(pmd);
 }
 
 static inline pmdval_t pmd_flags(pmd_t pmd)
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 48d34d28f5a6..cd0fc0cc78bc 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_X86_PLATFORM_H
 #define _ASM_X86_PLATFORM_H
 
-#include <asm/pgtable_types.h>
 #include <asm/bootparam.h>
 
 struct mpc_bus;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4ddd780aeac9..c2b7522cbf35 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -273,10 +273,9 @@ __setup("nosmap", setup_disable_smap);
 
 static __always_inline void setup_smap(struct cpuinfo_x86 *c)
 {
-	unsigned long eflags;
+	unsigned long eflags = native_save_fl();
 
 	/* This should have been cleared long ago */
-	raw_local_save_flags(eflags);
 	BUG_ON(eflags & X86_EFLAGS_AC);
 
 	if (cpu_has(c, X86_FEATURE_SMAP)) {
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 7fc27f1cca58..b3e94ef461fd 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -698,3 +698,4 @@ int __init microcode_init(void)
 	return error;
 
 }
+late_initcall(microcode_init);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4562cf070c27..2bf79d7c97df 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -5,7 +5,7 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  * Copyright (C) 2009 Google, Inc., Stephane Eranian
  *
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 499f533dd3cc..d0e35ebb2adb 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -5,7 +5,7 @@
  * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
  * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  * Copyright (C) 2009 Google, Inc., Stephane Eranian
  *
@@ -387,7 +387,7 @@ struct cpu_hw_events {
 /* Check flags and event code/umask, and set the HSW N/A flag */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
 	__EVENT_CONSTRAINT(code, n, \
-			  INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
+			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
 			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
 
 
@@ -627,6 +627,7 @@ struct x86_perf_task_context {
 	u64 lbr_from[MAX_LBR_ENTRIES];
 	u64 lbr_to[MAX_LBR_ENTRIES];
 	u64 lbr_info[MAX_LBR_ENTRIES];
+	int tos;
 	int lbr_callstack_users;
 	int lbr_stack_state;
 };
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f63360be2238..e2a430021e46 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
-	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
+	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 377e8f8ed391..a316ca96f1b6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
 	if (event->attach_state & PERF_ATTACH_TASK)
-		return perf_cgroup_from_task(event->hw.target);
+		return perf_cgroup_from_task(event->hw.target, event->ctx);
 
 	return event->cgrp;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index bfd0b717e944..659f01e165d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 	}
 
 	mask = x86_pmu.lbr_nr - 1;
-	tos = intel_pmu_lbr_tos();
+	tos = task_ctx->tos;
 	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
 		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+	wrmsrl(x86_pmu.lbr_tos, tos);
 	task_ctx->lbr_stack_state = LBR_NONE;
 }
 
@@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
+	task_ctx->tos = tos;
 	task_ctx->lbr_stack_state = LBR_VALID;
 }
 
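
The new tos field makes LBR context save and restore symmetric: entries are written back relative to the top-of-stack captured at save time, and the TOS register itself is restored, instead of trusting the live hardware TOS. A sketch of that ring-buffer discipline, with plain arrays standing in for the LBR MSRs (illustrative names only):

    #include <stdio.h>

    #define NR_LBR 8

    struct lbr_ctx {
            unsigned long from[NR_LBR];
            int tos;                 /* top-of-stack, as the patch adds */
    };

    static unsigned long hw_from[NR_LBR];
    static int hw_tos;

    /* Restore entries relative to the saved TOS, then restore TOS itself;
     * reading the live hardware TOS here (the old code) could mismatch the
     * saved entries if something moved it in between. */
    static void ctx_restore(const struct lbr_ctx *ctx)
    {
            int mask = NR_LBR - 1, i;

            for (i = 0; i < ctx->tos; i++)
                    hw_from[(ctx->tos - i) & mask] = ctx->from[i];
            hw_tos = ctx->tos;
    }

    int main(void)
    {
            struct lbr_ctx ctx = { .from = {1, 2, 3}, .tos = 3 };

            ctx_restore(&ctx);
            printf("tos=%d slot3=%lu\n", hw_tos, hw_from[3]);
            return 0;
    }
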
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index ef29b742cea7..31c6a60505e6 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -385,20 +385,19 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
  */
 void fpu__init_prepare_fx_sw_frame(void)
 {
-	int fsave_header_size = sizeof(struct fregs_state);
 	int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
 
-	if (config_enabled(CONFIG_X86_32))
-		size += fsave_header_size;
-
 	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
 	fx_sw_reserved.extended_size = size;
 	fx_sw_reserved.xfeatures = xfeatures_mask;
 	fx_sw_reserved.xstate_size = xstate_size;
 
-	if (config_enabled(CONFIG_IA32_EMULATION)) {
+	if (config_enabled(CONFIG_IA32_EMULATION) ||
+	    config_enabled(CONFIG_X86_32)) {
+		int fsave_header_size = sizeof(struct fregs_state);
+
 		fx_sw_reserved_ia32 = fx_sw_reserved;
-		fx_sw_reserved_ia32.extended_size += fsave_header_size;
+		fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
 	}
 }
 
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 6454f2731b56..70fc312221fc 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -694,7 +694,6 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
 	if (!boot_cpu_has(X86_FEATURE_XSAVE))
 		return NULL;
 
-	xsave = &current->thread.fpu.state.xsave;
 	/*
 	 * We should not ever be requesting features that we
 	 * have not enabled.  Remember that pcntxt_mask is
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index dc5fa6a1e8d6..3512ba607361 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -1,7 +1,7 @@
 /*
  * x86 specific code for irq_work
  *
- * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/kernel.h>
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 94ea120fa21f..87e1762e2bca 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -278,6 +278,12 @@ trace:
 	/* save_mcount_regs fills in first two parameters */
 	save_mcount_regs
 
+	/*
+	 * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
+	 * set (see include/asm/ftrace.h and include/linux/ftrace.h).  Only the
+	 * ip and parent ip are used and the list function is called when
+	 * function tracing is enabled.
+	 */
 	call   *ftrace_trace_function
 
 	restore_mcount_regs
diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c
index 4f00b63d7ff3..14415aff1813 100644
--- a/arch/x86/kernel/pmem.c
+++ b/arch/x86/kernel/pmem.c
@@ -4,10 +4,22 @@
  */
 #include <linux/platform_device.h>
 #include <linux/module.h>
+#include <linux/ioport.h>
+
+static int found(u64 start, u64 end, void *data)
+{
+	return 1;
+}
 
 static __init int register_e820_pmem(void)
 {
+	char *pmem = "Persistent Memory (legacy)";
 	struct platform_device *pdev;
+	int rc;
+
+	rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found);
+	if (rc <= 0)
+		return 0;
 
 	/*
 	 * See drivers/nvdimm/e820.c for the implementation, this is
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 29db25f9a745..d2bbe343fda7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1250,8 +1250,6 @@ void __init setup_arch(char **cmdline_p)
 	if (efi_enabled(EFI_BOOT))
 		efi_apply_memmap_quirks();
 #endif
-
-	microcode_init();
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index b7ffb7c00075..cb6282c3638f 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -690,12 +690,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 	signal_setup_done(failed, ksig, stepping);
 }
 
-#ifdef CONFIG_X86_32
-#define NR_restart_syscall	__NR_restart_syscall
-#else /* !CONFIG_X86_32 */
-#define NR_restart_syscall	\
-	test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
-#endif /* CONFIG_X86_32 */
+static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
+{
+#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
+	return __NR_restart_syscall;
+#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
+	return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
+		__NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
+#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
+}
 
 /*
  * Note that 'init' is a special process: it doesn't get signals it doesn't
@@ -724,7 +727,7 @@ void do_signal(struct pt_regs *regs)
 			break;
 
 		case -ERESTART_RESTARTBLOCK:
-			regs->ax = NR_restart_syscall;
+			regs->ax = get_nr_restart_syscall(regs);
 			regs->ip -= 2;
 			break;
 		}
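
For an X32 task, orig_ax still carries __X32_SYSCALL_BIT, and the new helper propagates it so the restarted syscall re-enters through the X32 path. A small illustration (the numeric values below match x86_64 as far as I know, but treat them as assumptions):

    #include <stdio.h>

    /* assumed values for illustration; the real ones live in the syscall tables */
    #define __NR_restart_syscall 219
    #define __X32_SYSCALL_BIT    0x40000000UL

    /* An X32 task's orig_ax carries the X32 bit; propagating it makes the
     * restarted syscall take the X32 entry path again. */
    static unsigned long nr_restart(unsigned long orig_ax)
    {
            return __NR_restart_syscall | (orig_ax & __X32_SYSCALL_BIT);
    }

    int main(void)
    {
            printf("native: %lu\n", nr_restart(0));
            printf("x32:    %#lx\n", nr_restart(__X32_SYSCALL_BIT | 1));
            return 0;
    }
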
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 892ee2e5ecbc..fbabe4fcc7fb 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
  */
 #define UDELAY_10MS_DEFAULT 10000
 
-static unsigned int init_udelay = INT_MAX;
+static unsigned int init_udelay = UINT_MAX;
 
 static int __init cpu_init_udelay(char *str)
 {
@@ -522,14 +522,15 @@ early_param("cpu_init_udelay", cpu_init_udelay);
 static void __init smp_quirk_init_udelay(void)
 {
 	/* if cmdline changed it from default, leave it alone */
-	if (init_udelay != INT_MAX)
+	if (init_udelay != UINT_MAX)
 		return;
 
 	/* if modern processor, use no delay */
 	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
-	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
+	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
 		init_udelay = 0;
-
+		return;
+	}
 	/* else, use legacy delay */
 	init_udelay = UDELAY_10MS_DEFAULT;
 }
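
The INT_MAX to UINT_MAX change makes the "not yet set" sentinel the maximum of the variable's own unsigned type, so no signed constant is stuffed into an unsigned int and the later comparison cannot collide with a plausible user-supplied delay. A trivial sketch of the distinction:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int u = UINT_MAX;

            /* INT_MAX (0x7fffffff) is itself a value a user could pass, so it
             * makes a poor "unset" sentinel for an unsigned variable; UINT_MAX
             * is the type's own maximum and unambiguous here. */
            printf("INT_MAX=%d UINT_MAX=%u sentinel ok: %d\n",
                   INT_MAX, UINT_MAX, u == UINT_MAX);
            return 0;
    }
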
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 87acc5221740..af823a388c19 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7394,11 +7394,6 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 
 	switch (type) {
 	case VMX_VPID_EXTENT_ALL_CONTEXT:
-		if (get_vmcs12(vcpu)->virtual_processor_id == 0) {
-			nested_vmx_failValid(vcpu,
-				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-			return 1;
-		}
 		__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
 		nested_vmx_succeed(vcpu);
 		break;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 00462bd63129..eed32283d22c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2763,6 +2763,26 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
+{
+	return (!lapic_in_kernel(vcpu) ||
+		kvm_apic_accept_pic_intr(vcpu));
+}
+
+/*
+ * if userspace requested an interrupt window, check that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
+{
+	return kvm_arch_interrupt_allowed(vcpu) &&
+		!kvm_cpu_has_interrupt(vcpu) &&
+		!kvm_event_needs_reinjection(vcpu) &&
+		kvm_cpu_accept_dm_intr(vcpu);
+}
+
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 				    struct kvm_interrupt *irq)
 {
@@ -2786,6 +2806,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 		return -EEXIST;
 
 	vcpu->arch.pending_external_vector = irq->irq;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return 0;
 }
 
2791 2812
@@ -5910,23 +5931,10 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
 }
 
-/*
- * Check if userspace requested an interrupt window, and that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm))
-		return false;
-
-	if (kvm_cpu_has_interrupt(vcpu))
-		return false;
-
-	return (irqchip_split(vcpu->kvm)
-		? kvm_apic_accept_pic_intr(vcpu)
-		: kvm_arch_interrupt_allowed(vcpu));
+	return vcpu->run->request_interrupt_window &&
+		likely(!pic_in_kernel(vcpu->kvm));
 }
 
 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
@@ -5937,17 +5945,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
-	if (!irqchip_in_kernel(vcpu->kvm))
-		kvm_run->ready_for_interrupt_injection =
-			kvm_arch_interrupt_allowed(vcpu) &&
-			!kvm_cpu_has_interrupt(vcpu) &&
-			!kvm_event_needs_reinjection(vcpu);
-	else if (!pic_in_kernel(vcpu->kvm))
-		kvm_run->ready_for_interrupt_injection =
-			kvm_apic_accept_pic_intr(vcpu) &&
-			!kvm_cpu_has_interrupt(vcpu);
-	else
-		kvm_run->ready_for_interrupt_injection = 1;
+	kvm_run->ready_for_interrupt_injection =
+		pic_in_kernel(vcpu->kvm) ||
+		kvm_vcpu_ready_for_interrupt_injection(vcpu);
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -6360,8 +6360,10 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
 	int r;
-	bool req_int_win = !lapic_in_kernel(vcpu) &&
-		vcpu->run->request_interrupt_window;
+	bool req_int_win =
+		dm_request_for_irq_injection(vcpu) &&
+		kvm_cpu_accept_dm_intr(vcpu);
+
 	bool req_immediate_exit = false;
 
 	if (vcpu->requests) {
@@ -6663,7 +6665,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 		if (kvm_cpu_has_pending_timer(vcpu))
 			kvm_inject_pending_timer_irqs(vcpu);
 
-		if (dm_request_for_irq_injection(vcpu)) {
+		if (dm_request_for_irq_injection(vcpu) &&
+			kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
 			r = 0;
 			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 			++vcpu->stat.request_irq_exits;
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a035c2aa7801..0f1c6fc3ddd8 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -89,7 +89,7 @@ static struct addr_marker address_markers[] = {
 	{ 0/* VMALLOC_START */, "vmalloc() Area" },
 	{ 0/*VMALLOC_END*/,     "vmalloc() End" },
 # ifdef CONFIG_HIGHMEM
-	{ 0/*PKMAP_BASE*/,      "Persisent kmap() Area" },
+	{ 0/*PKMAP_BASE*/,      "Persistent kmap() Area" },
 # endif
 	{ 0/*FIXADDR_START*/,   "Fixmap Area" },
 #endif
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index b0ae85f90f10..b2fd67da1701 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -101,19 +101,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
 	switch (type) {
 	case REG_TYPE_RM:
 		regno = X86_MODRM_RM(insn->modrm.value);
-		if (X86_REX_B(insn->rex_prefix.value) == 1)
+		if (X86_REX_B(insn->rex_prefix.value))
 			regno += 8;
 		break;
 
 	case REG_TYPE_INDEX:
 		regno = X86_SIB_INDEX(insn->sib.value);
-		if (X86_REX_X(insn->rex_prefix.value) == 1)
+		if (X86_REX_X(insn->rex_prefix.value))
 			regno += 8;
 		break;
 
 	case REG_TYPE_BASE:
 		regno = X86_SIB_BASE(insn->sib.value);
-		if (X86_REX_B(insn->rex_prefix.value) == 1)
+		if (X86_REX_B(insn->rex_prefix.value))
 			regno += 8;
 		break;
 
@@ -586,6 +586,29 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
 }
 
 /*
+ * We only want to do a 4-byte get_user() on 32-bit.  Otherwise,
+ * we might run off the end of the bounds table if we are on
+ * a 64-bit kernel and try to get 8 bytes.
+ */
+int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
+		long __user *bd_entry_ptr)
+{
+	u32 bd_entry_32;
+	int ret;
+
+	if (is_64bit_mm(mm))
+		return get_user(*bd_entry_ret, bd_entry_ptr);
+
+	/*
+	 * Note that get_user() uses the type of the *pointer* to
+	 * establish the size of the get, not the destination.
+	 */
+	ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
+	*bd_entry_ret = bd_entry_32;
+	return ret;
+}
+
+/*
  * Get the base of bounds tables pointed by specific bounds
  * directory entry.
  */
@@ -605,7 +628,7 @@ static int get_bt_addr(struct mm_struct *mm,
 	int need_write = 0;
 
 	pagefault_disable();
-	ret = get_user(bd_entry, bd_entry_ptr);
+	ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
 	pagefault_enable();
 	if (!ret)
 		break;
@@ -700,11 +723,23 @@ static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
  */
 static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
 {
-	unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
-	if (is_64bit_mm(mm))
-		return virt_space / MPX_BD_NR_ENTRIES_64;
-	else
-		return virt_space / MPX_BD_NR_ENTRIES_32;
+	unsigned long long virt_space;
+	unsigned long long GB = (1ULL << 30);
+
+	/*
+	 * This covers 32-bit emulation as well as 32-bit kernels
+	 * running on 64-bit hardware.
+	 */
+	if (!is_64bit_mm(mm))
+		return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;
+
+	/*
+	 * 'x86_virt_bits' returns what the hardware is capable
+	 * of, and returns the full >32-bit address space when
+	 * running 32-bit kernels on 64-bit hardware.
+	 */
+	virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
+	return virt_space / MPX_BD_NR_ENTRIES_64;
 }
 
 /*
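
The 4-byte get_user() trick above relies on get_user() deriving the access size from the pointer's type rather than the destination's. A userspace analogue, with memcpy standing in for get_user() (the printed value in the comment assumes a little-endian host):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* Reading through a u32-sized view fetches only 4 bytes, mirroring how
     * get_user() sizes the access from the pointer type so a 32-bit task's
     * bounds table is never over-read. */
    static int read_bd_entry(int task_is_64bit, const void *table,
                             unsigned long *out)
    {
            if (task_is_64bit) {
                    memcpy(out, table, sizeof(uint64_t)); /* 8-byte read */
            } else {
                    uint32_t v32;

                    memcpy(&v32, table, sizeof(v32));     /* 4-byte read */
                    *out = v32;
            }
            return 0;
    }

    int main(void)
    {
            uint64_t entry = 0x1122334455667788ULL;
            unsigned long v;

            read_bd_entry(0, &entry, &v);
            printf("32-bit view: %#lx\n", v);   /* 0x55667788 on little-endian */
            return 0;
    }
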
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index 7bcf06a7cd12..6eb3c8af96e2 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -50,18 +50,9 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
50 if (!found) 50 if (!found)
51 pci_add_resource(resources, &info->busn); 51 pci_add_resource(resources, &info->busn);
52 52
53 list_for_each_entry(root_res, &info->resources, list) { 53 list_for_each_entry(root_res, &info->resources, list)
54 struct resource *res; 54 pci_add_resource(resources, &root_res->res);
55 struct resource *root;
56 55
57 res = &root_res->res;
58 pci_add_resource(resources, res);
59 if (res->flags & IORESOURCE_IO)
60 root = &ioport_resource;
61 else
62 root = &iomem_resource;
63 insert_resource(root, res);
64 }
65 return; 56 return;
66 57
67default_resources: 58default_resources:
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index 06934a8a4872..e5f854ce2d72 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -211,7 +211,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
211 if (err) 211 if (err)
212 return 1; 212 return 1;
213 213
214 err = convert_fxsr_from_user(&fpx, sc.fpstate); 214 err = convert_fxsr_from_user(&fpx, (void *)sc.fpstate);
215 if (err) 215 if (err)
216 return 1; 216 return 1;
217 217
@@ -227,7 +227,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
227 { 227 {
228 struct user_i387_struct fp; 228 struct user_i387_struct fp;
229 229
230 err = copy_from_user(&fp, sc.fpstate, 230 err = copy_from_user(&fp, (void *)sc.fpstate,
231 sizeof(struct user_i387_struct)); 231 sizeof(struct user_i387_struct));
232 if (err) 232 if (err)
233 return 1; 233 return 1;
@@ -291,7 +291,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
291#endif 291#endif
292#undef PUTREG 292#undef PUTREG
293 sc.oldmask = mask; 293 sc.oldmask = mask;
294 sc.fpstate = to_fp; 294 sc.fpstate = (unsigned long)to_fp;
295 295
296 err = copy_to_user(to, &sc, sizeof(struct sigcontext)); 296 err = copy_to_user(to, &sc, sizeof(struct sigcontext));
297 if (err) 297 if (err)
@@ -468,12 +468,10 @@ long sys_sigreturn(void)
468 struct sigframe __user *frame = (struct sigframe __user *)(sp - 8); 468 struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
469 sigset_t set; 469 sigset_t set;
470 struct sigcontext __user *sc = &frame->sc; 470 struct sigcontext __user *sc = &frame->sc;
471 unsigned long __user *oldmask = &sc->oldmask;
472 unsigned long __user *extramask = frame->extramask;
473 int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long); 471 int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
474 472
475 if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) || 473 if (copy_from_user(&set.sig[0], (void *)sc->oldmask, sizeof(set.sig[0])) ||
476 copy_from_user(&set.sig[1], extramask, sig_size)) 474 copy_from_user(&set.sig[1], frame->extramask, sig_size))
477 goto segfault; 475 goto segfault;
478 476
479 set_current_blocked(&set); 477 set_current_blocked(&set);
@@ -505,6 +503,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
505{ 503{
506 struct rt_sigframe __user *frame; 504 struct rt_sigframe __user *frame;
507 int err = 0, sig = ksig->sig; 505 int err = 0, sig = ksig->sig;
506 unsigned long fp_to;
508 507
509 frame = (struct rt_sigframe __user *) 508 frame = (struct rt_sigframe __user *)
510 round_down(stack_top - sizeof(struct rt_sigframe), 16); 509 round_down(stack_top - sizeof(struct rt_sigframe), 16);
@@ -526,7 +525,10 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
526 err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs)); 525 err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
527 err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs, 526 err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
528 set->sig[0]); 527 set->sig[0]);
529 err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate); 528
529 fp_to = (unsigned long)&frame->fpstate;
530
531 err |= __put_user(fp_to, &frame->uc.uc_mcontext.fpstate);
530 if (sizeof(*set) == 16) { 532 if (sizeof(*set) == 16) {
531 err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); 533 err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
532 err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); 534 err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
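
The arch/x86/um/signal.c hunks all address one mismatch: sc.fpstate is
declared as an integer in the sigcontext layout but carries a user-space
pointer, so every access needs an explicit cast. A hedged sketch of both
directions; the struct and helper names are illustrative:

/* Sketch: an ABI field declared as an integer that carries a pointer. */
struct sigctx_like {
	unsigned long fpstate;		/* really a user-space address */
};

static void store_fp(struct sigctx_like *sc, void __user *fp)
{
	sc->fpstate = (unsigned long)fp;	/* pointer -> integer */
}

static void __user *load_fp(const struct sigctx_like *sc)
{
	return (void __user *)sc->fpstate;	/* integer -> pointer */
}
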
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index ac161db63388..cb5e266a8bf7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void)
2495{ 2495{
2496 x86_init.paging.pagetable_init = xen_pagetable_init; 2496 x86_init.paging.pagetable_init = xen_pagetable_init;
2497 2497
2498 /* Optimization - we can use the HVM one but it has no idea which 2498 if (xen_feature(XENFEAT_auto_translated_physmap))
2499 * VCPUs are descheduled - which means that it will needlessly IPI
2500 * them. Xen knows so let it do the job.
2501 */
2502 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2503 pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
2504 return; 2499 return;
2505 } 2500
2506 pv_mmu_ops = xen_mmu_ops; 2501 pv_mmu_ops = xen_mmu_ops;
2507 2502
2508 memset(dummy_mapping, 0xff, PAGE_SIZE); 2503 memset(dummy_mapping, 0xff, PAGE_SIZE);
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index feddabdab448..3705eabd7e22 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled)
68 68
69void xen_arch_pre_suspend(void) 69void xen_arch_pre_suspend(void)
70{ 70{
71 int cpu;
72
73 for_each_online_cpu(cpu)
74 xen_pmu_finish(cpu);
75
76 if (xen_pv_domain()) 71 if (xen_pv_domain())
77 xen_pv_pre_suspend(); 72 xen_pv_pre_suspend();
78} 73}
79 74
80void xen_arch_post_suspend(int cancelled) 75void xen_arch_post_suspend(int cancelled)
81{ 76{
82 int cpu;
83
84 if (xen_pv_domain()) 77 if (xen_pv_domain())
85 xen_pv_post_suspend(cancelled); 78 xen_pv_post_suspend(cancelled);
86 else 79 else
87 xen_hvm_post_suspend(cancelled); 80 xen_hvm_post_suspend(cancelled);
88
89 for_each_online_cpu(cpu)
90 xen_pmu_init(cpu);
91} 81}
92 82
93static void xen_vcpu_notify_restore(void *data) 83static void xen_vcpu_notify_restore(void *data)
@@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data)
106 96
107void xen_arch_resume(void) 97void xen_arch_resume(void)
108{ 98{
99 int cpu;
100
109 on_each_cpu(xen_vcpu_notify_restore, NULL, 1); 101 on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
102
103 for_each_online_cpu(cpu)
104 xen_pmu_init(cpu);
110} 105}
111 106
112void xen_arch_suspend(void) 107void xen_arch_suspend(void)
113{ 108{
109 int cpu;
110
111 for_each_online_cpu(cpu)
112 xen_pmu_finish(cpu);
113
114 on_each_cpu(xen_vcpu_notify_suspend, NULL, 1); 114 on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
115} 115}
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5bcdfc10c23a..5a37188b559f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1127,15 +1127,15 @@ void blkcg_exit_queue(struct request_queue *q)
1127 * of the main cic data structures. For now we allow a task to change 1127 * of the main cic data structures. For now we allow a task to change
1128 * its cgroup only if it's the only owner of its ioc. 1128 * its cgroup only if it's the only owner of its ioc.
1129 */ 1129 */
1130static int blkcg_can_attach(struct cgroup_subsys_state *css, 1130static int blkcg_can_attach(struct cgroup_taskset *tset)
1131 struct cgroup_taskset *tset)
1132{ 1131{
1133 struct task_struct *task; 1132 struct task_struct *task;
1133 struct cgroup_subsys_state *dst_css;
1134 struct io_context *ioc; 1134 struct io_context *ioc;
1135 int ret = 0; 1135 int ret = 0;
1136 1136
1137 /* task_lock() is needed to avoid races with exit_io_context() */ 1137 /* task_lock() is needed to avoid races with exit_io_context() */
1138 cgroup_taskset_for_each(task, tset) { 1138 cgroup_taskset_for_each(task, dst_css, tset) {
1139 task_lock(task); 1139 task_lock(task);
1140 ioc = task->io_context; 1140 ioc = task->io_context;
1141 if (ioc && atomic_read(&ioc->nr_tasks) > 1) 1141 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
diff --git a/block/blk-core.c b/block/blk-core.c
index 5131993b23a1..3636be469fa2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2114,7 +2114,8 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
2114EXPORT_SYMBOL(submit_bio); 2114EXPORT_SYMBOL(submit_bio);
2115 2115
2116/** 2116/**
2117 * blk_rq_check_limits - Helper function to check a request for the queue limit 2117 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 2118 * for the new queue limits
2118 * @q: the queue 2119 * @q: the queue
2119 * @rq: the request being checked 2120 * @rq: the request being checked
2120 * 2121 *
@@ -2125,20 +2126,13 @@ EXPORT_SYMBOL(submit_bio);
2125 * after it is inserted to @q, it should be checked against @q before 2126 * after it is inserted to @q, it should be checked against @q before
2126 * the insertion using this generic function. 2127 * the insertion using this generic function.
2127 * 2128 *
2128 * This function should also be useful for request stacking drivers
2129 * in some cases below, so export this function.
2130 * Request stacking drivers like request-based dm may change the queue 2129 * Request stacking drivers like request-based dm may change the queue
2131 * limits while requests are in the queue (e.g. dm's table swapping). 2130 * limits when retrying requests on other queues. Those requests need
2132 * Such request stacking drivers should check those requests against 2131 * to be checked against the new queue limits again during dispatch.
2133 * the new queue limits again when they dispatch those requests,
2134 * although such checkings are also done against the old queue limits
2135 * when submitting requests.
2136 */ 2132 */
2137int blk_rq_check_limits(struct request_queue *q, struct request *rq) 2133static int blk_cloned_rq_check_limits(struct request_queue *q,
2134 struct request *rq)
2138{ 2135{
2139 if (!rq_mergeable(rq))
2140 return 0;
2141
2142 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { 2136 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
2143 printk(KERN_ERR "%s: over max size limit.\n", __func__); 2137 printk(KERN_ERR "%s: over max size limit.\n", __func__);
2144 return -EIO; 2138 return -EIO;
@@ -2158,7 +2152,6 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
2158 2152
2159 return 0; 2153 return 0;
2160} 2154}
2161EXPORT_SYMBOL_GPL(blk_rq_check_limits);
2162 2155
2163/** 2156/**
2164 * blk_insert_cloned_request - Helper for stacking drivers to submit a request 2157 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
@@ -2170,7 +2163,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
2170 unsigned long flags; 2163 unsigned long flags;
2171 int where = ELEVATOR_INSERT_BACK; 2164 int where = ELEVATOR_INSERT_BACK;
2172 2165
2173 if (blk_rq_check_limits(q, rq)) 2166 if (blk_cloned_rq_check_limits(q, rq))
2174 return -EIO; 2167 return -EIO;
2175 2168
2176 if (rq->rq_disk && 2169 if (rq->rq_disk &&
@@ -3412,6 +3405,9 @@ int blk_pre_runtime_suspend(struct request_queue *q)
3412{ 3405{
3413 int ret = 0; 3406 int ret = 0;
3414 3407
3408 if (!q->dev)
3409 return ret;
3410
3415 spin_lock_irq(q->queue_lock); 3411 spin_lock_irq(q->queue_lock);
3416 if (q->nr_pending) { 3412 if (q->nr_pending) {
3417 ret = -EBUSY; 3413 ret = -EBUSY;
@@ -3439,6 +3435,9 @@ EXPORT_SYMBOL(blk_pre_runtime_suspend);
3439 */ 3435 */
3440void blk_post_runtime_suspend(struct request_queue *q, int err) 3436void blk_post_runtime_suspend(struct request_queue *q, int err)
3441{ 3437{
3438 if (!q->dev)
3439 return;
3440
3442 spin_lock_irq(q->queue_lock); 3441 spin_lock_irq(q->queue_lock);
3443 if (!err) { 3442 if (!err) {
3444 q->rpm_status = RPM_SUSPENDED; 3443 q->rpm_status = RPM_SUSPENDED;
@@ -3463,6 +3462,9 @@ EXPORT_SYMBOL(blk_post_runtime_suspend);
3463 */ 3462 */
3464void blk_pre_runtime_resume(struct request_queue *q) 3463void blk_pre_runtime_resume(struct request_queue *q)
3465{ 3464{
3465 if (!q->dev)
3466 return;
3467
3466 spin_lock_irq(q->queue_lock); 3468 spin_lock_irq(q->queue_lock);
3467 q->rpm_status = RPM_RESUMING; 3469 q->rpm_status = RPM_RESUMING;
3468 spin_unlock_irq(q->queue_lock); 3470 spin_unlock_irq(q->queue_lock);
@@ -3485,6 +3487,9 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
3485 */ 3487 */
3486void blk_post_runtime_resume(struct request_queue *q, int err) 3488void blk_post_runtime_resume(struct request_queue *q, int err)
3487{ 3489{
3490 if (!q->dev)
3491 return;
3492
3488 spin_lock_irq(q->queue_lock); 3493 spin_lock_irq(q->queue_lock);
3489 if (!err) { 3494 if (!err) {
3490 q->rpm_status = RPM_ACTIVE; 3495 q->rpm_status = RPM_ACTIVE;
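
The four runtime-PM hunks in blk-core.c add one and the same guard: when
no device was registered for the queue (q->dev is only assigned by
blk_pm_runtime_init()), the helper returns before touching rpm_status. A
sketch of the shape shared by all four; pm_helper_like() is a stand-in
name:

/*
 * Sketch: bail out early when runtime PM was never initialized for this
 * queue, so rpm_status is only updated for PM-managed devices.
 */
static void pm_helper_like(struct request_queue *q)
{
	if (!q->dev)		/* blk_pm_runtime_init() was never called */
		return;

	spin_lock_irq(q->queue_lock);
	q->rpm_status = RPM_ACTIVE;	/* or RPM_SUSPENDED etc., per helper */
	spin_unlock_irq(q->queue_lock);
}
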
diff --git a/block/blk-merge.c b/block/blk-merge.c
index de5716d8e525..e01405a3e8b3 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -76,6 +76,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
76 struct bio_vec bv, bvprv, *bvprvp = NULL; 76 struct bio_vec bv, bvprv, *bvprvp = NULL;
77 struct bvec_iter iter; 77 struct bvec_iter iter;
78 unsigned seg_size = 0, nsegs = 0, sectors = 0; 78 unsigned seg_size = 0, nsegs = 0, sectors = 0;
79 unsigned front_seg_size = bio->bi_seg_front_size;
80 bool do_split = true;
81 struct bio *new = NULL;
79 82
80 bio_for_each_segment(bv, bio, iter) { 83 bio_for_each_segment(bv, bio, iter) {
81 if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q)) 84 if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
@@ -98,8 +101,11 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
98 101
99 seg_size += bv.bv_len; 102 seg_size += bv.bv_len;
100 bvprv = bv; 103 bvprv = bv;
101 bvprvp = &bv; 104 bvprvp = &bvprv;
102 sectors += bv.bv_len >> 9; 105 sectors += bv.bv_len >> 9;
106
107 if (nsegs == 1 && seg_size > front_seg_size)
108 front_seg_size = seg_size;
103 continue; 109 continue;
104 } 110 }
105new_segment: 111new_segment:
@@ -108,16 +114,29 @@ new_segment:
108 114
109 nsegs++; 115 nsegs++;
110 bvprv = bv; 116 bvprv = bv;
111 bvprvp = &bv; 117 bvprvp = &bvprv;
112 seg_size = bv.bv_len; 118 seg_size = bv.bv_len;
113 sectors += bv.bv_len >> 9; 119 sectors += bv.bv_len >> 9;
120
121 if (nsegs == 1 && seg_size > front_seg_size)
122 front_seg_size = seg_size;
114 } 123 }
115 124
116 *segs = nsegs; 125 do_split = false;
117 return NULL;
118split: 126split:
119 *segs = nsegs; 127 *segs = nsegs;
120 return bio_split(bio, sectors, GFP_NOIO, bs); 128
129 if (do_split) {
130 new = bio_split(bio, sectors, GFP_NOIO, bs);
131 if (new)
132 bio = new;
133 }
134
135 bio->bi_seg_front_size = front_seg_size;
136 if (seg_size > bio->bi_seg_back_size)
137 bio->bi_seg_back_size = seg_size;
138
139 return do_split ? new : NULL;
121} 140}
122 141
123void blk_queue_split(struct request_queue *q, struct bio **bio, 142void blk_queue_split(struct request_queue *q, struct bio **bio,
@@ -412,6 +431,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
412 if (sg) 431 if (sg)
413 sg_mark_end(sg); 432 sg_mark_end(sg);
414 433
434 /*
 435 * Something must be wrong if the computed number of
 436 * segments is bigger than the request's physical segment count
437 */
438 WARN_ON(nsegs > rq->nr_phys_segments);
439
415 return nsegs; 440 return nsegs;
416} 441}
417EXPORT_SYMBOL(blk_rq_map_sg); 442EXPORT_SYMBOL(blk_rq_map_sg);
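
The subtle part of the blk_bio_segment_split() fix is the bvprvp change:
it used to point at bv, the loop variable that bio_for_each_segment()
overwrites on every iteration, so "previous vector" silently meant
"current vector". A generic sketch of that aliasing bug, independent of
the bio types:

/*
 * Sketch: a pointer meant to reference the previous element must point
 * at the saved copy, not at the loop variable it was copied from.
 */
struct vec { unsigned int len; };

static unsigned int walk(const struct vec *in, int n)
{
	struct vec cur, prev, *prevp = NULL;
	unsigned int total = 0;
	int i;

	for (i = 0; i < n; i++) {
		cur = in[i];
		if (prevp)		/* prevp really is element i - 1 */
			total += prevp->len;
		prev = cur;
		prevp = &prev;		/* NOT &cur: cur changes next pass */
	}
	return total;
}
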
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3ae09de62f19..6d6f8feb48c0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1291,15 +1291,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1291 blk_mq_bio_to_request(rq, bio); 1291 blk_mq_bio_to_request(rq, bio);
1292 1292
1293 /* 1293 /*
 1294 * we do limited pluging. If bio can be merged, do merge. 1294 * We do limited plugging. If the bio can be merged, do that.
1295 * Otherwise the existing request in the plug list will be 1295 * Otherwise the existing request in the plug list will be
1296 * issued. So the plug list will have one request at most 1296 * issued. So the plug list will have one request at most
1297 */ 1297 */
1298 if (plug) { 1298 if (plug) {
1299 /* 1299 /*
1300 * The plug list might get flushed before this. If that 1300 * The plug list might get flushed before this. If that
1301 * happens, same_queue_rq is invalid and plug list is empty 1301 * happens, same_queue_rq is invalid and plug list is
1302 **/ 1302 * empty
1303 */
1303 if (same_queue_rq && !list_empty(&plug->mq_list)) { 1304 if (same_queue_rq && !list_empty(&plug->mq_list)) {
1304 old_rq = same_queue_rq; 1305 old_rq = same_queue_rq;
1305 list_del_init(&old_rq->queuelist); 1306 list_del_init(&old_rq->queuelist);
@@ -1380,12 +1381,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
1380 blk_mq_bio_to_request(rq, bio); 1381 blk_mq_bio_to_request(rq, bio);
1381 if (!request_count) 1382 if (!request_count)
1382 trace_block_plug(q); 1383 trace_block_plug(q);
1383 else if (request_count >= BLK_MAX_REQUEST_COUNT) { 1384
1385 blk_mq_put_ctx(data.ctx);
1386
1387 if (request_count >= BLK_MAX_REQUEST_COUNT) {
1384 blk_flush_plug_list(plug, false); 1388 blk_flush_plug_list(plug, false);
1385 trace_block_plug(q); 1389 trace_block_plug(q);
1386 } 1390 }
1391
1387 list_add_tail(&rq->queuelist, &plug->mq_list); 1392 list_add_tail(&rq->queuelist, &plug->mq_list);
1388 blk_mq_put_ctx(data.ctx);
1389 return cookie; 1393 return cookie;
1390 } 1394 }
1391 1395
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 7d8f129a1516..dd4973583978 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,7 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
91 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; 91 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
92 lim->virt_boundary_mask = 0; 92 lim->virt_boundary_mask = 0;
93 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; 93 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
94 lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; 94 lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
95 BLK_SAFE_MAX_SECTORS;
95 lim->chunk_sectors = 0; 96 lim->chunk_sectors = 0;
96 lim->max_write_same_sectors = 0; 97 lim->max_write_same_sectors = 0;
97 lim->max_discard_sectors = 0; 98 lim->max_discard_sectors = 0;
@@ -127,6 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
127 lim->max_hw_sectors = UINT_MAX; 128 lim->max_hw_sectors = UINT_MAX;
128 lim->max_segment_size = UINT_MAX; 129 lim->max_segment_size = UINT_MAX;
129 lim->max_sectors = UINT_MAX; 130 lim->max_sectors = UINT_MAX;
131 lim->max_dev_sectors = UINT_MAX;
130 lim->max_write_same_sectors = UINT_MAX; 132 lim->max_write_same_sectors = UINT_MAX;
131} 133}
132EXPORT_SYMBOL(blk_set_stacking_limits); 134EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -214,8 +216,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
214EXPORT_SYMBOL(blk_queue_bounce_limit); 216EXPORT_SYMBOL(blk_queue_bounce_limit);
215 217
216/** 218/**
217 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request 219 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
218 * @limits: the queue limits 220 * @q: the request queue for the device
219 * @max_hw_sectors: max hardware sectors in the usual 512b unit 221 * @max_hw_sectors: max hardware sectors in the usual 512b unit
220 * 222 *
221 * Description: 223 * Description:
@@ -224,13 +226,19 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
224 * the device driver based upon the capabilities of the I/O 226 * the device driver based upon the capabilities of the I/O
225 * controller. 227 * controller.
226 * 228 *
229 * max_dev_sectors is a hard limit imposed by the storage device for
230 * READ/WRITE requests. It is set by the disk driver.
231 *
227 * max_sectors is a soft limit imposed by the block layer for 232 * max_sectors is a soft limit imposed by the block layer for
228 * filesystem type requests. This value can be overridden on a 233 * filesystem type requests. This value can be overridden on a
229 * per-device basis in /sys/block/<device>/queue/max_sectors_kb. 234 * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
230 * The soft limit can not exceed max_hw_sectors. 235 * The soft limit can not exceed max_hw_sectors.
231 **/ 236 **/
232void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors) 237void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
233{ 238{
239 struct queue_limits *limits = &q->limits;
240 unsigned int max_sectors;
241
234 if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { 242 if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
235 max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); 243 max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
236 printk(KERN_INFO "%s: set to minimum %d\n", 244 printk(KERN_INFO "%s: set to minimum %d\n",
@@ -238,22 +246,9 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_
238 } 246 }
239 247
240 limits->max_hw_sectors = max_hw_sectors; 248 limits->max_hw_sectors = max_hw_sectors;
241 limits->max_sectors = min_t(unsigned int, max_hw_sectors, 249 max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
242 BLK_DEF_MAX_SECTORS); 250 max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
243} 251 limits->max_sectors = max_sectors;
244EXPORT_SYMBOL(blk_limits_max_hw_sectors);
245
246/**
247 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
248 * @q: the request queue for the device
249 * @max_hw_sectors: max hardware sectors in the usual 512b unit
250 *
251 * Description:
252 * See description for blk_limits_max_hw_sectors().
253 **/
254void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
255{
256 blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
257} 252}
258EXPORT_SYMBOL(blk_queue_max_hw_sectors); 253EXPORT_SYMBOL(blk_queue_max_hw_sectors);
259 254
@@ -527,6 +522,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
527 522
528 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); 523 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
529 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); 524 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
525 t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
530 t->max_write_same_sectors = min(t->max_write_same_sectors, 526 t->max_write_same_sectors = min(t->max_write_same_sectors,
531 b->max_write_same_sectors); 527 b->max_write_same_sectors);
532 t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); 528 t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
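
The new max_sectors derivation in blk_queue_max_hw_sectors() leans on
min_not_zero(), which ignores an operand of zero (an unset limit). The
computation, condensed into a hypothetical helper:

/*
 * Sketch: clamp by the device limit only when the driver set one (zero
 * means "no device-imposed limit"), then apply the block-layer default.
 */
static unsigned int derive_max_sectors(unsigned int max_hw_sectors,
				       unsigned int max_dev_sectors)
{
	unsigned int max_sectors;

	max_sectors = min_not_zero(max_hw_sectors, max_dev_sectors);
	return min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
}
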
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 565b8dac5782..e140cc487ce1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -205,6 +205,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
205 if (ret < 0) 205 if (ret < 0)
206 return ret; 206 return ret;
207 207
208 max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
209 q->limits.max_dev_sectors >> 1);
210
208 if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) 211 if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
209 return -EINVAL; 212 return -EINVAL;
210 213
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 246dfb16c3d9..aa40aa93381b 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -158,11 +158,13 @@ void blk_abort_request(struct request *req)
158{ 158{
159 if (blk_mark_rq_complete(req)) 159 if (blk_mark_rq_complete(req))
160 return; 160 return;
161 blk_delete_timer(req); 161
162 if (req->q->mq_ops) 162 if (req->q->mq_ops) {
163 blk_mq_rq_timed_out(req, false); 163 blk_mq_rq_timed_out(req, false);
164 else 164 } else {
165 blk_delete_timer(req);
165 blk_rq_timed_out(req); 166 blk_rq_timed_out(req);
167 }
166} 168}
167EXPORT_SYMBOL_GPL(blk_abort_request); 169EXPORT_SYMBOL_GPL(blk_abort_request);
168 170
diff --git a/block/blk.h b/block/blk.h
index da722eb786df..c43926d3d74d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -72,8 +72,6 @@ void blk_dequeue_request(struct request *rq);
72void __blk_queue_free_tags(struct request_queue *q); 72void __blk_queue_free_tags(struct request_queue *q);
73bool __blk_end_bidi_request(struct request *rq, int error, 73bool __blk_end_bidi_request(struct request *rq, int error,
74 unsigned int nr_bytes, unsigned int bidi_bytes); 74 unsigned int nr_bytes, unsigned int bidi_bytes);
75int blk_queue_enter(struct request_queue *q, gfp_t gfp);
76void blk_queue_exit(struct request_queue *q);
77void blk_freeze_queue(struct request_queue *q); 75void blk_freeze_queue(struct request_queue *q);
78 76
79static inline void blk_queue_enter_live(struct request_queue *q) 77static inline void blk_queue_enter_live(struct request_queue *q)
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 3de89d4690f3..a163c487cf38 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -21,10 +21,10 @@ static void noop_merged_requests(struct request_queue *q, struct request *rq,
21static int noop_dispatch(struct request_queue *q, int force) 21static int noop_dispatch(struct request_queue *q, int force)
22{ 22{
23 struct noop_data *nd = q->elevator->elevator_data; 23 struct noop_data *nd = q->elevator->elevator_data;
24 struct request *rq;
24 25
25 if (!list_empty(&nd->queue)) { 26 rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);
26 struct request *rq; 27 if (rq) {
27 rq = list_entry(nd->queue.next, struct request, queuelist);
28 list_del_init(&rq->queuelist); 28 list_del_init(&rq->queuelist);
29 elv_dispatch_sort(q, rq); 29 elv_dispatch_sort(q, rq);
30 return 1; 30 return 1;
@@ -46,7 +46,7 @@ noop_former_request(struct request_queue *q, struct request *rq)
46 46
47 if (rq->queuelist.prev == &nd->queue) 47 if (rq->queuelist.prev == &nd->queue)
48 return NULL; 48 return NULL;
49 return list_entry(rq->queuelist.prev, struct request, queuelist); 49 return list_prev_entry(rq, queuelist);
50} 50}
51 51
52static struct request * 52static struct request *
@@ -56,7 +56,7 @@ noop_latter_request(struct request_queue *q, struct request *rq)
56 56
57 if (rq->queuelist.next == &nd->queue) 57 if (rq->queuelist.next == &nd->queue)
58 return NULL; 58 return NULL;
59 return list_entry(rq->queuelist.next, struct request, queuelist); 59 return list_next_entry(rq, queuelist);
60} 60}
61 61
62static int noop_init_queue(struct request_queue *q, struct elevator_type *e) 62static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
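
The noop-iosched.c changes are pure substitutions of list.h helpers for
open-coded list_entry() arithmetic. A sketch of all three, using the
same noop_data layout as the file above; neighbours() is an illustrative
wrapper, and the (void) casts only silence unused warnings in the
sketch:

static void neighbours(struct noop_data *nd)
{
	struct request *rq, *prev, *next;

	/* head element or NULL, replacing list_empty() + list_entry() */
	rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);
	if (!rq)
		return;

	/*
	 * Neighbours of rq, replacing list_entry() on .prev and .next
	 * (the real code first checks against the list head).
	 */
	prev = list_prev_entry(rq, queuelist);
	next = list_next_entry(rq, queuelist);
	(void)prev;
	(void)next;
}
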
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 3b030157ec85..746935a5973c 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -397,7 +397,7 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
397 struct hd_struct *part; 397 struct hd_struct *part;
398 int res; 398 int res;
399 399
400 if (bdev->bd_part_count) 400 if (bdev->bd_part_count || bdev->bd_super)
401 return -EBUSY; 401 return -EBUSY;
402 res = invalidate_partition(disk, 0); 402 res = invalidate_partition(disk, 0);
403 if (res) 403 if (res)
diff --git a/block/partitions/mac.c b/block/partitions/mac.c
index c2c48ec64b27..621317ac4d59 100644
--- a/block/partitions/mac.c
+++ b/block/partitions/mac.c
@@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state)
32 Sector sect; 32 Sector sect;
33 unsigned char *data; 33 unsigned char *data;
34 int slot, blocks_in_map; 34 int slot, blocks_in_map;
35 unsigned secsize; 35 unsigned secsize, datasize, partoffset;
36#ifdef CONFIG_PPC_PMAC 36#ifdef CONFIG_PPC_PMAC
37 int found_root = 0; 37 int found_root = 0;
38 int found_root_goodness = 0; 38 int found_root_goodness = 0;
@@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state)
50 } 50 }
51 secsize = be16_to_cpu(md->block_size); 51 secsize = be16_to_cpu(md->block_size);
52 put_dev_sector(sect); 52 put_dev_sector(sect);
53 data = read_part_sector(state, secsize/512, &sect); 53 datasize = round_down(secsize, 512);
54 data = read_part_sector(state, datasize / 512, &sect);
54 if (!data) 55 if (!data)
55 return -1; 56 return -1;
56 part = (struct mac_partition *) (data + secsize%512); 57 partoffset = secsize % 512;
58 if (partoffset + sizeof(*part) > datasize)
59 return -1;
60 part = (struct mac_partition *) (data + partoffset);
57 if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { 61 if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
58 put_dev_sector(sect); 62 put_dev_sector(sect);
59 return 0; /* not a MacOS disk */ 63 return 0; /* not a MacOS disk */
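
The mac.c change is a classic untrusted-offset check: secsize comes from
the on-disk driver descriptor, so the derived partition-entry offset
must be validated against the bytes actually read before casting. A
generic sketch of the guard, with entry_at() as a hypothetical helper:

/*
 * Sketch: confirm a structure at an externally supplied offset fits
 * entirely inside the buffer before handing out a pointer into it.
 */
static const void *entry_at(const u8 *buf, size_t bufsize,
			    size_t offset, size_t entsize)
{
	if (offset > bufsize || entsize > bufsize - offset)
		return NULL;		/* would read past the buffer */
	return buf + offset;
}
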
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index b4ffc5be1a93..e5b5721809e2 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
277 if (WARN_ON_ONCE(in_irq())) 277 if (WARN_ON_ONCE(in_irq()))
278 return -EDEADLK; 278 return -EDEADLK;
279 279
280 walk->iv = req->info;
280 walk->nbytes = walk->total; 281 walk->nbytes = walk->total;
281 if (unlikely(!walk->total)) 282 if (unlikely(!walk->total))
282 return 0; 283 return 0;
283 284
284 walk->iv_buffer = NULL; 285 walk->iv_buffer = NULL;
285 walk->iv = req->info;
286 if (unlikely(((unsigned long)walk->iv & alignmask))) { 286 if (unlikely(((unsigned long)walk->iv & alignmask))) {
287 int err = ablkcipher_copy_iv(walk, tfm, alignmask); 287 int err = ablkcipher_copy_iv(walk, tfm, alignmask);
288 288
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 0aa6fdfb448a..6d4d4569447e 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -125,7 +125,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
125 if (flags & MSG_DONTWAIT) 125 if (flags & MSG_DONTWAIT)
126 return -EAGAIN; 126 return -EAGAIN;
127 127
128 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 128 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
129 129
130 for (;;) { 130 for (;;) {
131 if (signal_pending(current)) 131 if (signal_pending(current))
@@ -139,7 +139,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags)
139 } 139 }
140 finish_wait(sk_sleep(sk), &wait); 140 finish_wait(sk_sleep(sk), &wait);
141 141
142 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 142 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
143 143
144 return err; 144 return err;
145} 145}
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index af31a0ee4057..ca9efe17db1a 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -212,7 +212,7 @@ static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
212 if (flags & MSG_DONTWAIT) 212 if (flags & MSG_DONTWAIT)
213 return -EAGAIN; 213 return -EAGAIN;
214 214
215 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 215 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
216 216
217 for (;;) { 217 for (;;) {
218 if (signal_pending(current)) 218 if (signal_pending(current))
@@ -258,7 +258,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
258 return -EAGAIN; 258 return -EAGAIN;
259 } 259 }
260 260
261 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 261 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
262 262
263 for (;;) { 263 for (;;) {
264 if (signal_pending(current)) 264 if (signal_pending(current))
@@ -272,7 +272,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
272 } 272 }
273 finish_wait(sk_sleep(sk), &wait); 273 finish_wait(sk_sleep(sk), &wait);
274 274
275 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 275 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
276 276
277 return err; 277 return err;
278} 278}
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 11b981492031..8cc1622b2ee0 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
326 if (WARN_ON_ONCE(in_irq())) 326 if (WARN_ON_ONCE(in_irq()))
327 return -EDEADLK; 327 return -EDEADLK;
328 328
329 walk->iv = desc->info;
329 walk->nbytes = walk->total; 330 walk->nbytes = walk->total;
330 if (unlikely(!walk->total)) 331 if (unlikely(!walk->total))
331 return 0; 332 return 0;
332 333
333 walk->buffer = NULL; 334 walk->buffer = NULL;
334 walk->iv = desc->info;
335 if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { 335 if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
336 int err = blkcipher_copy_iv(walk); 336 int err = blkcipher_copy_iv(walk);
337 if (err) 337 if (err)
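
Both cipher-walk fixes (ablkcipher.c above and blkcipher.c here) are the
same reordering: assign walk->iv before the zero-length early return, so
any later path that reads walk->iv never sees an uninitialized pointer.
The shape, with an illustrative walk_like struct:

/*
 * Sketch of the init-order rule both hunks enforce: initialize every
 * field a later path may read before the first early return.
 */
struct walk_like {
	u8 *iv;
	unsigned int nbytes;
};

static int walk_first_like(struct walk_like *walk, u8 *iv,
			   unsigned int total)
{
	walk->iv = iv;		/* moved ahead of the early return */
	walk->nbytes = total;
	if (!total)
		return 0;	/* callers may still read walk->iv */

	/* ... alignment handling would follow here ... */
	return 1;
}
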
diff --git a/drivers/Makefile b/drivers/Makefile
index 73d039156ea7..795d0ca714bf 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_FB_I810) += video/fbdev/i810/
63obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ 63obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/
64 64
65obj-$(CONFIG_PARPORT) += parport/ 65obj-$(CONFIG_PARPORT) += parport/
66obj-$(CONFIG_NVM) += lightnvm/
66obj-y += base/ block/ misc/ mfd/ nfc/ 67obj-y += base/ block/ misc/ mfd/ nfc/
67obj-$(CONFIG_LIBNVDIMM) += nvdimm/ 68obj-$(CONFIG_LIBNVDIMM) += nvdimm/
68obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ 69obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
@@ -70,7 +71,6 @@ obj-$(CONFIG_NUBUS) += nubus/
70obj-y += macintosh/ 71obj-y += macintosh/
71obj-$(CONFIG_IDE) += ide/ 72obj-$(CONFIG_IDE) += ide/
72obj-$(CONFIG_SCSI) += scsi/ 73obj-$(CONFIG_SCSI) += scsi/
73obj-$(CONFIG_NVM) += lightnvm/
74obj-y += nvme/ 74obj-y += nvme/
75obj-$(CONFIG_ATA) += ata/ 75obj-$(CONFIG_ATA) += ata/
76obj-$(CONFIG_TARGET_CORE) += target/ 76obj-$(CONFIG_TARGET_CORE) += target/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 25dbb76c02cc..5eef4cb4f70e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -58,10 +58,10 @@ config ACPI_CCA_REQUIRED
58 bool 58 bool
59 59
60config ACPI_DEBUGGER 60config ACPI_DEBUGGER
61 bool "In-kernel debugger (EXPERIMENTAL)" 61 bool "AML debugger interface (EXPERIMENTAL)"
62 select ACPI_DEBUG 62 select ACPI_DEBUG
63 help 63 help
64 Enable in-kernel debugging facilities: statistics, internal 64 Enable in-kernel debugging of AML facilities: statistics, internal
65 object dump, single step control method execution. 65 object dump, single step control method execution.
66 This is still under development, currently enabling this only 66 This is still under development, currently enabling this only
67 results in the compilation of the ACPICA debugger files. 67 results in the compilation of the ACPICA debugger files.
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 3c083d2cc434..6730f965b379 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map);
304 304
305static int register_pcc_channel(int pcc_subspace_idx) 305static int register_pcc_channel(int pcc_subspace_idx)
306{ 306{
307 struct acpi_pcct_subspace *cppc_ss; 307 struct acpi_pcct_hw_reduced *cppc_ss;
308 unsigned int len; 308 unsigned int len;
309 309
310 if (pcc_subspace_idx >= 0) { 310 if (pcc_subspace_idx >= 0) {
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f61a7c834540..b420fb46669d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1103,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
1103 } 1103 }
1104 1104
1105err_exit: 1105err_exit:
1106 if (result && q) 1106 if (result)
1107 acpi_ec_delete_query(q); 1107 acpi_ec_delete_query(q);
1108 if (data) 1108 if (data)
1109 *data = value; 1109 *data = value;
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index f7dab53b352a..aa45d4802707 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -233,11 +233,12 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
233 struct nfit_table_prev *prev, 233 struct nfit_table_prev *prev,
234 struct acpi_nfit_system_address *spa) 234 struct acpi_nfit_system_address *spa)
235{ 235{
236 size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
236 struct device *dev = acpi_desc->dev; 237 struct device *dev = acpi_desc->dev;
237 struct nfit_spa *nfit_spa; 238 struct nfit_spa *nfit_spa;
238 239
239 list_for_each_entry(nfit_spa, &prev->spas, list) { 240 list_for_each_entry(nfit_spa, &prev->spas, list) {
240 if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { 241 if (memcmp(nfit_spa->spa, spa, length) == 0) {
241 list_move_tail(&nfit_spa->list, &acpi_desc->spas); 242 list_move_tail(&nfit_spa->list, &acpi_desc->spas);
242 return true; 243 return true;
243 } 244 }
@@ -259,11 +260,12 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
259 struct nfit_table_prev *prev, 260 struct nfit_table_prev *prev,
260 struct acpi_nfit_memory_map *memdev) 261 struct acpi_nfit_memory_map *memdev)
261{ 262{
263 size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
262 struct device *dev = acpi_desc->dev; 264 struct device *dev = acpi_desc->dev;
263 struct nfit_memdev *nfit_memdev; 265 struct nfit_memdev *nfit_memdev;
264 266
265 list_for_each_entry(nfit_memdev, &prev->memdevs, list) 267 list_for_each_entry(nfit_memdev, &prev->memdevs, list)
266 if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { 268 if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
267 list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); 269 list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
268 return true; 270 return true;
269 } 271 }
@@ -284,11 +286,12 @@ static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
284 struct nfit_table_prev *prev, 286 struct nfit_table_prev *prev,
285 struct acpi_nfit_control_region *dcr) 287 struct acpi_nfit_control_region *dcr)
286{ 288{
289 size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
287 struct device *dev = acpi_desc->dev; 290 struct device *dev = acpi_desc->dev;
288 struct nfit_dcr *nfit_dcr; 291 struct nfit_dcr *nfit_dcr;
289 292
290 list_for_each_entry(nfit_dcr, &prev->dcrs, list) 293 list_for_each_entry(nfit_dcr, &prev->dcrs, list)
291 if (memcmp(nfit_dcr->dcr, dcr, sizeof(*dcr)) == 0) { 294 if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
292 list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); 295 list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
293 return true; 296 return true;
294 } 297 }
@@ -308,11 +311,12 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
308 struct nfit_table_prev *prev, 311 struct nfit_table_prev *prev,
309 struct acpi_nfit_data_region *bdw) 312 struct acpi_nfit_data_region *bdw)
310{ 313{
314 size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
311 struct device *dev = acpi_desc->dev; 315 struct device *dev = acpi_desc->dev;
312 struct nfit_bdw *nfit_bdw; 316 struct nfit_bdw *nfit_bdw;
313 317
314 list_for_each_entry(nfit_bdw, &prev->bdws, list) 318 list_for_each_entry(nfit_bdw, &prev->bdws, list)
315 if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { 319 if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
316 list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); 320 list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
317 return true; 321 return true;
318 } 322 }
@@ -332,11 +336,12 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
332 struct nfit_table_prev *prev, 336 struct nfit_table_prev *prev,
333 struct acpi_nfit_interleave *idt) 337 struct acpi_nfit_interleave *idt)
334{ 338{
339 size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
335 struct device *dev = acpi_desc->dev; 340 struct device *dev = acpi_desc->dev;
336 struct nfit_idt *nfit_idt; 341 struct nfit_idt *nfit_idt;
337 342
338 list_for_each_entry(nfit_idt, &prev->idts, list) 343 list_for_each_entry(nfit_idt, &prev->idts, list)
339 if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) { 344 if (memcmp(nfit_idt->idt, idt, length) == 0) {
340 list_move_tail(&nfit_idt->list, &acpi_desc->idts); 345 list_move_tail(&nfit_idt->list, &acpi_desc->idts);
341 return true; 346 return true;
342 } 347 }
@@ -356,11 +361,12 @@ static bool add_flush(struct acpi_nfit_desc *acpi_desc,
356 struct nfit_table_prev *prev, 361 struct nfit_table_prev *prev,
357 struct acpi_nfit_flush_address *flush) 362 struct acpi_nfit_flush_address *flush)
358{ 363{
364 size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
359 struct device *dev = acpi_desc->dev; 365 struct device *dev = acpi_desc->dev;
360 struct nfit_flush *nfit_flush; 366 struct nfit_flush *nfit_flush;
361 367
362 list_for_each_entry(nfit_flush, &prev->flushes, list) 368 list_for_each_entry(nfit_flush, &prev->flushes, list)
363 if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) { 369 if (memcmp(nfit_flush->flush, flush, length) == 0) {
364 list_move_tail(&nfit_flush->list, &acpi_desc->flushes); 370 list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
365 return true; 371 return true;
366 } 372 }
@@ -655,7 +661,7 @@ static ssize_t revision_show(struct device *dev,
655 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 661 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
656 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 662 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
657 663
658 return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision); 664 return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
659} 665}
660static DEVICE_ATTR_RO(revision); 666static DEVICE_ATTR_RO(revision);
661 667
@@ -1652,7 +1658,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
1652 1658
1653 data = (u8 *) acpi_desc->nfit; 1659 data = (u8 *) acpi_desc->nfit;
1654 end = data + sz; 1660 end = data + sz;
1655 data += sizeof(struct acpi_table_nfit);
1656 while (!IS_ERR_OR_NULL(data)) 1661 while (!IS_ERR_OR_NULL(data))
1657 data = add_table(acpi_desc, &prev, data, end); 1662 data = add_table(acpi_desc, &prev, data, end);
1658 1663
@@ -1748,13 +1753,29 @@ static int acpi_nfit_add(struct acpi_device *adev)
1748 return PTR_ERR(acpi_desc); 1753 return PTR_ERR(acpi_desc);
1749 } 1754 }
1750 1755
1751 acpi_desc->nfit = (struct acpi_table_nfit *) tbl; 1756 /*
1757 * Save the acpi header for later and then skip it,
1758 * making nfit point to the first nfit table header.
1759 */
1760 acpi_desc->acpi_header = *tbl;
1761 acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
1762 sz -= sizeof(struct acpi_table_nfit);
1752 1763
1753 /* Evaluate _FIT and override with that if present */ 1764 /* Evaluate _FIT and override with that if present */
1754 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); 1765 status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
1755 if (ACPI_SUCCESS(status) && buf.length > 0) { 1766 if (ACPI_SUCCESS(status) && buf.length > 0) {
1756 acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; 1767 union acpi_object *obj;
1757 sz = buf.length; 1768 /*
1769 * Adjust for the acpi_object header of the _FIT
1770 */
1771 obj = buf.pointer;
1772 if (obj->type == ACPI_TYPE_BUFFER) {
1773 acpi_desc->nfit =
1774 (struct acpi_nfit_header *)obj->buffer.pointer;
1775 sz = obj->buffer.length;
1776 } else
1777 dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
1778 __func__, (int) obj->type);
1758 } 1779 }
1759 1780
1760 rc = acpi_nfit_init(acpi_desc, sz); 1781 rc = acpi_nfit_init(acpi_desc, sz);
@@ -1777,7 +1798,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
1777{ 1798{
1778 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); 1799 struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
1779 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; 1800 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
1780 struct acpi_table_nfit *nfit_saved; 1801 struct acpi_nfit_header *nfit_saved;
1802 union acpi_object *obj;
1781 struct device *dev = &adev->dev; 1803 struct device *dev = &adev->dev;
1782 acpi_status status; 1804 acpi_status status;
1783 int ret; 1805 int ret;
@@ -1788,7 +1810,7 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
1788 if (!dev->driver) { 1810 if (!dev->driver) {
1789 /* dev->driver may be null if we're being removed */ 1811 /* dev->driver may be null if we're being removed */
1790 dev_dbg(dev, "%s: no driver found for dev\n", __func__); 1812 dev_dbg(dev, "%s: no driver found for dev\n", __func__);
1791 return; 1813 goto out_unlock;
1792 } 1814 }
1793 1815
1794 if (!acpi_desc) { 1816 if (!acpi_desc) {
@@ -1808,12 +1830,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
1808 } 1830 }
1809 1831
1810 nfit_saved = acpi_desc->nfit; 1832 nfit_saved = acpi_desc->nfit;
1811 acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; 1833 obj = buf.pointer;
1812 ret = acpi_nfit_init(acpi_desc, buf.length); 1834 if (obj->type == ACPI_TYPE_BUFFER) {
1813 if (!ret) { 1835 acpi_desc->nfit =
1814 /* Merge failed, restore old nfit, and exit */ 1836 (struct acpi_nfit_header *)obj->buffer.pointer;
1815 acpi_desc->nfit = nfit_saved; 1837 ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
1816 dev_err(dev, "failed to merge updated NFIT\n"); 1838 if (ret) {
1839 /* Merge failed, restore old nfit, and exit */
1840 acpi_desc->nfit = nfit_saved;
1841 dev_err(dev, "failed to merge updated NFIT\n");
1842 }
1843 } else {
1844 /* Bad _FIT, restore old nfit */
1845 dev_err(dev, "Invalid _FIT\n");
1817 } 1846 }
1818 kfree(buf.pointer); 1847 kfree(buf.pointer);
1819 1848
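
Every add_*() hunk in nfit.c applies the same fix: compare only
min(sizeof(our struct), firmware-declared header length) bytes, so a
shorter table from an older NFIT revision never makes memcmp() read past
the data. Condensed into a hypothetical helper:

/*
 * Sketch of the comparison now used by the add_*() helpers above: clamp
 * the memcmp() length to what both definitions can legally provide.
 */
static bool nfit_table_matches(const void *ours, const void *theirs,
			       size_t our_size, size_t fw_len)
{
	size_t length = min_t(size_t, our_size, fw_len);

	return memcmp(ours, theirs, length) == 0;
}
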
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 2ea5c0797c8f..3d549a383659 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -96,7 +96,8 @@ struct nfit_mem {
96 96
97struct acpi_nfit_desc { 97struct acpi_nfit_desc {
98 struct nvdimm_bus_descriptor nd_desc; 98 struct nvdimm_bus_descriptor nd_desc;
99 struct acpi_table_nfit *nfit; 99 struct acpi_table_header acpi_header;
100 struct acpi_nfit_header *nfit;
100 struct mutex spa_map_mutex; 101 struct mutex spa_map_mutex;
101 struct mutex init_mutex; 102 struct mutex init_mutex;
102 struct list_head spa_maps; 103 struct list_head spa_maps;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 850d7bf0c873..ae3fe4e64203 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -768,6 +768,13 @@ static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info)
768 else 768 else
769 continue; 769 continue;
770 770
771 /*
772 * Some legacy x86 host bridge drivers use iomem_resource and
 773 * ioport_resource as the default resource pool; skip it.
774 */
775 if (res == root)
776 continue;
777
771 conflict = insert_resource_conflict(root, res); 778 conflict = insert_resource_conflict(root, res);
772 if (conflict) { 779 if (conflict) {
773 dev_info(&info->bridge->dev, 780 dev_info(&info->bridge->dev,
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index bf034f8b7c1a..2fa8304171e0 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -14,7 +14,6 @@
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/dmi.h>
18#include "sbshc.h" 17#include "sbshc.h"
19 18
20#define PREFIX "ACPI: " 19#define PREFIX "ACPI: "
@@ -30,6 +29,7 @@ struct acpi_smb_hc {
30 u8 query_bit; 29 u8 query_bit;
31 smbus_alarm_callback callback; 30 smbus_alarm_callback callback;
32 void *context; 31 void *context;
32 bool done;
33}; 33};
34 34
35static int acpi_smbus_hc_add(struct acpi_device *device); 35static int acpi_smbus_hc_add(struct acpi_device *device);
@@ -88,8 +88,6 @@ enum acpi_smb_offset {
88 ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */ 88 ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
89}; 89};
90 90
91static bool macbook;
92
93static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data) 91static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
94{ 92{
95 return ec_read(hc->offset + address, data); 93 return ec_read(hc->offset + address, data);
@@ -100,27 +98,11 @@ static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data)
100 return ec_write(hc->offset + address, data); 98 return ec_write(hc->offset + address, data);
101} 99}
102 100
103static inline int smb_check_done(struct acpi_smb_hc *hc)
104{
105 union acpi_smb_status status = {.raw = 0};
106 smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw);
107 return status.fields.done && (status.fields.status == SMBUS_OK);
108}
109
110static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout) 101static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
111{ 102{
112 if (wait_event_timeout(hc->wait, smb_check_done(hc), 103 if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout)))
113 msecs_to_jiffies(timeout)))
114 return 0; 104 return 0;
115 /* 105 return -ETIME;
116 * After the timeout happens, OS will try to check the status of SMbus.
117 * If the status is what OS expected, it will be regarded as the bogus
118 * timeout.
119 */
120 if (smb_check_done(hc))
121 return 0;
122 else
123 return -ETIME;
124} 106}
125 107
126static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, 108static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
@@ -135,8 +117,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
135 } 117 }
136 118
137 mutex_lock(&hc->lock); 119 mutex_lock(&hc->lock);
138 if (macbook) 120 hc->done = false;
139 udelay(5);
140 if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp)) 121 if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
141 goto end; 122 goto end;
142 if (temp) { 123 if (temp) {
@@ -235,8 +216,10 @@ static int smbus_alarm(void *context)
235 if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw)) 216 if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
236 return 0; 217 return 0;
237 /* Check if it is only a completion notify */ 218 /* Check if it is only a completion notify */
238 if (status.fields.done) 219 if (status.fields.done && status.fields.status == SMBUS_OK) {
220 hc->done = true;
239 wake_up(&hc->wait); 221 wake_up(&hc->wait);
222 }
240 if (!status.fields.alarm) 223 if (!status.fields.alarm)
241 return 0; 224 return 0;
242 mutex_lock(&hc->lock); 225 mutex_lock(&hc->lock);
@@ -262,29 +245,12 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
262 acpi_handle handle, acpi_ec_query_func func, 245 acpi_handle handle, acpi_ec_query_func func,
263 void *data); 246 void *data);
264 247
265static int macbook_dmi_match(const struct dmi_system_id *d)
266{
267 pr_debug("Detected MacBook, enabling workaround\n");
268 macbook = true;
269 return 0;
270}
271
272static struct dmi_system_id acpi_smbus_dmi_table[] = {
273 { macbook_dmi_match, "Apple MacBook", {
274 DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
275 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
276 },
277 { },
278};
279
280static int acpi_smbus_hc_add(struct acpi_device *device) 248static int acpi_smbus_hc_add(struct acpi_device *device)
281{ 249{
282 int status; 250 int status;
283 unsigned long long val; 251 unsigned long long val;
284 struct acpi_smb_hc *hc; 252 struct acpi_smb_hc *hc;
285 253
286 dmi_check_system(acpi_smbus_dmi_table);
287
288 if (!device) 254 if (!device)
289 return -EINVAL; 255 return -EINVAL;
290 256
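
The sbshc.c rework replaces status-register polling with a completion
flag: the alarm handler records a successful done status and wakes the
waiter, which sleeps in wait_event_timeout(). The two halves, condensed
from the hunks above into illustrative helper names:

/* Waiter side: sleep until hc->done is set or the timeout expires. */
static int wait_transaction_done(struct acpi_smb_hc *hc, int timeout_ms)
{
	if (wait_event_timeout(hc->wait, hc->done,
			       msecs_to_jiffies(timeout_ms)))
		return 0;
	return -ETIME;
}

/* Notify side, as in smbus_alarm(): set the flag, then wake. */
static void mark_transaction_done(struct acpi_smb_hc *hc)
{
	hc->done = true;
	wake_up(&hc->wait);
}
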
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ff02bb4218fc..cdfbcc54821f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -314,16 +314,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
314 { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */ 314 { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
315 { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */ 315 { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
316 { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */ 316 { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
317 { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
318 { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
319 { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
320 { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
321 { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
322 { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
323 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
324 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
325 { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
326 { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
327 { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */ 317 { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
328 { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */ 318 { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
329 { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ 319 { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@@ -350,10 +340,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
350 { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */ 340 { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
351 { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ 341 { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
352 { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ 342 { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
343 { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
353 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ 344 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
354 { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ 345 { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
346 { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
355 { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ 347 { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
356 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ 348 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
349 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
350 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
351 { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
352 { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
353 { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
354 { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
355 { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
356 { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
357 { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
358 { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
357 359
358 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 360 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
359 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 361 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 8490d37aee2a..f7a7fa81740e 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -62,6 +62,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
62 writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); 62 writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
63} 63}
64 64
65#ifdef CONFIG_PM_SLEEP
65static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state) 66static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
66{ 67{
67 return ahci_platform_suspend_host(&pdev->dev); 68 return ahci_platform_suspend_host(&pdev->dev);
@@ -81,6 +82,10 @@ static int ahci_mvebu_resume(struct platform_device *pdev)
81 82
82 return ahci_platform_resume_host(&pdev->dev); 83 return ahci_platform_resume_host(&pdev->dev);
83} 84}
85#else
86#define ahci_mvebu_suspend NULL
87#define ahci_mvebu_resume NULL
88#endif
84 89
85static const struct ata_port_info ahci_mvebu_port_info = { 90static const struct ata_port_info ahci_mvebu_port_info = {
86 .flags = AHCI_FLAG_COMMON, 91 .flags = AHCI_FLAG_COMMON,
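
The ahci_mvebu.c change uses the standard stub pattern for optional PM
callbacks: compile them only under CONFIG_PM_SLEEP and provide NULL
fallbacks so the platform_driver initializer stays free of #ifdefs. The
skeleton, with illustrative names:

#ifdef CONFIG_PM_SLEEP
static int drv_suspend(struct platform_device *pdev, pm_message_t state)
{
	return 0;	/* real suspend work goes here */
}

static int drv_resume(struct platform_device *pdev)
{
	return 0;	/* real resume work goes here */
}
#else
/* Unreferenced when PM sleep is off; keeps the initializer unconditional. */
#define drv_suspend NULL
#define drv_resume NULL
#endif
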
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 096064cd6c52..4665512dae44 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1273,6 +1273,15 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1273 ata_tf_to_fis(tf, pmp, is_cmd, fis); 1273 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1274 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12)); 1274 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1275 1275
1276 /* set port value for softreset of Port Multiplier */
1277 if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
1278 tmp = readl(port_mmio + PORT_FBS);
1279 tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
1280 tmp |= pmp << PORT_FBS_DEV_OFFSET;
1281 writel(tmp, port_mmio + PORT_FBS);
1282 pp->fbs_last_dev = pmp;
1283 }
1284
1276 /* issue & wait */ 1285 /* issue & wait */
1277 writel(1, port_mmio + PORT_CMD_ISSUE); 1286 writel(1, port_mmio + PORT_CMD_ISSUE);
1278 1287
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index cb0508af1459..961acc788f44 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1505,12 +1505,20 @@ static const char *ata_err_string(unsigned int err_mask)
1505unsigned int ata_read_log_page(struct ata_device *dev, u8 log, 1505unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1506 u8 page, void *buf, unsigned int sectors) 1506 u8 page, void *buf, unsigned int sectors)
1507{ 1507{
1508 unsigned long ap_flags = dev->link->ap->flags;
1508 struct ata_taskfile tf; 1509 struct ata_taskfile tf;
1509 unsigned int err_mask; 1510 unsigned int err_mask;
1510 bool dma = false; 1511 bool dma = false;
1511 1512
1512 DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page); 1513 DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
1513 1514
1515 /*
1516 * Return error without actually issuing the command on controllers
1517 * which e.g. lock up on a read log page.
1518 */
1519 if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
1520 return AC_ERR_DEV;
1521
1514retry: 1522retry:
1515 ata_tf_init(dev, &tf); 1523 ata_tf_init(dev, &tf);
1516 if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) && 1524 if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 5389579c5120..a723ae929783 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -45,7 +45,8 @@ enum {
45 SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */ 45 SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */
46 46
47 SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | 47 SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
48 ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN), 48 ATA_FLAG_PMP | ATA_FLAG_NCQ |
49 ATA_FLAG_AN | ATA_FLAG_NO_LOG_PAGE),
49 50
50 SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH, 51 SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH,
51 SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */ 52 SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index dea6edcbf145..29bcff086bce 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -630,6 +630,9 @@ static void sil_dev_config(struct ata_device *dev)
630 unsigned int n, quirks = 0; 630 unsigned int n, quirks = 0;
631 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 631 unsigned char model_num[ATA_ID_PROD_LEN + 1];
632 632
633 /* This controller doesn't support trim */
634 dev->horkage |= ATA_HORKAGE_NOTRIM;
635
633 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 636 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
634 637
635 for (n = 0; sil_blacklist[n].product; n++) 638 for (n = 0; sil_blacklist[n].product; n++)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2804aed3f416..25425d3f2575 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -303,6 +303,10 @@ static int memory_subsys_offline(struct device *dev)
303 if (mem->state == MEM_OFFLINE) 303 if (mem->state == MEM_OFFLINE)
304 return 0; 304 return 0;
305 305
306 /* Can't offline block with non-present sections */
307 if (mem->section_count != sections_per_block)
308 return -EINVAL;
309
306 return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); 310 return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
307} 311}
308 312
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e03b1ad25a90..65f50eccd49b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -390,6 +390,7 @@ static int pm_genpd_runtime_suspend(struct device *dev)
390 struct generic_pm_domain *genpd; 390 struct generic_pm_domain *genpd;
391 bool (*stop_ok)(struct device *__dev); 391 bool (*stop_ok)(struct device *__dev);
392 struct gpd_timing_data *td = &dev_gpd_data(dev)->td; 392 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
393 bool runtime_pm = pm_runtime_enabled(dev);
393 ktime_t time_start; 394 ktime_t time_start;
394 s64 elapsed_ns; 395 s64 elapsed_ns;
395 int ret; 396 int ret;
@@ -400,12 +401,19 @@ static int pm_genpd_runtime_suspend(struct device *dev)
400 if (IS_ERR(genpd)) 401 if (IS_ERR(genpd))
401 return -EINVAL; 402 return -EINVAL;
402 403
404 /*
405 * A runtime PM centric subsystem/driver may re-use the runtime PM
406 * callbacks for other purposes than runtime PM. In those scenarios
407 * runtime PM is disabled. Under these circumstances, we shall skip
408 * validating/measuring the PM QoS latency.
409 */
403 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; 410 stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
404 if (stop_ok && !stop_ok(dev)) 411 if (runtime_pm && stop_ok && !stop_ok(dev))
405 return -EBUSY; 412 return -EBUSY;
406 413
407 /* Measure suspend latency. */ 414 /* Measure suspend latency. */
408 time_start = ktime_get(); 415 if (runtime_pm)
416 time_start = ktime_get();
409 417
410 ret = genpd_save_dev(genpd, dev); 418 ret = genpd_save_dev(genpd, dev);
411 if (ret) 419 if (ret)
@@ -418,13 +426,15 @@ static int pm_genpd_runtime_suspend(struct device *dev)
418 } 426 }
419 427
420 /* Update suspend latency value if the measured time exceeds it. */ 428 /* Update suspend latency value if the measured time exceeds it. */
421 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 429 if (runtime_pm) {
422 if (elapsed_ns > td->suspend_latency_ns) { 430 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
423 td->suspend_latency_ns = elapsed_ns; 431 if (elapsed_ns > td->suspend_latency_ns) {
424 dev_dbg(dev, "suspend latency exceeded, %lld ns\n", 432 td->suspend_latency_ns = elapsed_ns;
425 elapsed_ns); 433 dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
426 genpd->max_off_time_changed = true; 434 elapsed_ns);
427 td->constraint_changed = true; 435 genpd->max_off_time_changed = true;
436 td->constraint_changed = true;
437 }
428 } 438 }
429 439
430 /* 440 /*
@@ -453,6 +463,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
453{ 463{
454 struct generic_pm_domain *genpd; 464 struct generic_pm_domain *genpd;
455 struct gpd_timing_data *td = &dev_gpd_data(dev)->td; 465 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
466 bool runtime_pm = pm_runtime_enabled(dev);
456 ktime_t time_start; 467 ktime_t time_start;
457 s64 elapsed_ns; 468 s64 elapsed_ns;
458 int ret; 469 int ret;
@@ -479,14 +490,14 @@ static int pm_genpd_runtime_resume(struct device *dev)
479 490
480 out: 491 out:
481 /* Measure resume latency. */ 492 /* Measure resume latency. */
482 if (timed) 493 if (timed && runtime_pm)
483 time_start = ktime_get(); 494 time_start = ktime_get();
484 495
485 genpd_start_dev(genpd, dev); 496 genpd_start_dev(genpd, dev);
486 genpd_restore_dev(genpd, dev); 497 genpd_restore_dev(genpd, dev);
487 498
488 /* Update resume latency value if the measured time exceeds it. */ 499 /* Update resume latency value if the measured time exceeds it. */
489 if (timed) { 500 if (timed && runtime_pm) {
490 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); 501 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
491 if (elapsed_ns > td->resume_latency_ns) { 502 if (elapsed_ns > td->resume_latency_ns) {
492 td->resume_latency_ns = elapsed_ns; 503 td->resume_latency_ns = elapsed_ns;
@@ -1775,10 +1786,10 @@ int genpd_dev_pm_attach(struct device *dev)
1775 } 1786 }
1776 1787
1777 pd = of_genpd_get_from_provider(&pd_args); 1788 pd = of_genpd_get_from_provider(&pd_args);
1789 of_node_put(pd_args.np);
1778 if (IS_ERR(pd)) { 1790 if (IS_ERR(pd)) {
1779 dev_dbg(dev, "%s() failed to find PM domain: %ld\n", 1791 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
1780 __func__, PTR_ERR(pd)); 1792 __func__, PTR_ERR(pd));
1781 of_node_put(dev->of_node);
1782 return -EPROBE_DEFER; 1793 return -EPROBE_DEFER;
1783 } 1794 }
1784 1795
@@ -1796,7 +1807,6 @@ int genpd_dev_pm_attach(struct device *dev)
1796 if (ret < 0) { 1807 if (ret < 0) {
1797 dev_err(dev, "failed to add to PM domain %s: %d", 1808 dev_err(dev, "failed to add to PM domain %s: %d",
1798 pd->name, ret); 1809 pd->name, ret);
1799 of_node_put(dev->of_node);
1800 goto out; 1810 goto out;
1801 } 1811 }
1802 1812
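Besides gating the latency bookkeeping on pm_runtime_enabled(), the genpd_dev_pm_attach() part of this hunk fixes an of_node refcount imbalance: of_parse_phandle_with_args() hands back pd_args.np with an elevated refcount that must be dropped once the provider lookup is done, while the old error paths dropped a reference on dev->of_node that this function never took. The rule, as a sketch (the lookup helper is hypothetical):

    struct of_phandle_args pd_args;
    struct generic_pm_domain *pd;

    if (of_parse_phandle_with_args(dev->of_node, "power-domains",
                                   "#power-domain-cells", 0, &pd_args))
            return -ENOENT;

    pd = lookup_pm_domain(&pd_args);        /* hypothetical provider lookup */
    of_node_put(pd_args.np);                /* balance the get from the parse */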
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index e60dd12e23aa..1e937ac5f456 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -160,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
160 struct gpd_timing_data *td; 160 struct gpd_timing_data *td;
161 s64 constraint_ns; 161 s64 constraint_ns;
162 162
163 if (!pdd->dev->driver)
164 continue;
165
166 /* 163 /*
167 * Check if the device is allowed to be off long enough for the 164 * Check if the device is allowed to be off long enough for the
168 * domain to turn off and on (that's how much time it will 165 * domain to turn off and on (that's how much time it will
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index eb6e67451dec..0d77cd6fd8d1 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
68 struct wake_irq *wirq; 68 struct wake_irq *wirq;
69 int err; 69 int err;
70 70
71 if (irq < 0)
72 return -EINVAL;
73
71 wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); 74 wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
72 if (!wirq) 75 if (!wirq)
73 return -ENOMEM; 76 return -ENOMEM;
@@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
167 struct wake_irq *wirq; 170 struct wake_irq *wirq;
168 int err; 171 int err;
169 172
173 if (irq < 0)
174 return -EINVAL;
175
170 wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); 176 wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
171 if (!wirq) 177 if (!wirq)
172 return -ENOMEM; 178 return -ENOMEM;
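Both wake-IRQ helpers now reject a negative number up front. IRQ lookups such as platform_get_irq() return a negative errno on failure, so a caller that forwards the result unchecked would otherwise hand an error code onward as if it were a valid interrupt line. The intended call-site shape, sketched:

    int irq = platform_get_irq(pdev, 0);    /* may be e.g. -ENXIO */

    if (irq < 0)
            return irq;                     /* propagate the errno */

    err = dev_pm_set_wake_irq(&pdev->dev, irq);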
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index a28a562f7b7f..3457ac8c03e2 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3810,7 +3810,6 @@ static int mtip_block_initialize(struct driver_data *dd)
3810 sector_t capacity; 3810 sector_t capacity;
3811 unsigned int index = 0; 3811 unsigned int index = 0;
3812 struct kobject *kobj; 3812 struct kobject *kobj;
3813 unsigned char thd_name[16];
3814 3813
3815 if (dd->disk) 3814 if (dd->disk)
3816 goto skip_create_disk; /* hw init done, before rebuild */ 3815 goto skip_create_disk; /* hw init done, before rebuild */
@@ -3958,10 +3957,9 @@ skip_create_disk:
3958 } 3957 }
3959 3958
3960start_service_thread: 3959start_service_thread:
3961 sprintf(thd_name, "mtip_svc_thd_%02d", index);
3962 dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, 3960 dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
3963 dd, dd->numa_node, "%s", 3961 dd, dd->numa_node,
3964 thd_name); 3962 "mtip_svc_thd_%02d", index);
3965 3963
3966 if (IS_ERR(dd->mtip_svc_handler)) { 3964 if (IS_ERR(dd->mtip_svc_handler)) {
3967 dev_err(&dd->pdev->dev, "service thread failed to start\n"); 3965 dev_err(&dd->pdev->dev, "service thread failed to start\n");
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 6255d1c4bba4..8162475d96b5 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -8,6 +8,7 @@
8#include <linux/slab.h> 8#include <linux/slab.h>
9#include <linux/blk-mq.h> 9#include <linux/blk-mq.h>
10#include <linux/hrtimer.h> 10#include <linux/hrtimer.h>
11#include <linux/lightnvm.h>
11 12
12struct nullb_cmd { 13struct nullb_cmd {
13 struct list_head list; 14 struct list_head list;
@@ -17,6 +18,7 @@ struct nullb_cmd {
17 struct bio *bio; 18 struct bio *bio;
18 unsigned int tag; 19 unsigned int tag;
19 struct nullb_queue *nq; 20 struct nullb_queue *nq;
21 struct hrtimer timer;
20}; 22};
21 23
22struct nullb_queue { 24struct nullb_queue {
@@ -39,23 +41,14 @@ struct nullb {
39 41
40 struct nullb_queue *queues; 42 struct nullb_queue *queues;
41 unsigned int nr_queues; 43 unsigned int nr_queues;
44 char disk_name[DISK_NAME_LEN];
42}; 45};
43 46
44static LIST_HEAD(nullb_list); 47static LIST_HEAD(nullb_list);
45static struct mutex lock; 48static struct mutex lock;
46static int null_major; 49static int null_major;
47static int nullb_indexes; 50static int nullb_indexes;
48 51static struct kmem_cache *ppa_cache;
49struct completion_queue {
50 struct llist_head list;
51 struct hrtimer timer;
52};
53
54/*
55 * These are per-cpu for now, they will need to be configured by the
56 * complete_queues parameter and appropriately mapped.
57 */
58static DEFINE_PER_CPU(struct completion_queue, completion_queues);
59 52
60enum { 53enum {
61 NULL_IRQ_NONE = 0, 54 NULL_IRQ_NONE = 0,
@@ -119,6 +112,10 @@ static int nr_devices = 2;
119module_param(nr_devices, int, S_IRUGO); 112module_param(nr_devices, int, S_IRUGO);
120MODULE_PARM_DESC(nr_devices, "Number of devices to register"); 113MODULE_PARM_DESC(nr_devices, "Number of devices to register");
121 114
115static bool use_lightnvm;
116module_param(use_lightnvm, bool, S_IRUGO);
117MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
118
122static int irqmode = NULL_IRQ_SOFTIRQ; 119static int irqmode = NULL_IRQ_SOFTIRQ;
123 120
124static int null_set_irqmode(const char *str, const struct kernel_param *kp) 121static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -135,8 +132,8 @@ static const struct kernel_param_ops null_irqmode_param_ops = {
135device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO); 132device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
136MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer"); 133MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
137 134
138static int completion_nsec = 10000; 135static unsigned long completion_nsec = 10000;
139module_param(completion_nsec, int, S_IRUGO); 136module_param(completion_nsec, ulong, S_IRUGO);
140MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns"); 137MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
141 138
142static int hw_queue_depth = 64; 139static int hw_queue_depth = 64;
@@ -173,6 +170,8 @@ static void free_cmd(struct nullb_cmd *cmd)
173 put_tag(cmd->nq, cmd->tag); 170 put_tag(cmd->nq, cmd->tag);
174} 171}
175 172
173static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
174
176static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) 175static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
177{ 176{
178 struct nullb_cmd *cmd; 177 struct nullb_cmd *cmd;
@@ -183,6 +182,11 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
183 cmd = &nq->cmds[tag]; 182 cmd = &nq->cmds[tag];
184 cmd->tag = tag; 183 cmd->tag = tag;
185 cmd->nq = nq; 184 cmd->nq = nq;
185 if (irqmode == NULL_IRQ_TIMER) {
186 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
187 HRTIMER_MODE_REL);
188 cmd->timer.function = null_cmd_timer_expired;
189 }
186 return cmd; 190 return cmd;
187 } 191 }
188 192
@@ -213,6 +217,8 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
213 217
214static void end_cmd(struct nullb_cmd *cmd) 218static void end_cmd(struct nullb_cmd *cmd)
215{ 219{
220 struct request_queue *q = NULL;
221
216 switch (queue_mode) { 222 switch (queue_mode) {
217 case NULL_Q_MQ: 223 case NULL_Q_MQ:
218 blk_mq_end_request(cmd->rq, 0); 224 blk_mq_end_request(cmd->rq, 0);
@@ -223,55 +229,37 @@ static void end_cmd(struct nullb_cmd *cmd)
223 break; 229 break;
224 case NULL_Q_BIO: 230 case NULL_Q_BIO:
225 bio_endio(cmd->bio); 231 bio_endio(cmd->bio);
226 break; 232 goto free_cmd;
227 } 233 }
228 234
235 if (cmd->rq)
236 q = cmd->rq->q;
237
238 /* Restart queue if needed, as we are freeing a tag */
239 if (q && !q->mq_ops && blk_queue_stopped(q)) {
240 unsigned long flags;
241
242 spin_lock_irqsave(q->queue_lock, flags);
243 if (blk_queue_stopped(q))
244 blk_start_queue(q);
245 spin_unlock_irqrestore(q->queue_lock, flags);
246 }
247free_cmd:
229 free_cmd(cmd); 248 free_cmd(cmd);
230} 249}
231 250
232static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) 251static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
233{ 252{
234 struct completion_queue *cq; 253 end_cmd(container_of(timer, struct nullb_cmd, timer));
235 struct llist_node *entry;
236 struct nullb_cmd *cmd;
237
238 cq = &per_cpu(completion_queues, smp_processor_id());
239
240 while ((entry = llist_del_all(&cq->list)) != NULL) {
241 entry = llist_reverse_order(entry);
242 do {
243 struct request_queue *q = NULL;
244
245 cmd = container_of(entry, struct nullb_cmd, ll_list);
246 entry = entry->next;
247 if (cmd->rq)
248 q = cmd->rq->q;
249 end_cmd(cmd);
250
251 if (q && !q->mq_ops && blk_queue_stopped(q)) {
252 spin_lock(q->queue_lock);
253 if (blk_queue_stopped(q))
254 blk_start_queue(q);
255 spin_unlock(q->queue_lock);
256 }
257 } while (entry);
258 }
259 254
260 return HRTIMER_NORESTART; 255 return HRTIMER_NORESTART;
261} 256}
262 257
263static void null_cmd_end_timer(struct nullb_cmd *cmd) 258static void null_cmd_end_timer(struct nullb_cmd *cmd)
264{ 259{
265 struct completion_queue *cq = &per_cpu(completion_queues, get_cpu()); 260 ktime_t kt = ktime_set(0, completion_nsec);
266 261
267 cmd->ll_list.next = NULL; 262 hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
268 if (llist_add(&cmd->ll_list, &cq->list)) {
269 ktime_t kt = ktime_set(0, completion_nsec);
270
271 hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED);
272 }
273
274 put_cpu();
275} 263}
276 264
277static void null_softirq_done_fn(struct request *rq) 265static void null_softirq_done_fn(struct request *rq)
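The null_blk rework above drops the shared per-CPU completion queues in favor of one hrtimer embedded in each command, so a timer-mode completion reduces to arming that command's timer and completing from its callback. The per-object hrtimer idiom, as a minimal sketch with illustrative names:

    struct my_cmd {
            struct hrtimer timer;
            /* ... */
    };

    static enum hrtimer_restart my_cmd_expired(struct hrtimer *t)
    {
            struct my_cmd *cmd = container_of(t, struct my_cmd, timer);

            my_complete(cmd);               /* hypothetical completion hook */
            return HRTIMER_NORESTART;       /* one-shot */
    }

    static void my_cmd_arm(struct my_cmd *cmd, u64 delay_ns)
    {
            hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            cmd->timer.function = my_cmd_expired;
            hrtimer_start(&cmd->timer, ns_to_ktime(delay_ns), HRTIMER_MODE_REL);
    }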
@@ -369,6 +357,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
369{ 357{
370 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 358 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
371 359
360 if (irqmode == NULL_IRQ_TIMER) {
361 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
362 cmd->timer.function = null_cmd_timer_expired;
363 }
372 cmd->rq = bd->rq; 364 cmd->rq = bd->rq;
373 cmd->nq = hctx->driver_data; 365 cmd->nq = hctx->driver_data;
374 366
@@ -427,15 +419,157 @@ static void null_del_dev(struct nullb *nullb)
427{ 419{
428 list_del_init(&nullb->list); 420 list_del_init(&nullb->list);
429 421
430 del_gendisk(nullb->disk); 422 if (use_lightnvm)
423 nvm_unregister(nullb->disk_name);
424 else
425 del_gendisk(nullb->disk);
431 blk_cleanup_queue(nullb->q); 426 blk_cleanup_queue(nullb->q);
432 if (queue_mode == NULL_Q_MQ) 427 if (queue_mode == NULL_Q_MQ)
433 blk_mq_free_tag_set(&nullb->tag_set); 428 blk_mq_free_tag_set(&nullb->tag_set);
434 put_disk(nullb->disk); 429 if (!use_lightnvm)
430 put_disk(nullb->disk);
435 cleanup_queues(nullb); 431 cleanup_queues(nullb);
436 kfree(nullb); 432 kfree(nullb);
437} 433}
438 434
435#ifdef CONFIG_NVM
436
437static void null_lnvm_end_io(struct request *rq, int error)
438{
439 struct nvm_rq *rqd = rq->end_io_data;
440 struct nvm_dev *dev = rqd->dev;
441
442 dev->mt->end_io(rqd, error);
443
444 blk_put_request(rq);
445}
446
447static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
448{
449 struct request_queue *q = dev->q;
450 struct request *rq;
451 struct bio *bio = rqd->bio;
452
453 rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
454 if (IS_ERR(rq))
455 return -ENOMEM;
456
457 rq->cmd_type = REQ_TYPE_DRV_PRIV;
458 rq->__sector = bio->bi_iter.bi_sector;
459 rq->ioprio = bio_prio(bio);
460
461 if (bio_has_data(bio))
462 rq->nr_phys_segments = bio_phys_segments(q, bio);
463
464 rq->__data_len = bio->bi_iter.bi_size;
465 rq->bio = rq->biotail = bio;
466
467 rq->end_io_data = rqd;
468
469 blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);
470
471 return 0;
472}
473
474static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
475{
476 sector_t size = gb * 1024 * 1024 * 1024ULL;
477 sector_t blksize;
478 struct nvm_id_group *grp;
479
480 id->ver_id = 0x1;
481 id->vmnt = 0;
482 id->cgrps = 1;
483 id->cap = 0x3;
484 id->dom = 0x1;
485
486 id->ppaf.blk_offset = 0;
487 id->ppaf.blk_len = 16;
488 id->ppaf.pg_offset = 16;
489 id->ppaf.pg_len = 16;
490 id->ppaf.sect_offset = 32;
491 id->ppaf.sect_len = 8;
492 id->ppaf.pln_offset = 40;
493 id->ppaf.pln_len = 8;
494 id->ppaf.lun_offset = 48;
495 id->ppaf.lun_len = 8;
496 id->ppaf.ch_offset = 56;
497 id->ppaf.ch_len = 8;
498
499 do_div(size, bs); /* convert size to pages */
500 do_div(size, 256); /* convert size to pages per block */
501 grp = &id->groups[0];
502 grp->mtype = 0;
503 grp->fmtype = 0;
504 grp->num_ch = 1;
505 grp->num_pg = 256;
506 blksize = size;
507 do_div(size, (1 << 16));
508 grp->num_lun = size + 1;
509 do_div(blksize, grp->num_lun);
510 grp->num_blk = blksize;
511 grp->num_pln = 1;
512
513 grp->fpg_sz = bs;
514 grp->csecs = bs;
515 grp->trdt = 25000;
516 grp->trdm = 25000;
517 grp->tprt = 500000;
518 grp->tprm = 500000;
519 grp->tbet = 1500000;
520 grp->tbem = 1500000;
521 grp->mpos = 0x010101; /* single plane rwe */
522 grp->cpar = hw_queue_depth;
523
524 return 0;
525}
526
527static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
528{
529 mempool_t *virtmem_pool;
530
531 virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
532 if (!virtmem_pool) {
533 pr_err("null_blk: Unable to create virtual memory pool\n");
534 return NULL;
535 }
536
537 return virtmem_pool;
538}
539
540static void null_lnvm_destroy_dma_pool(void *pool)
541{
542 mempool_destroy(pool);
543}
544
545static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
546 gfp_t mem_flags, dma_addr_t *dma_handler)
547{
548 return mempool_alloc(pool, mem_flags);
549}
550
551static void null_lnvm_dev_dma_free(void *pool, void *entry,
552 dma_addr_t dma_handler)
553{
554 mempool_free(entry, pool);
555}
556
557static struct nvm_dev_ops null_lnvm_dev_ops = {
558 .identity = null_lnvm_id,
559 .submit_io = null_lnvm_submit_io,
560
561 .create_dma_pool = null_lnvm_create_dma_pool,
562 .destroy_dma_pool = null_lnvm_destroy_dma_pool,
563 .dev_dma_alloc = null_lnvm_dev_dma_alloc,
564 .dev_dma_free = null_lnvm_dev_dma_free,
565
566 /* Simulate nvme protocol restriction */
567 .max_phys_sect = 64,
568};
569#else
570static struct nvm_dev_ops null_lnvm_dev_ops;
571#endif /* CONFIG_NVM */
572
439static int null_open(struct block_device *bdev, fmode_t mode) 573static int null_open(struct block_device *bdev, fmode_t mode)
440{ 574{
441 return 0; 575 return 0;
@@ -575,11 +709,6 @@ static int null_add_dev(void)
575 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); 709 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
576 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); 710 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
577 711
578 disk = nullb->disk = alloc_disk_node(1, home_node);
579 if (!disk) {
580 rv = -ENOMEM;
581 goto out_cleanup_blk_queue;
582 }
583 712
584 mutex_lock(&lock); 713 mutex_lock(&lock);
585 list_add_tail(&nullb->list, &nullb_list); 714 list_add_tail(&nullb->list, &nullb_list);
@@ -589,6 +718,21 @@ static int null_add_dev(void)
589 blk_queue_logical_block_size(nullb->q, bs); 718 blk_queue_logical_block_size(nullb->q, bs);
590 blk_queue_physical_block_size(nullb->q, bs); 719 blk_queue_physical_block_size(nullb->q, bs);
591 720
721 sprintf(nullb->disk_name, "nullb%d", nullb->index);
722
723 if (use_lightnvm) {
724 rv = nvm_register(nullb->q, nullb->disk_name,
725 &null_lnvm_dev_ops);
726 if (rv)
727 goto out_cleanup_blk_queue;
728 goto done;
729 }
730
731 disk = nullb->disk = alloc_disk_node(1, home_node);
732 if (!disk) {
733 rv = -ENOMEM;
734 goto out_cleanup_lightnvm;
735 }
592 size = gb * 1024 * 1024 * 1024ULL; 736 size = gb * 1024 * 1024 * 1024ULL;
593 set_capacity(disk, size >> 9); 737 set_capacity(disk, size >> 9);
594 738
@@ -598,10 +742,15 @@ static int null_add_dev(void)
598 disk->fops = &null_fops; 742 disk->fops = &null_fops;
599 disk->private_data = nullb; 743 disk->private_data = nullb;
600 disk->queue = nullb->q; 744 disk->queue = nullb->q;
601 sprintf(disk->disk_name, "nullb%d", nullb->index); 745 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
746
602 add_disk(disk); 747 add_disk(disk);
748done:
603 return 0; 749 return 0;
604 750
751out_cleanup_lightnvm:
752 if (use_lightnvm)
753 nvm_unregister(nullb->disk_name);
605out_cleanup_blk_queue: 754out_cleanup_blk_queue:
606 blk_cleanup_queue(nullb->q); 755 blk_cleanup_queue(nullb->q);
607out_cleanup_tags: 756out_cleanup_tags:
@@ -617,7 +766,9 @@ out:
617 766
618static int __init null_init(void) 767static int __init null_init(void)
619{ 768{
769 int ret = 0;
620 unsigned int i; 770 unsigned int i;
771 struct nullb *nullb;
621 772
622 if (bs > PAGE_SIZE) { 773 if (bs > PAGE_SIZE) {
623 pr_warn("null_blk: invalid block size\n"); 774 pr_warn("null_blk: invalid block size\n");
@@ -625,6 +776,18 @@ static int __init null_init(void)
625 bs = PAGE_SIZE; 776 bs = PAGE_SIZE;
626 } 777 }
627 778
779 if (use_lightnvm && bs != 4096) {
780 pr_warn("null_blk: LightNVM only supports 4k block size\n");
781 pr_warn("null_blk: defaults block size to 4k\n");
782 bs = 4096;
783 }
784
785 if (use_lightnvm && queue_mode != NULL_Q_MQ) {
786 pr_warn("null_blk: LightNVM only supported for blk-mq\n");
787 pr_warn("null_blk: defaults queue mode to blk-mq\n");
788 queue_mode = NULL_Q_MQ;
789 }
790
628 if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { 791 if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
629 if (submit_queues < nr_online_nodes) { 792 if (submit_queues < nr_online_nodes) {
630 pr_warn("null_blk: submit_queues param is set to %u.", 793 pr_warn("null_blk: submit_queues param is set to %u.",
@@ -638,32 +801,38 @@ static int __init null_init(void)
638 801
639 mutex_init(&lock); 802 mutex_init(&lock);
640 803
641 /* Initialize a separate list for each CPU for issuing softirqs */
642 for_each_possible_cpu(i) {
643 struct completion_queue *cq = &per_cpu(completion_queues, i);
644
645 init_llist_head(&cq->list);
646
647 if (irqmode != NULL_IRQ_TIMER)
648 continue;
649
650 hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
651 cq->timer.function = null_cmd_timer_expired;
652 }
653
654 null_major = register_blkdev(0, "nullb"); 804 null_major = register_blkdev(0, "nullb");
655 if (null_major < 0) 805 if (null_major < 0)
656 return null_major; 806 return null_major;
657 807
658 for (i = 0; i < nr_devices; i++) { 808 if (use_lightnvm) {
659 if (null_add_dev()) { 809 ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
660 unregister_blkdev(null_major, "nullb"); 810 0, 0, NULL);
661 return -EINVAL; 811 if (!ppa_cache) {
812 pr_err("null_blk: unable to create ppa cache\n");
813 ret = -ENOMEM;
814 goto err_ppa;
662 } 815 }
663 } 816 }
664 817
818 for (i = 0; i < nr_devices; i++) {
819 ret = null_add_dev();
820 if (ret)
821 goto err_dev;
822 }
823
665 pr_info("null: module loaded\n"); 824 pr_info("null: module loaded\n");
666 return 0; 825 return 0;
826
827err_dev:
828 while (!list_empty(&nullb_list)) {
829 nullb = list_entry(nullb_list.next, struct nullb, list);
830 null_del_dev(nullb);
831 }
832 kmem_cache_destroy(ppa_cache);
833err_ppa:
834 unregister_blkdev(null_major, "nullb");
835 return ret;
667} 836}
668 837
669static void __exit null_exit(void) 838static void __exit null_exit(void)
@@ -678,6 +847,8 @@ static void __exit null_exit(void)
678 null_del_dev(nullb); 847 null_del_dev(nullb);
679 } 848 }
680 mutex_unlock(&lock); 849 mutex_unlock(&lock);
850
851 kmem_cache_destroy(ppa_cache);
681} 852}
682 853
683module_init(null_init); 854module_init(null_init);
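null_init() now unwinds in strict reverse order of construction: devices already added are torn down, the ppa cache is destroyed, and only then is the block major unregistered. That is the usual goto-ladder shape, sketched here with hypothetical steps:

    static int __init demo_init(void)
    {
            int ret;

            ret = step_register();          /* e.g. register_blkdev()   */
            if (ret < 0)
                    return ret;

            ret = step_caches();            /* e.g. kmem_cache_create() */
            if (ret)
                    goto err_register;

            ret = step_devices();           /* e.g. add each device     */
            if (ret)
                    goto err_caches;

            return 0;

    err_caches:
            undo_caches();
    err_register:
            undo_register();
            return ret;
    }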
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 235708c7c46e..81ea69fee7ca 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3442,6 +3442,7 @@ static void rbd_queue_workfn(struct work_struct *work)
3442 goto err_rq; 3442 goto err_rq;
3443 } 3443 }
3444 img_request->rq = rq; 3444 img_request->rq = rq;
3445 snapc = NULL; /* img_request consumes a ref */
3445 3446
3446 if (op_type == OBJ_OP_DISCARD) 3447 if (op_type == OBJ_OP_DISCARD)
3447 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA, 3448 result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
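The one-line rbd fix is an ownership hand-off: once img_request has consumed the snapshot-context reference, the local snapc pointer is cleared so the shared error path no longer drops a reference it does not own. The general shape, with hypothetical names:

    static int demo_attach(struct req *req)
    {
            struct thing *obj = get_thing();    /* we hold one reference      */
            int ret;

            ret = prepare(req, obj);
            if (ret)
                    goto err;

            req->thing = obj;                   /* req owns the reference now */
            obj = NULL;                         /* error path must not put it */

            ret = finish(req);
            if (ret)
                    goto err;
            return 0;

    err:
            if (obj)
                    put_thing(obj);             /* drop only if still ours    */
            return ret;
    }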
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index f9099940c272..41fb1a917b17 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
950 goto unmap; 950 goto unmap;
951 951
952 for (n = 0, i = 0; n < nseg; n++) { 952 for (n = 0, i = 0; n < nseg; n++) {
953 uint8_t first_sect, last_sect;
954
953 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) { 955 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
954 /* Map indirect segments */ 956 /* Map indirect segments */
955 if (segments) 957 if (segments)
@@ -957,15 +959,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
957 segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); 959 segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
958 } 960 }
959 i = n % SEGS_PER_INDIRECT_FRAME; 961 i = n % SEGS_PER_INDIRECT_FRAME;
962
960 pending_req->segments[n]->gref = segments[i].gref; 963 pending_req->segments[n]->gref = segments[i].gref;
961 seg[n].nsec = segments[i].last_sect - 964
962 segments[i].first_sect + 1; 965 first_sect = READ_ONCE(segments[i].first_sect);
963 seg[n].offset = (segments[i].first_sect << 9); 966 last_sect = READ_ONCE(segments[i].last_sect);
964 if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) || 967 if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
965 (segments[i].last_sect < segments[i].first_sect)) {
966 rc = -EINVAL; 968 rc = -EINVAL;
967 goto unmap; 969 goto unmap;
968 } 970 }
971
972 seg[n].nsec = last_sect - first_sect + 1;
973 seg[n].offset = first_sect << 9;
969 preq->nr_sects += seg[n].nsec; 974 preq->nr_sects += seg[n].nsec;
970 } 975 }
971 976
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 68e87a037b99..c929ae22764c 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
408 struct blkif_x86_32_request *src) 408 struct blkif_x86_32_request *src)
409{ 409{
410 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; 410 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
411 dst->operation = src->operation; 411 dst->operation = READ_ONCE(src->operation);
412 switch (src->operation) { 412 switch (dst->operation) {
413 case BLKIF_OP_READ: 413 case BLKIF_OP_READ:
414 case BLKIF_OP_WRITE: 414 case BLKIF_OP_WRITE:
415 case BLKIF_OP_WRITE_BARRIER: 415 case BLKIF_OP_WRITE_BARRIER:
@@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
456 struct blkif_x86_64_request *src) 456 struct blkif_x86_64_request *src)
457{ 457{
458 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; 458 int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
459 dst->operation = src->operation; 459 dst->operation = READ_ONCE(src->operation);
460 switch (src->operation) { 460 switch (dst->operation) {
461 case BLKIF_OP_READ: 461 case BLKIF_OP_READ:
462 case BLKIF_OP_WRITE: 462 case BLKIF_OP_WRITE:
463 case BLKIF_OP_WRITE_BARRIER: 463 case BLKIF_OP_WRITE_BARRIER:
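Both READ_ONCE conversions close the same class of hole: the ring page is shared with a potentially hostile frontend, so any field that is read twice, once to validate and once to use, can change between the two loads. Taking a single snapshot and validating that snapshot removes the race. A minimal sketch, where shared points into memory the other side can write concurrently and use()/LIMIT are placeholders:

    /* Racy double fetch: the value used may not be the value checked. */
    if (shared->last_sect >= LIMIT)
            return -EINVAL;
    use(shared->last_sect);                 /* second load: may differ */

    /* Safe: read once, then validate and use the same snapshot. */
    uint8_t last = READ_ONCE(shared->last_sect);
    if (last >= LIMIT)
            return -EINVAL;
    use(last);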
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index 9f1856948758..bf500e0e7362 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -117,7 +117,7 @@ static struct platform_driver omap_ocp2scp_driver = {
117 117
118module_platform_driver(omap_ocp2scp_driver); 118module_platform_driver(omap_ocp2scp_driver);
119 119
120MODULE_ALIAS("platform: omap-ocp2scp"); 120MODULE_ALIAS("platform:omap-ocp2scp");
121MODULE_AUTHOR("Texas Instruments Inc."); 121MODULE_AUTHOR("Texas Instruments Inc.");
122MODULE_DESCRIPTION("OMAP OCP2SCP driver"); 122MODULE_DESCRIPTION("OMAP OCP2SCP driver");
123MODULE_LICENSE("GPL v2"); 123MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 654f6f36a071..4cc72fa017c7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
412 return rv; 412 return rv;
413} 413}
414 414
415static void start_check_enables(struct smi_info *smi_info) 415static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
416{
417 smi_info->last_timeout_jiffies = jiffies;
418 mod_timer(&smi_info->si_timer, new_val);
419 smi_info->timer_running = true;
420}
421
422/*
423 * Start a new message and (re)start the timer and thread.
424 */
425static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
426 unsigned int size)
427{
428 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
429
430 if (smi_info->thread)
431 wake_up_process(smi_info->thread);
432
433 smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
434}
435
436static void start_check_enables(struct smi_info *smi_info, bool start_timer)
416{ 437{
417 unsigned char msg[2]; 438 unsigned char msg[2];
418 439
419 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 440 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
420 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 441 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
421 442
422 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 443 if (start_timer)
444 start_new_msg(smi_info, msg, 2);
445 else
446 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
423 smi_info->si_state = SI_CHECKING_ENABLES; 447 smi_info->si_state = SI_CHECKING_ENABLES;
424} 448}
425 449
426static void start_clear_flags(struct smi_info *smi_info) 450static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
427{ 451{
428 unsigned char msg[3]; 452 unsigned char msg[3];
429 453
@@ -432,7 +456,10 @@ static void start_clear_flags(struct smi_info *smi_info)
432 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; 456 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
433 msg[2] = WDT_PRE_TIMEOUT_INT; 457 msg[2] = WDT_PRE_TIMEOUT_INT;
434 458
435 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); 459 if (start_timer)
460 start_new_msg(smi_info, msg, 3);
461 else
462 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
436 smi_info->si_state = SI_CLEARING_FLAGS; 463 smi_info->si_state = SI_CLEARING_FLAGS;
437} 464}
438 465
@@ -442,10 +469,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
442 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; 469 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
443 smi_info->curr_msg->data_size = 2; 470 smi_info->curr_msg->data_size = 2;
444 471
445 smi_info->handlers->start_transaction( 472 start_new_msg(smi_info, smi_info->curr_msg->data,
446 smi_info->si_sm, 473 smi_info->curr_msg->data_size);
447 smi_info->curr_msg->data,
448 smi_info->curr_msg->data_size);
449 smi_info->si_state = SI_GETTING_MESSAGES; 474 smi_info->si_state = SI_GETTING_MESSAGES;
450} 475}
451 476
@@ -455,20 +480,11 @@ static void start_getting_events(struct smi_info *smi_info)
455 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; 480 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
456 smi_info->curr_msg->data_size = 2; 481 smi_info->curr_msg->data_size = 2;
457 482
458 smi_info->handlers->start_transaction( 483 start_new_msg(smi_info, smi_info->curr_msg->data,
459 smi_info->si_sm, 484 smi_info->curr_msg->data_size);
460 smi_info->curr_msg->data,
461 smi_info->curr_msg->data_size);
462 smi_info->si_state = SI_GETTING_EVENTS; 485 smi_info->si_state = SI_GETTING_EVENTS;
463} 486}
464 487
465static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
466{
467 smi_info->last_timeout_jiffies = jiffies;
468 mod_timer(&smi_info->si_timer, new_val);
469 smi_info->timer_running = true;
470}
471
472/* 488/*
473 * When we have a situation where we run out of memory and cannot 489 * When we have a situation where we run out of memory and cannot
474 * allocate messages, we just leave them in the BMC and run the system 490 * allocate messages, we just leave them in the BMC and run the system
@@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
478 * Note that we cannot just use disable_irq(), since the interrupt may 494 * Note that we cannot just use disable_irq(), since the interrupt may
479 * be shared. 495 * be shared.
480 */ 496 */
481static inline bool disable_si_irq(struct smi_info *smi_info) 497static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
482{ 498{
483 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 499 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
484 smi_info->interrupt_disabled = true; 500 smi_info->interrupt_disabled = true;
485 start_check_enables(smi_info); 501 start_check_enables(smi_info, start_timer);
486 return true; 502 return true;
487 } 503 }
488 return false; 504 return false;
@@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
492{ 508{
493 if ((smi_info->irq) && (smi_info->interrupt_disabled)) { 509 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
494 smi_info->interrupt_disabled = false; 510 smi_info->interrupt_disabled = false;
495 start_check_enables(smi_info); 511 start_check_enables(smi_info, true);
496 return true; 512 return true;
497 } 513 }
498 return false; 514 return false;
@@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
510 526
511 msg = ipmi_alloc_smi_msg(); 527 msg = ipmi_alloc_smi_msg();
512 if (!msg) { 528 if (!msg) {
513 if (!disable_si_irq(smi_info)) 529 if (!disable_si_irq(smi_info, true))
514 smi_info->si_state = SI_NORMAL; 530 smi_info->si_state = SI_NORMAL;
515 } else if (enable_si_irq(smi_info)) { 531 } else if (enable_si_irq(smi_info)) {
516 ipmi_free_smi_msg(msg); 532 ipmi_free_smi_msg(msg);
@@ -526,7 +542,7 @@ static void handle_flags(struct smi_info *smi_info)
526 /* Watchdog pre-timeout */ 542 /* Watchdog pre-timeout */
527 smi_inc_stat(smi_info, watchdog_pretimeouts); 543 smi_inc_stat(smi_info, watchdog_pretimeouts);
528 544
529 start_clear_flags(smi_info); 545 start_clear_flags(smi_info, true);
530 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 546 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
531 if (smi_info->intf) 547 if (smi_info->intf)
532 ipmi_smi_watchdog_pretimeout(smi_info->intf); 548 ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
879 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 895 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
880 msg[1] = IPMI_GET_MSG_FLAGS_CMD; 896 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
881 897
882 smi_info->handlers->start_transaction( 898 start_new_msg(smi_info, msg, 2);
883 smi_info->si_sm, msg, 2);
884 smi_info->si_state = SI_GETTING_FLAGS; 899 smi_info->si_state = SI_GETTING_FLAGS;
885 goto restart; 900 goto restart;
886 } 901 }
@@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
910 * disable and messages disabled. 925 * disable and messages disabled.
911 */ 926 */
912 if (smi_info->supports_event_msg_buff || smi_info->irq) { 927 if (smi_info->supports_event_msg_buff || smi_info->irq) {
913 start_check_enables(smi_info); 928 start_check_enables(smi_info, true);
914 } else { 929 } else {
915 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); 930 smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
916 if (!smi_info->curr_msg) 931 if (!smi_info->curr_msg)
@@ -920,6 +935,13 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
920 } 935 }
921 goto restart; 936 goto restart;
922 } 937 }
938
939 if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
940 /* It's OK if this fails; the timer will just go off. */
941 if (del_timer(&smi_info->si_timer))
942 smi_info->timer_running = false;
943 }
944
923 out: 945 out:
924 return si_sm_result; 946 return si_sm_result;
925} 947}
@@ -1208,14 +1230,14 @@ static int smi_start_processing(void *send_info,
1208 1230
1209 new_smi->intf = intf; 1231 new_smi->intf = intf;
1210 1232
1211 /* Try to claim any interrupts. */
1212 if (new_smi->irq_setup)
1213 new_smi->irq_setup(new_smi);
1214
1215 /* Set up the timer that drives the interface. */ 1233 /* Set up the timer that drives the interface. */
1216 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi); 1234 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1217 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); 1235 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
1218 1236
1237 /* Try to claim any interrupts. */
1238 if (new_smi->irq_setup)
1239 new_smi->irq_setup(new_smi);
1240
1219 /* 1241 /*
1220 * Check if the user forcefully enabled the daemon. 1242 * Check if the user forcefully enabled the daemon.
1221 */ 1243 */
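The reordering in smi_start_processing() follows the publish-last rule: the timer that drives the state machine is set up and armed before the interrupt is claimed, so an IRQ that fires as soon as it is requested cannot observe a half-initialized timer. In sketch form, using the names from the hunk:

    /* 1: initialize everything the handler may touch */
    setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
    /* 2: arm the timer that keeps the state machine moving */
    smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
    /* 3: only now expose the interface to interrupts */
    if (new_smi->irq_setup)
            new_smi->irq_setup(new_smi);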
@@ -2560,6 +2582,7 @@ static const struct of_device_id of_ipmi_match[] = {
2560 .data = (void *)(unsigned long) SI_BT }, 2582 .data = (void *)(unsigned long) SI_BT },
2561 {}, 2583 {},
2562}; 2584};
2585MODULE_DEVICE_TABLE(of, of_ipmi_match);
2563 2586
2564static int of_ipmi_probe(struct platform_device *dev) 2587static int of_ipmi_probe(struct platform_device *dev)
2565{ 2588{
@@ -2646,7 +2669,6 @@ static int of_ipmi_probe(struct platform_device *dev)
2646 } 2669 }
2647 return 0; 2670 return 0;
2648} 2671}
2649MODULE_DEVICE_TABLE(of, of_ipmi_match);
2650#else 2672#else
2651#define of_ipmi_match NULL 2673#define of_ipmi_match NULL
2652static int of_ipmi_probe(struct platform_device *dev) 2674static int of_ipmi_probe(struct platform_device *dev)
@@ -3613,7 +3635,7 @@ static int try_smi_init(struct smi_info *new_smi)
3613 * Start clearing the flags before we enable interrupts or the 3635 * Start clearing the flags before we enable interrupts or the
3614 * timer to avoid racing with the timer. 3636 * timer to avoid racing with the timer.
3615 */ 3637 */
3616 start_clear_flags(new_smi); 3638 start_clear_flags(new_smi, false);
3617 3639
3618 /* 3640 /*
3619 * IRQ is defined to be set when non-zero. req_events will 3641 * IRQ is defined to be set when non-zero. req_events will
@@ -3908,7 +3930,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
3908 poll(to_clean); 3930 poll(to_clean);
3909 schedule_timeout_uninterruptible(1); 3931 schedule_timeout_uninterruptible(1);
3910 } 3932 }
3911 disable_si_irq(to_clean); 3933 disable_si_irq(to_clean, false);
3912 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 3934 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3913 poll(to_clean); 3935 poll(to_clean);
3914 schedule_timeout_uninterruptible(1); 3936 schedule_timeout_uninterruptible(1);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 0ac3bd1a5497..096f0cef4da1 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -153,6 +153,9 @@ static int timeout = 10;
153/* The pre-timeout is disabled by default. */ 153/* The pre-timeout is disabled by default. */
154static int pretimeout; 154static int pretimeout;
155 155
156/* Default timeout to set on panic */
157static int panic_wdt_timeout = 255;
158
156/* Default action is to reset the board on a timeout. */ 159/* Default action is to reset the board on a timeout. */
157static unsigned char action_val = WDOG_TIMEOUT_RESET; 160static unsigned char action_val = WDOG_TIMEOUT_RESET;
158 161
@@ -293,6 +296,9 @@ MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
293module_param(pretimeout, timeout, 0644); 296module_param(pretimeout, timeout, 0644);
294MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); 297MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
295 298
299module_param(panic_wdt_timeout, timeout, 0644);
300MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");
301
296module_param_cb(action, &param_ops_str, action_op, 0644); 302module_param_cb(action, &param_ops_str, action_op, 0644);
297MODULE_PARM_DESC(action, "Timeout action. One of: " 303MODULE_PARM_DESC(action, "Timeout action. One of: "
298 "reset, none, power_cycle, power_off."); 304 "reset, none, power_cycle, power_off.");
@@ -1189,7 +1195,7 @@ static int wdog_panic_handler(struct notifier_block *this,
1189 /* Make sure we do this only once. */ 1195 /* Make sure we do this only once. */
1190 panic_event_handled = 1; 1196 panic_event_handled = 1;
1191 1197
1192 timeout = 255; 1198 timeout = panic_wdt_timeout;
1193 pretimeout = 0; 1199 pretimeout = 0;
1194 panic_halt_ipmi_set_timeout(); 1200 panic_halt_ipmi_set_timeout();
1195 } 1201 }
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 10819e248414..335322dc403f 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -209,6 +209,8 @@ EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
209 209
210struct clk_gpio_delayed_register_data { 210struct clk_gpio_delayed_register_data {
211 const char *gpio_name; 211 const char *gpio_name;
212 int num_parents;
213 const char **parent_names;
212 struct device_node *node; 214 struct device_node *node;
213 struct mutex lock; 215 struct mutex lock;
214 struct clk *clk; 216 struct clk *clk;
@@ -222,8 +224,6 @@ static struct clk *of_clk_gpio_delayed_register_get(
222{ 224{
223 struct clk_gpio_delayed_register_data *data = _data; 225 struct clk_gpio_delayed_register_data *data = _data;
224 struct clk *clk; 226 struct clk *clk;
225 const char **parent_names;
226 int i, num_parents;
227 int gpio; 227 int gpio;
228 enum of_gpio_flags of_flags; 228 enum of_gpio_flags of_flags;
229 229
@@ -248,26 +248,14 @@ static struct clk *of_clk_gpio_delayed_register_get(
248 return ERR_PTR(gpio); 248 return ERR_PTR(gpio);
249 } 249 }
250 250
251 num_parents = of_clk_get_parent_count(data->node); 251 clk = data->clk_register_get(data->node->name, data->parent_names,
252 252 data->num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
253 parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
254 if (!parent_names) {
255 clk = ERR_PTR(-ENOMEM);
256 goto out;
257 }
258
259 for (i = 0; i < num_parents; i++)
260 parent_names[i] = of_clk_get_parent_name(data->node, i);
261
262 clk = data->clk_register_get(data->node->name, parent_names,
263 num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
264 if (IS_ERR(clk)) 253 if (IS_ERR(clk))
265 goto out; 254 goto out;
266 255
267 data->clk = clk; 256 data->clk = clk;
268out: 257out:
269 mutex_unlock(&data->lock); 258 mutex_unlock(&data->lock);
270 kfree(parent_names);
271 259
272 return clk; 260 return clk;
273} 261}
@@ -296,11 +284,24 @@ static void __init of_gpio_clk_setup(struct device_node *node,
296 unsigned gpio, bool active_low)) 284 unsigned gpio, bool active_low))
297{ 285{
298 struct clk_gpio_delayed_register_data *data; 286 struct clk_gpio_delayed_register_data *data;
287 const char **parent_names;
288 int i, num_parents;
299 289
300 data = kzalloc(sizeof(*data), GFP_KERNEL); 290 data = kzalloc(sizeof(*data), GFP_KERNEL);
301 if (!data) 291 if (!data)
302 return; 292 return;
303 293
294 num_parents = of_clk_get_parent_count(node);
295
296 parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
297 if (!parent_names)
298 return;
299
300 for (i = 0; i < num_parents; i++)
301 parent_names[i] = of_clk_get_parent_name(node, i);
302
303 data->num_parents = num_parents;
304 data->parent_names = parent_names;
304 data->node = node; 305 data->node = node;
305 data->gpio_name = gpio_name; 306 data->gpio_name = gpio_name;
306 data->clk_register_get = clk_register_get; 307 data->clk_register_get = clk_register_get;
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 1ab0fb81c6a0..7bc1c4527ae4 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -778,8 +778,10 @@ static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
778 */ 778 */
779 clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT; 779 clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
780 div = get_pll_div(cg, hwc, clksel); 780 div = get_pll_div(cg, hwc, clksel);
781 if (!div) 781 if (!div) {
782 kfree(hwc);
782 return NULL; 783 return NULL;
784 }
783 785
784 pct80_rate = clk_get_rate(div->clk); 786 pct80_rate = clk_get_rate(div->clk);
785 pct80_rate *= 8; 787 pct80_rate *= 8;
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 0b501a9fef92..cd0f2726f5e0 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -292,6 +292,7 @@ static int scpi_clocks_probe(struct platform_device *pdev)
292 ret = scpi_clk_add(dev, child, match); 292 ret = scpi_clk_add(dev, child, match);
293 if (ret) { 293 if (ret) {
294 scpi_clocks_remove(pdev); 294 scpi_clocks_remove(pdev);
295 of_node_put(child);
295 return ret; 296 return ret;
296 } 297 }
297 } 298 }
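The added of_node_put() is needed because for_each_available_child_of_node() holds a reference on the current child and releases it only when advancing to the next iteration; returning from inside the loop skips that release. Any early exit therefore has to drop the reference itself, roughly:

    struct device_node *child;
    int ret;

    for_each_available_child_of_node(np, child) {
            ret = add_one(dev, child);      /* hypothetical per-child setup */
            if (ret) {
                    of_node_put(child);     /* early return skips the
                                             * iterator's implicit put */
                    return ret;
            }
    }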
diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c
index 8564e4342c7d..82fe3662b5f6 100644
--- a/drivers/clk/imx/clk-pllv1.c
+++ b/drivers/clk/imx/clk-pllv1.c
@@ -52,7 +52,7 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
52 unsigned long parent_rate) 52 unsigned long parent_rate)
53{ 53{
54 struct clk_pllv1 *pll = to_clk_pllv1(hw); 54 struct clk_pllv1 *pll = to_clk_pllv1(hw);
55 long long ll; 55 unsigned long long ull;
56 int mfn_abs; 56 int mfn_abs;
57 unsigned int mfi, mfn, mfd, pd; 57 unsigned int mfi, mfn, mfd, pd;
58 u32 reg; 58 u32 reg;
@@ -94,16 +94,16 @@ static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
94 rate = parent_rate * 2; 94 rate = parent_rate * 2;
95 rate /= pd + 1; 95 rate /= pd + 1;
96 96
97 ll = (unsigned long long)rate * mfn_abs; 97 ull = (unsigned long long)rate * mfn_abs;
98 98
99 do_div(ll, mfd + 1); 99 do_div(ull, mfd + 1);
100 100
101 if (mfn_is_negative(pll, mfn)) 101 if (mfn_is_negative(pll, mfn))
102 ll = -ll; 102 ull = (rate * mfi) - ull;
103 else
104 ull = (rate * mfi) + ull;
103 105
104 ll = (rate * mfi) + ll; 106 return ull;
105
106 return ll;
107} 107}
108 108
109static struct clk_ops clk_pllv1_ops = { 109static struct clk_ops clk_pllv1_ops = {
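The s64-to-u64 change in clk-pllv1 (and in clk-pllv2 below, which gets the identical treatment) is not cosmetic: do_div() is only specified for an unsigned 64-bit dividend, so the intermediate now stays unsigned the whole way through and the sign of mfn is folded into a final add or subtract instead of negating the quotient. The resulting shape:

    unsigned long long frac = (unsigned long long)rate * mfn_abs;

    do_div(frac, mfd + 1);          /* frac = rate * |mfn| / (mfd + 1) */

    if (mfn_is_negative(pll, mfn))
            return (u64)rate * mfi - frac;
    return (u64)rate * mfi + frac;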
diff --git a/drivers/clk/imx/clk-pllv2.c b/drivers/clk/imx/clk-pllv2.c
index b18f875eac6a..4aeda56ce372 100644
--- a/drivers/clk/imx/clk-pllv2.c
+++ b/drivers/clk/imx/clk-pllv2.c
@@ -79,7 +79,7 @@ static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
79{ 79{
80 long mfi, mfn, mfd, pdf, ref_clk; 80 long mfi, mfn, mfd, pdf, ref_clk;
81 unsigned long dbl; 81 unsigned long dbl;
82 s64 temp; 82 u64 temp;
83 83
84 dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN; 84 dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
85 85
@@ -98,8 +98,9 @@ static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
98 temp = (u64) ref_clk * abs(mfn); 98 temp = (u64) ref_clk * abs(mfn);
99 do_div(temp, mfd + 1); 99 do_div(temp, mfd + 1);
100 if (mfn < 0) 100 if (mfn < 0)
101 temp = -temp; 101 temp = (ref_clk * mfi) - temp;
102 temp = (ref_clk * mfi) + temp; 102 else
103 temp = (ref_clk * mfi) + temp;
103 104
104 return temp; 105 return temp;
105} 106}
@@ -126,7 +127,7 @@ static int __clk_pllv2_set_rate(unsigned long rate, unsigned long parent_rate,
126{ 127{
127 u32 reg; 128 u32 reg;
128 long mfi, pdf, mfn, mfd = 999999; 129 long mfi, pdf, mfn, mfd = 999999;
129 s64 temp64; 130 u64 temp64;
130 unsigned long quad_parent_rate; 131 unsigned long quad_parent_rate;
131 132
132 quad_parent_rate = 4 * parent_rate; 133 quad_parent_rate = 4 * parent_rate;
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index d1b1c95177bb..0a94d9661d91 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -335,22 +335,22 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
335 clk[VF610_CLK_SAI0_SEL] = imx_clk_mux("sai0_sel", CCM_CSCMR1, 0, 2, sai_sels, 4); 335 clk[VF610_CLK_SAI0_SEL] = imx_clk_mux("sai0_sel", CCM_CSCMR1, 0, 2, sai_sels, 4);
336 clk[VF610_CLK_SAI0_EN] = imx_clk_gate("sai0_en", "sai0_sel", CCM_CSCDR1, 16); 336 clk[VF610_CLK_SAI0_EN] = imx_clk_gate("sai0_en", "sai0_sel", CCM_CSCDR1, 16);
337 clk[VF610_CLK_SAI0_DIV] = imx_clk_divider("sai0_div", "sai0_en", CCM_CSCDR1, 0, 4); 337 clk[VF610_CLK_SAI0_DIV] = imx_clk_divider("sai0_div", "sai0_en", CCM_CSCDR1, 0, 4);
338 clk[VF610_CLK_SAI0] = imx_clk_gate2("sai0", "sai0_div", CCM_CCGR0, CCM_CCGRx_CGn(15)); 338 clk[VF610_CLK_SAI0] = imx_clk_gate2("sai0", "ipg_bus", CCM_CCGR0, CCM_CCGRx_CGn(15));
339 339
340 clk[VF610_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", CCM_CSCMR1, 2, 2, sai_sels, 4); 340 clk[VF610_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", CCM_CSCMR1, 2, 2, sai_sels, 4);
341 clk[VF610_CLK_SAI1_EN] = imx_clk_gate("sai1_en", "sai1_sel", CCM_CSCDR1, 17); 341 clk[VF610_CLK_SAI1_EN] = imx_clk_gate("sai1_en", "sai1_sel", CCM_CSCDR1, 17);
342 clk[VF610_CLK_SAI1_DIV] = imx_clk_divider("sai1_div", "sai1_en", CCM_CSCDR1, 4, 4); 342 clk[VF610_CLK_SAI1_DIV] = imx_clk_divider("sai1_div", "sai1_en", CCM_CSCDR1, 4, 4);
343 clk[VF610_CLK_SAI1] = imx_clk_gate2("sai1", "sai1_div", CCM_CCGR1, CCM_CCGRx_CGn(0)); 343 clk[VF610_CLK_SAI1] = imx_clk_gate2("sai1", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(0));
344 344
345 clk[VF610_CLK_SAI2_SEL] = imx_clk_mux("sai2_sel", CCM_CSCMR1, 4, 2, sai_sels, 4); 345 clk[VF610_CLK_SAI2_SEL] = imx_clk_mux("sai2_sel", CCM_CSCMR1, 4, 2, sai_sels, 4);
346 clk[VF610_CLK_SAI2_EN] = imx_clk_gate("sai2_en", "sai2_sel", CCM_CSCDR1, 18); 346 clk[VF610_CLK_SAI2_EN] = imx_clk_gate("sai2_en", "sai2_sel", CCM_CSCDR1, 18);
347 clk[VF610_CLK_SAI2_DIV] = imx_clk_divider("sai2_div", "sai2_en", CCM_CSCDR1, 8, 4); 347 clk[VF610_CLK_SAI2_DIV] = imx_clk_divider("sai2_div", "sai2_en", CCM_CSCDR1, 8, 4);
348 clk[VF610_CLK_SAI2] = imx_clk_gate2("sai2", "sai2_div", CCM_CCGR1, CCM_CCGRx_CGn(1)); 348 clk[VF610_CLK_SAI2] = imx_clk_gate2("sai2", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(1));
349 349
350 clk[VF610_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", CCM_CSCMR1, 6, 2, sai_sels, 4); 350 clk[VF610_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", CCM_CSCMR1, 6, 2, sai_sels, 4);
351 clk[VF610_CLK_SAI3_EN] = imx_clk_gate("sai3_en", "sai3_sel", CCM_CSCDR1, 19); 351 clk[VF610_CLK_SAI3_EN] = imx_clk_gate("sai3_en", "sai3_sel", CCM_CSCDR1, 19);
352 clk[VF610_CLK_SAI3_DIV] = imx_clk_divider("sai3_div", "sai3_en", CCM_CSCDR1, 12, 4); 352 clk[VF610_CLK_SAI3_DIV] = imx_clk_divider("sai3_div", "sai3_en", CCM_CSCDR1, 12, 4);
353 clk[VF610_CLK_SAI3] = imx_clk_gate2("sai3", "sai3_div", CCM_CCGR1, CCM_CCGRx_CGn(2)); 353 clk[VF610_CLK_SAI3] = imx_clk_gate2("sai3", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(2));
354 354
355 clk[VF610_CLK_NFC_SEL] = imx_clk_mux("nfc_sel", CCM_CSCMR1, 12, 2, nfc_sels, 4); 355 clk[VF610_CLK_NFC_SEL] = imx_clk_mux("nfc_sel", CCM_CSCMR1, 12, 2, nfc_sels, 4);
356 clk[VF610_CLK_NFC_EN] = imx_clk_gate("nfc_en", "nfc_sel", CCM_CSCDR2, 9); 356 clk[VF610_CLK_NFC_EN] = imx_clk_gate("nfc_en", "nfc_sel", CCM_CSCDR2, 9);
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index 09d2832fbd78..71fd29348f28 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -9,6 +9,7 @@
9 * warranty of any kind, whether express or implied. 9 * warranty of any kind, whether express or implied.
10 */ 10 */
11 11
12#include <linux/clk.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/spinlock.h> 15#include <linux/spinlock.h>
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index 93e967c0f972..75244915df05 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -9,6 +9,7 @@
9 * warranty of any kind, whether express or implied. 9 * warranty of any kind, whether express or implied.
10 */ 10 */
11 11
12#include <linux/clk.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/spinlock.h> 15#include <linux/spinlock.h>
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index 993abcdb32cc..37ba04ba1368 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -9,6 +9,7 @@
9 * warranty of any kind, whether express or implied. 9 * warranty of any kind, whether express or implied.
10 */ 10 */
11 11
12#include <linux/clk.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/spinlock.h> 15#include <linux/spinlock.h>
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c
index 5484c31ec568..0ee1f363e4be 100644
--- a/drivers/clk/sunxi/clk-a10-pll2.c
+++ b/drivers/clk/sunxi/clk-a10-pll2.c
@@ -41,15 +41,10 @@
41 41
42#define SUN4I_PLL2_OUTPUTS 4 42#define SUN4I_PLL2_OUTPUTS 4
43 43
44struct sun4i_pll2_data {
45 u32 post_div_offset;
46 u32 pre_div_flags;
47};
48
49static DEFINE_SPINLOCK(sun4i_a10_pll2_lock); 44static DEFINE_SPINLOCK(sun4i_a10_pll2_lock);
50 45
51static void __init sun4i_pll2_setup(struct device_node *node, 46static void __init sun4i_pll2_setup(struct device_node *node,
52 struct sun4i_pll2_data *data) 47 int post_div_offset)
53{ 48{
54 const char *clk_name = node->name, *parent; 49 const char *clk_name = node->name, *parent;
55 struct clk **clks, *base_clk, *prediv_clk; 50 struct clk **clks, *base_clk, *prediv_clk;
@@ -76,7 +71,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
76 parent, 0, reg, 71 parent, 0, reg,
77 SUN4I_PLL2_PRE_DIV_SHIFT, 72 SUN4I_PLL2_PRE_DIV_SHIFT,
78 SUN4I_PLL2_PRE_DIV_WIDTH, 73 SUN4I_PLL2_PRE_DIV_WIDTH,
79 data->pre_div_flags, 74 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
80 &sun4i_a10_pll2_lock); 75 &sun4i_a10_pll2_lock);
81 if (!prediv_clk) { 76 if (!prediv_clk) {
82 pr_err("Couldn't register the prediv clock\n"); 77 pr_err("Couldn't register the prediv clock\n");
@@ -127,7 +122,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
127 */ 122 */
128 val = readl(reg); 123 val = readl(reg);
129 val &= ~(SUN4I_PLL2_POST_DIV_MASK << SUN4I_PLL2_POST_DIV_SHIFT); 124 val &= ~(SUN4I_PLL2_POST_DIV_MASK << SUN4I_PLL2_POST_DIV_SHIFT);
130 val |= (SUN4I_PLL2_POST_DIV_VALUE - data->post_div_offset) << SUN4I_PLL2_POST_DIV_SHIFT; 125 val |= (SUN4I_PLL2_POST_DIV_VALUE - post_div_offset) << SUN4I_PLL2_POST_DIV_SHIFT;
131 writel(val, reg); 126 writel(val, reg);
132 127
133 of_property_read_string_index(node, "clock-output-names", 128 of_property_read_string_index(node, "clock-output-names",
@@ -191,25 +186,17 @@ err_unmap:
191 iounmap(reg); 186 iounmap(reg);
192} 187}
193 188
194static struct sun4i_pll2_data sun4i_a10_pll2_data = {
195 .pre_div_flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
196};
197
198static void __init sun4i_a10_pll2_setup(struct device_node *node) 189static void __init sun4i_a10_pll2_setup(struct device_node *node)
199{ 190{
200 sun4i_pll2_setup(node, &sun4i_a10_pll2_data); 191 sun4i_pll2_setup(node, 0);
201} 192}
202 193
203CLK_OF_DECLARE(sun4i_a10_pll2, "allwinner,sun4i-a10-pll2-clk", 194CLK_OF_DECLARE(sun4i_a10_pll2, "allwinner,sun4i-a10-pll2-clk",
204 sun4i_a10_pll2_setup); 195 sun4i_a10_pll2_setup);
205 196
206static struct sun4i_pll2_data sun5i_a13_pll2_data = {
207 .post_div_offset = 1,
208};
209
210static void __init sun5i_a13_pll2_setup(struct device_node *node) 197static void __init sun5i_a13_pll2_setup(struct device_node *node)
211{ 198{
212 sun4i_pll2_setup(node, &sun5i_a13_pll2_data); 199 sun4i_pll2_setup(node, 1);
213} 200}
214 201
215CLK_OF_DECLARE(sun5i_a13_pll2, "allwinner,sun5i-a13-pll2-clk", 202CLK_OF_DECLARE(sun5i_a13_pll2, "allwinner,sun5i-a13-pll2-clk",
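
In the sun4i PLL2 hunks, the one-instance-per-variant struct sun4i_pll2_data disappears: the pre-divider flags turned out to be identical for the A10 and A13 and are now hard-coded at the divider registration, so the only value that actually differs, the post-divider offset, is passed as a plain int. The A13 passes 1, apparently because its register encodes the post divider off by one relative to the A10, which is why SUN4I_PLL2_POST_DIV_VALUE - post_div_offset is written above. A before/after of the entry points, condensed from the hunk:

    sun4i_pll2_setup(node, &sun4i_a10_pll2_data);  /* before: variant data behind a struct  */
    sun4i_pll2_setup(node, 0);                     /* after: A10, no post-divider offset    */
    sun4i_pll2_setup(node, 1);                     /* after: A13, register stores value - 1 */
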
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
index 1dfad0c712cd..2a5d84fdddc5 100644
--- a/drivers/clk/ti/clk-816x.c
+++ b/drivers/clk/ti/clk-816x.c
@@ -20,6 +20,8 @@ static struct ti_dt_clk dm816x_clks[] = {
20 DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"), 20 DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
21 DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"), 21 DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
22 DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"), 22 DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
23 DT_CLK(NULL, "timer_32k_ck", "sysclk18_ck"),
24 DT_CLK(NULL, "timer_ext_ck", "tclkin_ck"),
23 DT_CLK(NULL, "mpu_ck", "mpu_ck"), 25 DT_CLK(NULL, "mpu_ck", "mpu_ck"),
24 DT_CLK(NULL, "timer1_fck", "timer1_fck"), 26 DT_CLK(NULL, "timer1_fck", "timer1_fck"),
25 DT_CLK(NULL, "timer2_fck", "timer2_fck"), 27 DT_CLK(NULL, "timer2_fck", "timer2_fck"),
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
index 9023ca9caf84..b5cc6f66ae5d 100644
--- a/drivers/clk/ti/clkt_dpll.c
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -240,7 +240,7 @@ u8 omap2_init_dpll_parent(struct clk_hw *hw)
240 */ 240 */
241unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk) 241unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
242{ 242{
243 long long dpll_clk; 243 u64 dpll_clk;
244 u32 dpll_mult, dpll_div, v; 244 u32 dpll_mult, dpll_div, v;
245 struct dpll_data *dd; 245 struct dpll_data *dd;
246 246
@@ -262,7 +262,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
262 dpll_div = v & dd->div1_mask; 262 dpll_div = v & dd->div1_mask;
263 dpll_div >>= __ffs(dd->div1_mask); 263 dpll_div >>= __ffs(dd->div1_mask);
264 264
265 dpll_clk = (long long)clk_get_rate(dd->clk_ref) * dpll_mult; 265 dpll_clk = (u64)clk_get_rate(dd->clk_ref) * dpll_mult;
266 do_div(dpll_clk, dpll_div + 1); 266 do_div(dpll_clk, dpll_div + 1);
267 267
268 return dpll_clk; 268 return dpll_clk;
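
The long long -> u64 change matters because do_div() is specified for an unsigned 64-bit dividend; handing it a signed type provokes compiler and sparse warnings and is formally wrong. A minimal sketch of the idiom, assuming only the kernel's do_div() semantics (ref, mult and div are placeholders):

    u64 rate = (u64)clk_get_rate(ref) * mult; /* widen before multiplying to avoid 32-bit overflow      */
    do_div(rate, div + 1);                    /* modifies 'rate' in place; the expression value is the
                                                 remainder, which is discarded here                     */
    return rate;
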
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 5b1726829e6d..df2558350fc1 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -214,7 +214,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
214{ 214{
215 struct clk_divider *divider; 215 struct clk_divider *divider;
216 unsigned int div, value; 216 unsigned int div, value;
217 unsigned long flags = 0;
218 u32 val; 217 u32 val;
219 218
220 if (!hw || !rate) 219 if (!hw || !rate)
@@ -228,9 +227,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
228 if (value > div_mask(divider)) 227 if (value > div_mask(divider))
229 value = div_mask(divider); 228 value = div_mask(divider);
230 229
231 if (divider->lock)
232 spin_lock_irqsave(divider->lock, flags);
233
234 if (divider->flags & CLK_DIVIDER_HIWORD_MASK) { 230 if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
235 val = div_mask(divider) << (divider->shift + 16); 231 val = div_mask(divider) << (divider->shift + 16);
236 } else { 232 } else {
@@ -240,9 +236,6 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
240 val |= value << divider->shift; 236 val |= value << divider->shift;
241 ti_clk_ll_ops->clk_writel(val, divider->reg); 237 ti_clk_ll_ops->clk_writel(val, divider->reg);
242 238
243 if (divider->lock)
244 spin_unlock_irqrestore(divider->lock, flags);
245
246 return 0; 239 return 0;
247} 240}
248 241
@@ -256,8 +249,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
256 const char *parent_name, 249 const char *parent_name,
257 unsigned long flags, void __iomem *reg, 250 unsigned long flags, void __iomem *reg,
258 u8 shift, u8 width, u8 clk_divider_flags, 251 u8 shift, u8 width, u8 clk_divider_flags,
259 const struct clk_div_table *table, 252 const struct clk_div_table *table)
260 spinlock_t *lock)
261{ 253{
262 struct clk_divider *div; 254 struct clk_divider *div;
263 struct clk *clk; 255 struct clk *clk;
@@ -288,7 +280,6 @@ static struct clk *_register_divider(struct device *dev, const char *name,
288 div->shift = shift; 280 div->shift = shift;
289 div->width = width; 281 div->width = width;
290 div->flags = clk_divider_flags; 282 div->flags = clk_divider_flags;
291 div->lock = lock;
292 div->hw.init = &init; 283 div->hw.init = &init;
293 div->table = table; 284 div->table = table;
294 285
@@ -421,7 +412,7 @@ struct clk *ti_clk_register_divider(struct ti_clk *setup)
421 412
422 clk = _register_divider(NULL, setup->name, div->parent, 413 clk = _register_divider(NULL, setup->name, div->parent,
423 flags, (void __iomem *)reg, div->bit_shift, 414 flags, (void __iomem *)reg, div->bit_shift,
424 width, div_flags, table, NULL); 415 width, div_flags, table);
425 416
426 if (IS_ERR(clk)) 417 if (IS_ERR(clk))
427 kfree(table); 418 kfree(table);
@@ -584,8 +575,7 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
584 goto cleanup; 575 goto cleanup;
585 576
586 clk = _register_divider(NULL, node->name, parent_name, flags, reg, 577 clk = _register_divider(NULL, node->name, parent_name, flags, reg,
587 shift, width, clk_divider_flags, table, 578 shift, width, clk_divider_flags, table);
588 NULL);
589 579
590 if (!IS_ERR(clk)) { 580 if (!IS_ERR(clk)) {
591 of_clk_add_provider(node, of_clk_src_simple_get, clk); 581 of_clk_add_provider(node, of_clk_src_simple_get, clk);
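
ti_clk_divider_set_rate() and _register_divider() drop the optional per-clock spinlock entirely; serialization of these register writes is handled elsewhere (the ti_clk_ll_ops accessor layer and the clk core's locking), and as the two call sites above show, every caller was passing NULL anyway. Note also why the CLK_DIVIDER_HIWORD_MASK branch never needed a read-modify-write in the first place: the upper 16 bits of the register are a write-enable mask selecting which low bits the store affects, so a single write suffices:

    /* hiword-mask update, sketched from the branch above: no read, no lock */
    val = (div_mask(divider) << (divider->shift + 16)) | (value << divider->shift);
    ti_clk_ll_ops->clk_writel(val, divider->reg);
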
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index f4b2e9888bdf..66a0d0ed8b55 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -168,7 +168,7 @@ static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
168{ 168{
169 struct fapll_data *fd = to_fapll(hw); 169 struct fapll_data *fd = to_fapll(hw);
170 u32 fapll_n, fapll_p, v; 170 u32 fapll_n, fapll_p, v;
171 long long rate; 171 u64 rate;
172 172
173 if (ti_fapll_clock_is_bypass(fd)) 173 if (ti_fapll_clock_is_bypass(fd))
174 return parent_rate; 174 return parent_rate;
@@ -314,7 +314,7 @@ static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
314{ 314{
315 struct fapll_synth *synth = to_synth(hw); 315 struct fapll_synth *synth = to_synth(hw);
316 u32 synth_div_m; 316 u32 synth_div_m;
317 long long rate; 317 u64 rate;
318 318
319 /* The audio_pll_clk1 is hardwired to produce 32.768KiHz clock */ 319 /* The audio_pll_clk1 is hardwired to produce 32.768KiHz clock */
320 if (!synth->div) 320 if (!synth->div)
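
Same u64-for-do_div() correction as in clkt_dpll.c above; both ti_fapll_recalc_rate() and ti_fapll_synth_recalc_rate() feed these rate variables into 64-bit divisions.
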
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 69f08a1d047d..dab9ba88b9d6 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -69,7 +69,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
69{ 69{
70 struct clk_mux *mux = to_clk_mux(hw); 70 struct clk_mux *mux = to_clk_mux(hw);
71 u32 val; 71 u32 val;
72 unsigned long flags = 0;
73 72
74 if (mux->table) { 73 if (mux->table) {
75 index = mux->table[index]; 74 index = mux->table[index];
@@ -81,9 +80,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
81 index++; 80 index++;
82 } 81 }
83 82
84 if (mux->lock)
85 spin_lock_irqsave(mux->lock, flags);
86
87 if (mux->flags & CLK_MUX_HIWORD_MASK) { 83 if (mux->flags & CLK_MUX_HIWORD_MASK) {
88 val = mux->mask << (mux->shift + 16); 84 val = mux->mask << (mux->shift + 16);
89 } else { 85 } else {
@@ -93,9 +89,6 @@ static int ti_clk_mux_set_parent(struct clk_hw *hw, u8 index)
93 val |= index << mux->shift; 89 val |= index << mux->shift;
94 ti_clk_ll_ops->clk_writel(val, mux->reg); 90 ti_clk_ll_ops->clk_writel(val, mux->reg);
95 91
96 if (mux->lock)
97 spin_unlock_irqrestore(mux->lock, flags);
98
99 return 0; 92 return 0;
100} 93}
101 94
@@ -109,7 +102,7 @@ static struct clk *_register_mux(struct device *dev, const char *name,
109 const char **parent_names, u8 num_parents, 102 const char **parent_names, u8 num_parents,
110 unsigned long flags, void __iomem *reg, 103 unsigned long flags, void __iomem *reg,
111 u8 shift, u32 mask, u8 clk_mux_flags, 104 u8 shift, u32 mask, u8 clk_mux_flags,
112 u32 *table, spinlock_t *lock) 105 u32 *table)
113{ 106{
114 struct clk_mux *mux; 107 struct clk_mux *mux;
115 struct clk *clk; 108 struct clk *clk;
@@ -133,7 +126,6 @@ static struct clk *_register_mux(struct device *dev, const char *name,
133 mux->shift = shift; 126 mux->shift = shift;
134 mux->mask = mask; 127 mux->mask = mask;
135 mux->flags = clk_mux_flags; 128 mux->flags = clk_mux_flags;
136 mux->lock = lock;
137 mux->table = table; 129 mux->table = table;
138 mux->hw.init = &init; 130 mux->hw.init = &init;
139 131
@@ -175,7 +167,7 @@ struct clk *ti_clk_register_mux(struct ti_clk *setup)
175 167
176 return _register_mux(NULL, setup->name, mux->parents, mux->num_parents, 168 return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
177 flags, (void __iomem *)reg, mux->bit_shift, mask, 169 flags, (void __iomem *)reg, mux->bit_shift, mask,
178 mux_flags, NULL, NULL); 170 mux_flags, NULL);
179} 171}
180 172
181/** 173/**
@@ -227,8 +219,7 @@ static void of_mux_clk_setup(struct device_node *node)
227 mask = (1 << fls(mask)) - 1; 219 mask = (1 << fls(mask)) - 1;
228 220
229 clk = _register_mux(NULL, node->name, parent_names, num_parents, 221 clk = _register_mux(NULL, node->name, parent_names, num_parents,
230 flags, reg, shift, mask, clk_mux_flags, NULL, 222 flags, reg, shift, mask, clk_mux_flags, NULL);
231 NULL);
232 223
233 if (!IS_ERR(clk)) 224 if (!IS_ERR(clk))
234 of_clk_add_provider(node, of_clk_src_simple_get, clk); 225 of_clk_add_provider(node, of_clk_src_simple_get, clk);
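
The mux driver gets the same treatment as the divider above: the unused per-clock spinlock is removed from ti_clk_mux_set_parent() and from the _register_mux() signature, and the two call sites drop their NULL lock arguments.
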
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 71cfdf7c9708..2eb5f0efae90 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,4 +1,5 @@
1menu "Clock Source drivers" 1menu "Clock Source drivers"
2 depends on !ARCH_USES_GETTIMEOFFSET
2 3
3config CLKSRC_OF 4config CLKSRC_OF
4 bool 5 bool
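
Putting 'depends on !ARCH_USES_GETTIMEOFFSET' on the menu itself hides every clocksource driver entry at once on architectures still using the legacy gettimeoffset timekeeping, with which these generic clocksources do not work.
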
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 10202f1fdfd7..517e1c7624d4 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -203,7 +203,7 @@ static int __init ftm_clockevent_init(unsigned long freq, int irq)
203 int err; 203 int err;
204 204
205 ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN); 205 ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN);
206 ftm_writel(~0UL, priv->clkevt_base + FTM_MOD); 206 ftm_writel(~0u, priv->clkevt_base + FTM_MOD);
207 207
208 ftm_reset_counter(priv->clkevt_base); 208 ftm_reset_counter(priv->clkevt_base);
209 209
@@ -230,7 +230,7 @@ static int __init ftm_clocksource_init(unsigned long freq)
230 int err; 230 int err;
231 231
232 ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN); 232 ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN);
233 ftm_writel(~0UL, priv->clksrc_base + FTM_MOD); 233 ftm_writel(~0u, priv->clksrc_base + FTM_MOD);
234 234
235 ftm_reset_counter(priv->clksrc_base); 235 ftm_reset_counter(priv->clksrc_base);
236 236
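
FTM_MOD is a 32-bit register, so the all-ones initial period should be a 32-bit constant: on configurations where unsigned long is 64 bits, ~0UL is 0xffffffffffffffff and the ftm_writel() argument was being silently truncated (with a warning), whereas ~0u is exactly the intended 0xffffffff.
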
diff --git a/drivers/clocksource/mmio.c b/drivers/clocksource/mmio.c
index 1593ade2a815..c4f7d7a9b689 100644
--- a/drivers/clocksource/mmio.c
+++ b/drivers/clocksource/mmio.c
@@ -55,7 +55,7 @@ int __init clocksource_mmio_init(void __iomem *base, const char *name,
55{ 55{
56 struct clocksource_mmio *cs; 56 struct clocksource_mmio *cs;
57 57
58 if (bits > 32 || bits < 16) 58 if (bits > 64 || bits < 16)
59 return -EINVAL; 59 return -EINVAL;
60 60
61 cs = kzalloc(sizeof(struct clocksource_mmio), GFP_KERNEL); 61 cs = kzalloc(sizeof(struct clocksource_mmio), GFP_KERNEL);
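
Raising the upper bound from 32 to 64 lets clocksource_mmio_init() accept 64-bit counters; the mask it derives from 'bits' already copes with that. For reference, the kernel computes the mask roughly as (paraphrased from the v4.4-era definition in include/linux/clocksource.h):

    #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL << (bits)) - 1) : -1)

so bits == 64 yields an all-ones mask rather than an undefined 1ULL << 64 shift.
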
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1582c1c016b0..b1f8a73e5a94 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
84config ARM_MT8173_CPUFREQ 84config ARM_MT8173_CPUFREQ
85 bool "Mediatek MT8173 CPUFreq support" 85 bool "Mediatek MT8173 CPUFreq support"
86 depends on ARCH_MEDIATEK && REGULATOR 86 depends on ARCH_MEDIATEK && REGULATOR
87 depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
87 depends on !CPU_THERMAL || THERMAL=y 88 depends on !CPU_THERMAL || THERMAL=y
88 select PM_OPP 89 select PM_OPP
89 help 90 help
@@ -201,7 +202,7 @@ config ARM_SA1110_CPUFREQ
201 202
202config ARM_SCPI_CPUFREQ 203config ARM_SCPI_CPUFREQ
203 tristate "SCPI based CPUfreq driver" 204 tristate "SCPI based CPUfreq driver"
204 depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL 205 depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
205 help 206 help
206 This adds the CPUfreq driver support for ARM big.LITTLE platforms 207 This adds the CPUfreq driver support for ARM big.LITTLE platforms
207 using SCPI protocol for CPU power management. 208 using SCPI protocol for CPU power management.
@@ -225,7 +226,7 @@ config ARM_TEGRA20_CPUFREQ
225 226
226config ARM_TEGRA124_CPUFREQ 227config ARM_TEGRA124_CPUFREQ
227 tristate "Tegra124 CPUFreq support" 228 tristate "Tegra124 CPUFreq support"
228 depends on ARCH_TEGRA && CPUFREQ_DT 229 depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR
229 default y 230 default y
230 help 231 help
231 This adds the CPUFreq driver support for Tegra124 SOCs. 232 This adds the CPUFreq driver support for Tegra124 SOCs.
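
Three build-dependency fixes ride together here: MT8173 cpufreq relies on CPU topology information, so it is restricted to ARM64 (or 32-bit ARM with ARM_CPU_TOPOLOGY, for compile testing); the SCPI driver cannot deliver rates without the SCPI clock driver, hence COMMON_CLK_SCPI; and Tegra124 cpufreq apparently adjusts a CPU supply, hence REGULATOR.
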
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index adbd1de1cea5..c59bdcb83217 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -5,7 +5,6 @@
5config X86_INTEL_PSTATE 5config X86_INTEL_PSTATE
6 bool "Intel P state control" 6 bool "Intel P state control"
7 depends on X86 7 depends on X86
8 select ACPI_PROCESSOR if ACPI
9 help 8 help
10 This driver provides a P state for Intel core processors. 9 This driver provides a P state for Intel core processors.
11 The driver implements an internal governor and will become 10 The driver implements an internal governor and will become
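
Dropping 'select ACPI_PROCESSOR' here pairs with the intel_pstate.c hunks further down in this merge, which revert the short-lived ACPI _PSS limit support that needed it.
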
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index e8cb334094b0..7c0bdfb1a2ca 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -98,10 +98,11 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
98 policy->max = cpu->perf_caps.highest_perf; 98 policy->max = cpu->perf_caps.highest_perf;
99 policy->cpuinfo.min_freq = policy->min; 99 policy->cpuinfo.min_freq = policy->min;
100 policy->cpuinfo.max_freq = policy->max; 100 policy->cpuinfo.max_freq = policy->max;
101 policy->shared_type = cpu->shared_type;
101 102
102 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) 103 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
103 cpumask_copy(policy->cpus, cpu->shared_cpu_map); 104 cpumask_copy(policy->cpus, cpu->shared_cpu_map);
104 else { 105 else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
105 /* Support only SW_ANY for now. */ 106 /* Support only SW_ANY for now. */
106 pr_debug("Unsupported CPU co-ord type\n"); 107 pr_debug("Unsupported CPU co-ord type\n");
107 return -EFAULT; 108 return -EFAULT;
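
policy->shared_type was never being initialized, so the coordination-type check below always compared against zero. Copying cpu->shared_type first makes the branches meaningful: CPUFREQ_SHARED_TYPE_ANY populates the shared-CPU mask, CPUFREQ_SHARED_TYPE_ALL is rejected as unsupported, and the remaining types fall through.
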
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7c48e7316d91..8412ce5f93a7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -976,10 +976,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
976 976
977 new_policy.governor = gov; 977 new_policy.governor = gov;
978 978
979 /* Use the default policy if its valid. */ 979 /* Use the default policy if there is no last_policy. */
980 if (cpufreq_driver->setpolicy) 980 if (cpufreq_driver->setpolicy) {
981 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); 981 if (policy->last_policy)
982 982 new_policy.policy = policy->last_policy;
983 else
984 cpufreq_parse_governor(gov->name, &new_policy.policy,
985 NULL);
986 }
983 /* set default policy */ 987 /* set default policy */
984 return cpufreq_set_policy(policy, &new_policy); 988 return cpufreq_set_policy(policy, &new_policy);
985} 989}
@@ -1330,6 +1334,8 @@ static void cpufreq_offline_prepare(unsigned int cpu)
1330 if (has_target()) 1334 if (has_target())
1331 strncpy(policy->last_governor, policy->governor->name, 1335 strncpy(policy->last_governor, policy->governor->name,
1332 CPUFREQ_NAME_LEN); 1336 CPUFREQ_NAME_LEN);
1337 else
1338 policy->last_policy = policy->policy;
1333 } else if (cpu == policy->cpu) { 1339 } else if (cpu == policy->cpu) {
1334 /* Nominate new CPU */ 1340 /* Nominate new CPU */
1335 policy->cpu = cpumask_any(policy->cpus); 1341 policy->cpu = cpumask_any(policy->cpus);
@@ -1401,13 +1407,10 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1401 } 1407 }
1402 1408
1403 cpumask_clear_cpu(cpu, policy->real_cpus); 1409 cpumask_clear_cpu(cpu, policy->real_cpus);
1410 remove_cpu_dev_symlink(policy, cpu);
1404 1411
1405 if (cpumask_empty(policy->real_cpus)) { 1412 if (cpumask_empty(policy->real_cpus))
1406 cpufreq_policy_free(policy, true); 1413 cpufreq_policy_free(policy, true);
1407 return;
1408 }
1409
1410 remove_cpu_dev_symlink(policy, cpu);
1411} 1414}
1412 1415
1413static void handle_update(struct work_struct *work) 1416static void handle_update(struct work_struct *work)
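
Two cpufreq-core fixes. First, drivers with a ->setpolicy() callback have no governor name to remember across CPU offline, so cpufreq_offline_prepare() now records the policy itself in last_policy, and cpufreq_init_policy() restores it instead of resetting to the compiled-in default; a "performance" setting on intel_pstate now survives hotplug. Second, remove_cpu_dev_symlink() is called for every departing CPU before the policy may be freed; the old code returned early once real_cpus became empty, skipping the symlink removal for that last CPU.
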
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2e31d097def6..98fb8821382d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,14 +34,10 @@
34#include <asm/cpu_device_id.h> 34#include <asm/cpu_device_id.h>
35#include <asm/cpufeature.h> 35#include <asm/cpufeature.h>
36 36
37#if IS_ENABLED(CONFIG_ACPI) 37#define ATOM_RATIOS 0x66a
38#include <acpi/processor.h> 38#define ATOM_VIDS 0x66b
39#endif 39#define ATOM_TURBO_RATIOS 0x66c
40 40#define ATOM_TURBO_VIDS 0x66d
41#define BYT_RATIOS 0x66a
42#define BYT_VIDS 0x66b
43#define BYT_TURBO_RATIOS 0x66c
44#define BYT_TURBO_VIDS 0x66d
45 41
46#define FRAC_BITS 8 42#define FRAC_BITS 8
47#define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 43#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
@@ -117,9 +113,6 @@ struct cpudata {
117 u64 prev_mperf; 113 u64 prev_mperf;
118 u64 prev_tsc; 114 u64 prev_tsc;
119 struct sample sample; 115 struct sample sample;
120#if IS_ENABLED(CONFIG_ACPI)
121 struct acpi_processor_performance acpi_perf_data;
122#endif
123}; 116};
124 117
125static struct cpudata **all_cpu_data; 118static struct cpudata **all_cpu_data;
@@ -150,7 +143,6 @@ struct cpu_defaults {
150static struct pstate_adjust_policy pid_params; 143static struct pstate_adjust_policy pid_params;
151static struct pstate_funcs pstate_funcs; 144static struct pstate_funcs pstate_funcs;
152static int hwp_active; 145static int hwp_active;
153static int no_acpi_perf;
154 146
155struct perf_limits { 147struct perf_limits {
156 int no_turbo; 148 int no_turbo;
@@ -163,8 +155,6 @@ struct perf_limits {
163 int max_sysfs_pct; 155 int max_sysfs_pct;
164 int min_policy_pct; 156 int min_policy_pct;
165 int min_sysfs_pct; 157 int min_sysfs_pct;
166 int max_perf_ctl;
167 int min_perf_ctl;
168}; 158};
169 159
170static struct perf_limits performance_limits = { 160static struct perf_limits performance_limits = {
@@ -191,8 +181,6 @@ static struct perf_limits powersave_limits = {
191 .max_sysfs_pct = 100, 181 .max_sysfs_pct = 100,
192 .min_policy_pct = 0, 182 .min_policy_pct = 0,
193 .min_sysfs_pct = 0, 183 .min_sysfs_pct = 0,
194 .max_perf_ctl = 0,
195 .min_perf_ctl = 0,
196}; 184};
197 185
198#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE 186#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@@ -201,153 +189,6 @@ static struct perf_limits *limits = &performance_limits;
201static struct perf_limits *limits = &powersave_limits; 189static struct perf_limits *limits = &powersave_limits;
202#endif 190#endif
203 191
204#if IS_ENABLED(CONFIG_ACPI)
205/*
206 * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
207 * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
208 * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
209 * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
210 * target ratio 0x17. The _PSS control value stores in a format which can be
211 * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
212 * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
213 * This function converts the _PSS control value to intel pstate driver format
214 * for comparison and assignment.
215 */
216static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
217{
218 return cpu->acpi_perf_data.states[index].control >> 8;
219}
220
221static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
222{
223 struct cpudata *cpu;
224 int ret;
225 bool turbo_absent = false;
226 int max_pstate_index;
227 int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
228 int i;
229
230 cpu = all_cpu_data[policy->cpu];
231
232 pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
233 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
234 cpu->pstate.turbo_pstate);
235
236 if (!cpu->acpi_perf_data.shared_cpu_map &&
237 zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
238 GFP_KERNEL, cpu_to_node(policy->cpu))) {
239 return -ENOMEM;
240 }
241
242 ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
243 policy->cpu);
244 if (ret)
245 return ret;
246
247 /*
248 * Check if the control value in _PSS is for PERF_CTL MSR, which should
249 * guarantee that the states returned by it map to the states in our
250 * list directly.
251 */
252 if (cpu->acpi_perf_data.control_register.space_id !=
253 ACPI_ADR_SPACE_FIXED_HARDWARE)
254 return -EIO;
255
256 pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
257 for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
258 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
259 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
260 (u32) cpu->acpi_perf_data.states[i].core_frequency,
261 (u32) cpu->acpi_perf_data.states[i].power,
262 (u32) cpu->acpi_perf_data.states[i].control);
263
264 /*
265 * If there is only one entry _PSS, simply ignore _PSS and continue as
266 * usual without taking _PSS into account
267 */
268 if (cpu->acpi_perf_data.state_count < 2)
269 return 0;
270
271 turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
272 min_pss_ctl = convert_to_native_pstate_format(cpu,
273 cpu->acpi_perf_data.state_count - 1);
274 /* Check if there is a turbo freq in _PSS */
275 if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
276 turbo_pss_ctl > cpu->pstate.min_pstate) {
277 pr_debug("intel_pstate: no turbo range exists in _PSS\n");
278 limits->no_turbo = limits->turbo_disabled = 1;
279 cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
280 turbo_absent = true;
281 }
282
283 /* Check if the max non turbo p state < Intel P state max */
284 max_pstate_index = turbo_absent ? 0 : 1;
285 max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
286 if (max_pss_ctl < cpu->pstate.max_pstate &&
287 max_pss_ctl > cpu->pstate.min_pstate)
288 cpu->pstate.max_pstate = max_pss_ctl;
289
290 /* check If min perf > Intel P State min */
291 if (min_pss_ctl > cpu->pstate.min_pstate &&
292 min_pss_ctl < cpu->pstate.max_pstate) {
293 cpu->pstate.min_pstate = min_pss_ctl;
294 policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
295 }
296
297 if (turbo_absent)
298 policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
299 cpu->pstate.scaling;
300 else {
301 policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
302 cpu->pstate.scaling;
303 /*
304 * The _PSS table doesn't contain whole turbo frequency range.
305 * This just contains +1 MHZ above the max non turbo frequency,
306 * with control value corresponding to max turbo ratio. But
307 * when cpufreq set policy is called, it will call with this
308 * max frequency, which will cause a reduced performance as
309 * this driver uses real max turbo frequency as the max
310 * frequeny. So correct this frequency in _PSS table to
311 * correct max turbo frequency based on the turbo ratio.
312 * Also need to convert to MHz as _PSS freq is in MHz.
313 */
314 cpu->acpi_perf_data.states[0].core_frequency =
315 turbo_pss_ctl * 100;
316 }
317
318 pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
319 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
320 cpu->pstate.turbo_pstate);
321 pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n",
322 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
323
324 return 0;
325}
326
327static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
328{
329 struct cpudata *cpu;
330
331 if (!no_acpi_perf)
332 return 0;
333
334 cpu = all_cpu_data[policy->cpu];
335 acpi_processor_unregister_performance(policy->cpu);
336 return 0;
337}
338
339#else
340static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
341{
342 return 0;
343}
344
345static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
346{
347 return 0;
348}
349#endif
350
351static inline void pid_reset(struct _pid *pid, int setpoint, int busy, 192static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
352 int deadband, int integral) { 193 int deadband, int integral) {
353 pid->setpoint = setpoint; 194 pid->setpoint = setpoint;
@@ -687,31 +528,31 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
687 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); 528 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
688} 529}
689 530
690static int byt_get_min_pstate(void) 531static int atom_get_min_pstate(void)
691{ 532{
692 u64 value; 533 u64 value;
693 534
694 rdmsrl(BYT_RATIOS, value); 535 rdmsrl(ATOM_RATIOS, value);
695 return (value >> 8) & 0x7F; 536 return (value >> 8) & 0x7F;
696} 537}
697 538
698static int byt_get_max_pstate(void) 539static int atom_get_max_pstate(void)
699{ 540{
700 u64 value; 541 u64 value;
701 542
702 rdmsrl(BYT_RATIOS, value); 543 rdmsrl(ATOM_RATIOS, value);
703 return (value >> 16) & 0x7F; 544 return (value >> 16) & 0x7F;
704} 545}
705 546
706static int byt_get_turbo_pstate(void) 547static int atom_get_turbo_pstate(void)
707{ 548{
708 u64 value; 549 u64 value;
709 550
710 rdmsrl(BYT_TURBO_RATIOS, value); 551 rdmsrl(ATOM_TURBO_RATIOS, value);
711 return value & 0x7F; 552 return value & 0x7F;
712} 553}
713 554
714static void byt_set_pstate(struct cpudata *cpudata, int pstate) 555static void atom_set_pstate(struct cpudata *cpudata, int pstate)
715{ 556{
716 u64 val; 557 u64 val;
717 int32_t vid_fp; 558 int32_t vid_fp;
@@ -736,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
736 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); 577 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
737} 578}
738 579
739#define BYT_BCLK_FREQS 5 580static int silvermont_get_scaling(void)
740static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
741
742static int byt_get_scaling(void)
743{ 581{
744 u64 value; 582 u64 value;
745 int i; 583 int i;
584 /* Defined in Table 35-6 from SDM (Sept 2015) */
585 static int silvermont_freq_table[] = {
586 83300, 100000, 133300, 116700, 80000};
746 587
747 rdmsrl(MSR_FSB_FREQ, value); 588 rdmsrl(MSR_FSB_FREQ, value);
748 i = value & 0x3; 589 i = value & 0x7;
590 WARN_ON(i > 4);
749 591
750 BUG_ON(i > BYT_BCLK_FREQS); 592 return silvermont_freq_table[i];
593}
751 594
752 return byt_freq_table[i] * 100; 595static int airmont_get_scaling(void)
596{
597 u64 value;
598 int i;
599 /* Defined in Table 35-10 from SDM (Sept 2015) */
600 static int airmont_freq_table[] = {
601 83300, 100000, 133300, 116700, 80000,
602 93300, 90000, 88900, 87500};
603
604 rdmsrl(MSR_FSB_FREQ, value);
605 i = value & 0xF;
606 WARN_ON(i > 8);
607
608 return airmont_freq_table[i];
753} 609}
754 610
755static void byt_get_vid(struct cpudata *cpudata) 611static void atom_get_vid(struct cpudata *cpudata)
756{ 612{
757 u64 value; 613 u64 value;
758 614
759 rdmsrl(BYT_VIDS, value); 615 rdmsrl(ATOM_VIDS, value);
760 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 616 cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
761 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 617 cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
762 cpudata->vid.ratio = div_fp( 618 cpudata->vid.ratio = div_fp(
@@ -764,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata)
764 int_tofp(cpudata->pstate.max_pstate - 620 int_tofp(cpudata->pstate.max_pstate -
765 cpudata->pstate.min_pstate)); 621 cpudata->pstate.min_pstate));
766 622
767 rdmsrl(BYT_TURBO_VIDS, value); 623 rdmsrl(ATOM_TURBO_VIDS, value);
768 cpudata->vid.turbo = value & 0x7f; 624 cpudata->vid.turbo = value & 0x7f;
769} 625}
770 626
@@ -885,7 +741,7 @@ static struct cpu_defaults core_params = {
885 }, 741 },
886}; 742};
887 743
888static struct cpu_defaults byt_params = { 744static struct cpu_defaults silvermont_params = {
889 .pid_policy = { 745 .pid_policy = {
890 .sample_rate_ms = 10, 746 .sample_rate_ms = 10,
891 .deadband = 0, 747 .deadband = 0,
@@ -895,13 +751,33 @@ static struct cpu_defaults byt_params = {
895 .i_gain_pct = 4, 751 .i_gain_pct = 4,
896 }, 752 },
897 .funcs = { 753 .funcs = {
898 .get_max = byt_get_max_pstate, 754 .get_max = atom_get_max_pstate,
899 .get_max_physical = byt_get_max_pstate, 755 .get_max_physical = atom_get_max_pstate,
900 .get_min = byt_get_min_pstate, 756 .get_min = atom_get_min_pstate,
901 .get_turbo = byt_get_turbo_pstate, 757 .get_turbo = atom_get_turbo_pstate,
902 .set = byt_set_pstate, 758 .set = atom_set_pstate,
903 .get_scaling = byt_get_scaling, 759 .get_scaling = silvermont_get_scaling,
904 .get_vid = byt_get_vid, 760 .get_vid = atom_get_vid,
761 },
762};
763
764static struct cpu_defaults airmont_params = {
765 .pid_policy = {
766 .sample_rate_ms = 10,
767 .deadband = 0,
768 .setpoint = 60,
769 .p_gain_pct = 14,
770 .d_gain_pct = 0,
771 .i_gain_pct = 4,
772 },
773 .funcs = {
774 .get_max = atom_get_max_pstate,
775 .get_max_physical = atom_get_max_pstate,
776 .get_min = atom_get_min_pstate,
777 .get_turbo = atom_get_turbo_pstate,
778 .set = atom_set_pstate,
779 .get_scaling = airmont_get_scaling,
780 .get_vid = atom_get_vid,
905 }, 781 },
906}; 782};
907 783
@@ -938,23 +814,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
938 * policy, or by cpu specific default values determined through 814 * policy, or by cpu specific default values determined through
939 * experimentation. 815 * experimentation.
940 */ 816 */
941 if (limits->max_perf_ctl && limits->max_sysfs_pct >= 817 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
942 limits->max_policy_pct) { 818 *max = clamp_t(int, max_perf_adj,
943 *max = limits->max_perf_ctl; 819 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
944 } else {
945 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
946 limits->max_perf));
947 *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
948 cpu->pstate.turbo_pstate);
949 }
950 820
951 if (limits->min_perf_ctl) { 821 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
952 *min = limits->min_perf_ctl; 822 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
953 } else {
954 min_perf = fp_toint(mul_fp(int_tofp(max_perf),
955 limits->min_perf));
956 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
957 }
958} 823}
959 824
960static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force) 825static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
@@ -1153,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data)
1153static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 1018static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
1154 ICPU(0x2a, core_params), 1019 ICPU(0x2a, core_params),
1155 ICPU(0x2d, core_params), 1020 ICPU(0x2d, core_params),
1156 ICPU(0x37, byt_params), 1021 ICPU(0x37, silvermont_params),
1157 ICPU(0x3a, core_params), 1022 ICPU(0x3a, core_params),
1158 ICPU(0x3c, core_params), 1023 ICPU(0x3c, core_params),
1159 ICPU(0x3d, core_params), 1024 ICPU(0x3d, core_params),
@@ -1162,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
1162 ICPU(0x45, core_params), 1027 ICPU(0x45, core_params),
1163 ICPU(0x46, core_params), 1028 ICPU(0x46, core_params),
1164 ICPU(0x47, core_params), 1029 ICPU(0x47, core_params),
1165 ICPU(0x4c, byt_params), 1030 ICPU(0x4c, airmont_params),
1166 ICPU(0x4e, core_params), 1031 ICPU(0x4e, core_params),
1167 ICPU(0x4f, core_params), 1032 ICPU(0x4f, core_params),
1168 ICPU(0x5e, core_params), 1033 ICPU(0x5e, core_params),
@@ -1229,12 +1094,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
1229 1094
1230static int intel_pstate_set_policy(struct cpufreq_policy *policy) 1095static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1231{ 1096{
1232#if IS_ENABLED(CONFIG_ACPI)
1233 struct cpudata *cpu;
1234 int i;
1235#endif
1236 pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
1237 policy->cpuinfo.max_freq, policy->max);
1238 if (!policy->cpuinfo.max_freq) 1097 if (!policy->cpuinfo.max_freq)
1239 return -ENODEV; 1098 return -ENODEV;
1240 1099
@@ -1242,6 +1101,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1242 policy->max >= policy->cpuinfo.max_freq) { 1101 policy->max >= policy->cpuinfo.max_freq) {
1243 pr_debug("intel_pstate: set performance\n"); 1102 pr_debug("intel_pstate: set performance\n");
1244 limits = &performance_limits; 1103 limits = &performance_limits;
1104 if (hwp_active)
1105 intel_pstate_hwp_set();
1245 return 0; 1106 return 0;
1246 } 1107 }
1247 1108
@@ -1249,7 +1110,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1249 limits = &powersave_limits; 1110 limits = &powersave_limits;
1250 limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 1111 limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
1251 limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100); 1112 limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
1252 limits->max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq; 1113 limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
1114 policy->cpuinfo.max_freq);
1253 limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100); 1115 limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100);
1254 1116
1255 /* Normalize user input to [min_policy_pct, max_policy_pct] */ 1117 /* Normalize user input to [min_policy_pct, max_policy_pct] */
@@ -1261,6 +1123,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1261 limits->max_sysfs_pct); 1123 limits->max_sysfs_pct);
1262 limits->max_perf_pct = max(limits->min_policy_pct, 1124 limits->max_perf_pct = max(limits->min_policy_pct,
1263 limits->max_perf_pct); 1125 limits->max_perf_pct);
1126 limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
1264 1127
1265 /* Make sure min_perf_pct <= max_perf_pct */ 1128 /* Make sure min_perf_pct <= max_perf_pct */
1266 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); 1129 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
@@ -1270,23 +1133,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1270 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), 1133 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
1271 int_tofp(100)); 1134 int_tofp(100));
1272 1135
1273#if IS_ENABLED(CONFIG_ACPI)
1274 cpu = all_cpu_data[policy->cpu];
1275 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
1276 int control;
1277
1278 control = convert_to_native_pstate_format(cpu, i);
1279 if (control * cpu->pstate.scaling == policy->max)
1280 limits->max_perf_ctl = control;
1281 if (control * cpu->pstate.scaling == policy->min)
1282 limits->min_perf_ctl = control;
1283 }
1284
1285 pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
1286 policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
1287 limits->max_perf_ctl);
1288#endif
1289
1290 if (hwp_active) 1136 if (hwp_active)
1291 intel_pstate_hwp_set(); 1137 intel_pstate_hwp_set();
1292 1138
@@ -1341,30 +1187,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
1341 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; 1187 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
1342 policy->cpuinfo.max_freq = 1188 policy->cpuinfo.max_freq =
1343 cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1189 cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1344 if (!no_acpi_perf)
1345 intel_pstate_init_perf_limits(policy);
1346 /*
1347 * If there is no acpi perf data or error, we ignore and use Intel P
1348 * state calculated limits, So this is not fatal error.
1349 */
1350 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 1190 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
1351 cpumask_set_cpu(policy->cpu, policy->cpus); 1191 cpumask_set_cpu(policy->cpu, policy->cpus);
1352 1192
1353 return 0; 1193 return 0;
1354} 1194}
1355 1195
1356static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
1357{
1358 return intel_pstate_exit_perf_limits(policy);
1359}
1360
1361static struct cpufreq_driver intel_pstate_driver = { 1196static struct cpufreq_driver intel_pstate_driver = {
1362 .flags = CPUFREQ_CONST_LOOPS, 1197 .flags = CPUFREQ_CONST_LOOPS,
1363 .verify = intel_pstate_verify_policy, 1198 .verify = intel_pstate_verify_policy,
1364 .setpolicy = intel_pstate_set_policy, 1199 .setpolicy = intel_pstate_set_policy,
1365 .get = intel_pstate_get, 1200 .get = intel_pstate_get,
1366 .init = intel_pstate_cpu_init, 1201 .init = intel_pstate_cpu_init,
1367 .exit = intel_pstate_cpu_exit,
1368 .stop_cpu = intel_pstate_stop_cpu, 1202 .stop_cpu = intel_pstate_stop_cpu,
1369 .name = "intel_pstate", 1203 .name = "intel_pstate",
1370}; 1204};
@@ -1406,6 +1240,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
1406} 1240}
1407 1241
1408#if IS_ENABLED(CONFIG_ACPI) 1242#if IS_ENABLED(CONFIG_ACPI)
1243#include <acpi/processor.h>
1409 1244
1410static bool intel_pstate_no_acpi_pss(void) 1245static bool intel_pstate_no_acpi_pss(void)
1411{ 1246{
@@ -1601,9 +1436,6 @@ static int __init intel_pstate_setup(char *str)
1601 force_load = 1; 1436 force_load = 1;
1602 if (!strcmp(str, "hwp_only")) 1437 if (!strcmp(str, "hwp_only"))
1603 hwp_only = 1; 1438 hwp_only = 1;
1604 if (!strcmp(str, "no_acpi"))
1605 no_acpi_perf = 1;
1606
1607 return 0; 1439 return 0;
1608} 1440}
1609early_param("intel_pstate", intel_pstate_setup); 1441early_param("intel_pstate", intel_pstate_setup);
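
The intel_pstate hunks do two things. They revert the recently added ACPI _PSS support: the per-CPU acpi_perf_data, the min/max_perf_ctl limits, the intel_pstate=no_acpi parameter and the ->exit() callback all go away, and the <acpi/processor.h> include moves back next to its only remaining user. And they retire the Baytrail-specific naming: the BYT_* MSRs become ATOM_*, and get_scaling is split per microarchitecture because Silvermont and Airmont decode MSR_FSB_FREQ differently (the old two-bit mask could never even reach the fifth entry of byt_freq_table). The results are equivalent, only the encoding changed; a worked comparison, condensed from the hunks:

    /*
     * Old (Baytrail):   i = value & 0x3;  return byt_freq_table[i] * 100;
     *                   e.g. byt_freq_table[0] = 833  ->  833 * 100 = 83300 kHz
     * New (Silvermont): i = value & 0x7;  WARN_ON(i > 4);
     *                   return silvermont_freq_table[i];  e.g. 83300 kHz directly
     * Airmont decodes four bits (value & 0xF) into its nine-entry table.
     */

Two smaller fixes ride along: max_policy_pct is now computed with DIV_ROUND_UP (and limits->max_perf rounded up to FRAC_BITS granularity) so a requested maximum no longer truncates down a percentage point, and the performance-policy fast path calls intel_pstate_hwp_set() so HWP systems actually apply the new limits.
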
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 733aa5153e74..68ef8fd9482f 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -648,7 +648,7 @@ late_initcall(s3c_cpufreq_initcall);
648 * 648 *
649 * Register the given set of PLLs with the system. 649 * Register the given set of PLLs with the system.
650 */ 650 */
651int __init s3c_plltab_register(struct cpufreq_frequency_table *plls, 651int s3c_plltab_register(struct cpufreq_frequency_table *plls,
652 unsigned int plls_no) 652 unsigned int plls_no)
653{ 653{
654 struct cpufreq_frequency_table *vals; 654 struct cpufreq_frequency_table *vals;
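
s3c_plltab_register() loses its __init annotation, evidently to fix a section mismatch: a function referenced from non-__init code must not live in the .init.text section that is discarded after boot.
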
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 73ef49922788..7038f364acb5 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -409,7 +409,7 @@ static int ccm_nx_decrypt(struct aead_request *req,
409 processed += to_process; 409 processed += to_process;
410 } while (processed < nbytes); 410 } while (processed < nbytes);
411 411
412 rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, 412 rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
413 authsize) ? -EBADMSG : 0; 413 authsize) ? -EBADMSG : 0;
414out: 414out:
415 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); 415 spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
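
memcmp() bails out at the first differing byte, so using it to verify an authentication tag leaks, through timing, how many leading bytes of a forged tag were correct. crypto_memneq() is the constant-time replacement (declared in <crypto/algapi.h>, which is why the nx-aes-gcm hunk below adds that include); the same substitution appears in nx-aes-gcm.c and talitos.c below. The resulting idiom, as used above:

    /* Constant-time MAC verification: runtime is independent of where the tags diverge. */
    rc = crypto_memneq(computed_mac, received_mac, authsize) ? -EBADMSG : 0;
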
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index eee624f589b6..abd465f479c4 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -21,6 +21,7 @@
21 21
22#include <crypto/internal/aead.h> 22#include <crypto/internal/aead.h>
23#include <crypto/aes.h> 23#include <crypto/aes.h>
24#include <crypto/algapi.h>
24#include <crypto/scatterwalk.h> 25#include <crypto/scatterwalk.h>
25#include <linux/module.h> 26#include <linux/module.h>
26#include <linux/types.h> 27#include <linux/types.h>
@@ -418,7 +419,7 @@ mac:
418 itag, req->src, req->assoclen + nbytes, 419 itag, req->src, req->assoclen + nbytes,
419 crypto_aead_authsize(crypto_aead_reqtfm(req)), 420 crypto_aead_authsize(crypto_aead_reqtfm(req)),
420 SCATTERWALK_FROM_SG); 421 SCATTERWALK_FROM_SG);
421 rc = memcmp(itag, otag, 422 rc = crypto_memneq(itag, otag,
422 crypto_aead_authsize(crypto_aead_reqtfm(req))) ? 423 crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
423 -EBADMSG : 0; 424 -EBADMSG : 0;
424 } 425 }
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 03856ad280b9..473d36d91644 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -198,7 +198,7 @@ static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
198 goto out_err; 198 goto out_err;
199 } 199 }
200 200
201 params_head = section_head->params; 201 params_head = section.params;
202 202
203 while (params_head) { 203 while (params_head) {
204 if (copy_from_user(&key_val, (void __user *)params_head, 204 if (copy_from_user(&key_val, (void __user *)params_head,
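
Judging from the surrounding copy_from_user() calls, 'section_head' is the user-space pointer whose contents were copied into the local 'section'; reading section_head->params dereferenced user memory directly from the kernel. Starting the walk from the kernel copy, section.params, is the fix.
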
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 46f531e19ccf..b6f9f42e2985 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -977,7 +977,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
977 } else 977 } else
978 oicv = (char *)&edesc->link_tbl[0]; 978 oicv = (char *)&edesc->link_tbl[0];
979 979
980 err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0; 980 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
981 } 981 }
982 982
983 kfree(edesc); 983 kfree(edesc);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 4e55239c7a30..53d22eb73b56 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -729,8 +729,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
729 return NULL; 729 return NULL;
730 730
731 dev_info(chan2dev(chan), 731 dev_info(chan2dev(chan),
732 "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", 732 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
733 __func__, xt->src_start, xt->dst_start, xt->numf, 733 __func__, &xt->src_start, &xt->dst_start, xt->numf,
734 xt->frame_size, flags); 734 xt->frame_size, flags);
735 735
736 /* 736 /*
@@ -824,8 +824,8 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
824 u32 ctrla; 824 u32 ctrla;
825 u32 ctrlb; 825 u32 ctrlb;
826 826
827 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", 827 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
828 dest, src, len, flags); 828 &dest, &src, len, flags);
829 829
830 if (unlikely(!len)) { 830 if (unlikely(!len)) {
831 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 831 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
@@ -938,8 +938,8 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
938 void __iomem *vaddr; 938 void __iomem *vaddr;
939 dma_addr_t paddr; 939 dma_addr_t paddr;
940 940
941 dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__, 941 dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
942 dest, value, len, flags); 942 &dest, value, len, flags);
943 943
944 if (unlikely(!len)) { 944 if (unlikely(!len)) {
945 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); 945 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
@@ -1022,8 +1022,8 @@ atc_prep_dma_memset_sg(struct dma_chan *chan,
1022 dma_addr_t dest = sg_dma_address(sg); 1022 dma_addr_t dest = sg_dma_address(sg);
1023 size_t len = sg_dma_len(sg); 1023 size_t len = sg_dma_len(sg);
1024 1024
1025 dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n", 1025 dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
1026 __func__, dest, len); 1026 __func__, &dest, len);
1027 1027
1028 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { 1028 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
1029 dev_err(chan2dev(chan), "%s: buffer is not aligned\n", 1029 dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
@@ -1439,9 +1439,9 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1439 unsigned int periods = buf_len / period_len; 1439 unsigned int periods = buf_len / period_len;
1440 unsigned int i; 1440 unsigned int i;
1441 1441
1442 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", 1442 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
1443 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 1443 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1444 buf_addr, 1444 &buf_addr,
1445 periods, buf_len, period_len); 1445 periods, buf_len, period_len);
1446 1446
1447 if (unlikely(!atslave || !buf_len || !period_len)) { 1447 if (unlikely(!atslave || !buf_len || !period_len)) {
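
All of the at_hdmac/at_xdmac printf changes in this area are the same fix: dma_addr_t can be wider than unsigned int (64-bit even on 32-bit ARM with LPAE), so "0x%08x" truncates it. The %pad specifier prints a dma_addr_t at its native width but takes a pointer to it, which is why every converted argument gains an '&'. A sketch, assuming only the standard kernel vsprintf extension:

    dma_addr_t buf_addr = sg_dma_address(sg);
    dev_dbg(dev, "buf@%pad\n", &buf_addr);   /* pass the *address* of the dma_addr_t */
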
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index d1cfc8c876f9..7f58f06157f6 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -385,9 +385,9 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
385static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) 385static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
386{ 386{
387 dev_crit(chan2dev(&atchan->chan_common), 387 dev_crit(chan2dev(&atchan->chan_common),
388 " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", 388 " desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n",
389 lli->saddr, lli->daddr, 389 &lli->saddr, &lli->daddr,
390 lli->ctrla, lli->ctrlb, lli->dscr); 390 lli->ctrla, lli->ctrlb, &lli->dscr);
391} 391}
392 392
393 393
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b5e132d4bae5..370c661c7d7b 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -156,7 +156,7 @@
156#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */ 156#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
157#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) 157#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
158#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) 158#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
159#define AT_XDMAC_CC_PERID(i) (0x7f & (h) << 24) /* Channel Peripheral Identifier */ 159#define AT_XDMAC_CC_PERID(i) (0x7f & (i) << 24) /* Channel Peripheral Identifier */
160#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ 160#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
161#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ 161#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
162#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ 162#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
@@ -920,8 +920,8 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
920 desc->lld.mbr_cfg = chan_cc; 920 desc->lld.mbr_cfg = chan_cc;
921 921
922 dev_dbg(chan2dev(chan), 922 dev_dbg(chan2dev(chan),
923 "%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", 923 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
924 __func__, desc->lld.mbr_sa, desc->lld.mbr_da, 924 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
925 desc->lld.mbr_ubc, desc->lld.mbr_cfg); 925 desc->lld.mbr_ubc, desc->lld.mbr_cfg);
926 926
927 /* Chain lld. */ 927 /* Chain lld. */
@@ -953,8 +953,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
953 if ((xt->numf > 1) && (xt->frame_size > 1)) 953 if ((xt->numf > 1) && (xt->frame_size > 1))
954 return NULL; 954 return NULL;
955 955
956 dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", 956 dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
957 __func__, xt->src_start, xt->dst_start, xt->numf, 957 __func__, &xt->src_start, &xt->dst_start, xt->numf,
958 xt->frame_size, flags); 958 xt->frame_size, flags);
959 959
960 src_addr = xt->src_start; 960 src_addr = xt->src_start;
@@ -965,7 +965,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
965 NULL, 965 NULL,
966 src_addr, dst_addr, 966 src_addr, dst_addr,
967 xt, xt->sgl); 967 xt, xt->sgl);
968 for (i = 0; i < xt->numf; i++) 968
969 /* Length of the block is (BLEN+1) microblocks. */
970 for (i = 0; i < xt->numf - 1; i++)
969 at_xdmac_increment_block_count(chan, first); 971 at_xdmac_increment_block_count(chan, first);
970 972
971 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", 973 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
@@ -1086,6 +1088,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1086 /* Check remaining length and change data width if needed. */ 1088 /* Check remaining length and change data width if needed. */
1087 dwidth = at_xdmac_align_width(chan, 1089 dwidth = at_xdmac_align_width(chan,
1088 src_addr | dst_addr | xfer_size); 1090 src_addr | dst_addr | xfer_size);
1091 chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
1089 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); 1092 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1090 1093
1091 ublen = xfer_size >> dwidth; 1094 ublen = xfer_size >> dwidth;
@@ -1179,8 +1182,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
1179 desc->lld.mbr_cfg = chan_cc; 1182 desc->lld.mbr_cfg = chan_cc;
1180 1183
1181 dev_dbg(chan2dev(chan), 1184 dev_dbg(chan2dev(chan),
1182 "%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", 1185 "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1183 __func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, 1186 __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc,
1184 desc->lld.mbr_cfg); 1187 desc->lld.mbr_cfg);
1185 1188
1186 return desc; 1189 return desc;
@@ -1193,8 +1196,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
1193 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); 1196 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1194 struct at_xdmac_desc *desc; 1197 struct at_xdmac_desc *desc;
1195 1198
1196 dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", 1199 dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
1197 __func__, dest, len, value, flags); 1200 __func__, &dest, len, value, flags);
1198 1201
1199 if (unlikely(!len)) 1202 if (unlikely(!len))
1200 return NULL; 1203 return NULL;
@@ -1229,8 +1232,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
1229 1232
1230 /* Prepare descriptors. */ 1233 /* Prepare descriptors. */
1231 for_each_sg(sgl, sg, sg_len, i) { 1234 for_each_sg(sgl, sg, sg_len, i) {
1232 dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", 1235 dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
1233 __func__, sg_dma_address(sg), sg_dma_len(sg), 1236 __func__, &sg_dma_address(sg), sg_dma_len(sg),
1234 value, flags); 1237 value, flags);
1235 desc = at_xdmac_memset_create_desc(chan, atchan, 1238 desc = at_xdmac_memset_create_desc(chan, atchan,
1236 sg_dma_address(sg), 1239 sg_dma_address(sg),
@@ -1333,7 +1336,7 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
1333 * since we don't care about the stride anymore. 1336 * since we don't care about the stride anymore.
1334 */ 1337 */
1335 if ((i == (sg_len - 1)) && 1338 if ((i == (sg_len - 1)) &&
1336 sg_dma_len(ppsg) == sg_dma_len(psg)) { 1339 sg_dma_len(psg) == sg_dma_len(sg)) {
1337 dev_dbg(chan2dev(chan), 1340 dev_dbg(chan2dev(chan),
1338 "%s: desc 0x%p can be merged with desc 0x%p\n", 1341 "%s: desc 0x%p can be merged with desc 0x%p\n",
1339 __func__, desc, pdesc); 1342 __func__, desc, pdesc);
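
Besides the %pad conversions, the at_xdmac hunks carry four real fixes: AT_XDMAC_CC_PERID() used a stray 'h' instead of its parameter 'i', so the peripheral ID was never built from the argument; the interleaved-transfer block count loop now runs xt->numf - 1 times because the hardware counts a block as BLEN + 1 microblocks (per the added comment); memcpy descriptor setup clears the old AT_XDMAC_CC_DWIDTH field before OR-ing in the recomputed width, instead of accumulating bits across loop iterations; and the memset_sg merge test now compares the previous and current scatterlist entries (psg vs sg) rather than the two previous ones.
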
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index c92d6a70ccf3..996c4b00d323 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -31,6 +31,7 @@
31 */ 31 */
32#include <linux/dmaengine.h> 32#include <linux/dmaengine.h>
33#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
34#include <linux/dmapool.h>
34#include <linux/err.h> 35#include <linux/err.h>
35#include <linux/init.h> 36#include <linux/init.h>
36#include <linux/interrupt.h> 37#include <linux/interrupt.h>
@@ -62,6 +63,11 @@ struct bcm2835_dma_cb {
62 uint32_t pad[2]; 63 uint32_t pad[2];
63}; 64};
64 65
66struct bcm2835_cb_entry {
67 struct bcm2835_dma_cb *cb;
68 dma_addr_t paddr;
69};
70
65struct bcm2835_chan { 71struct bcm2835_chan {
66 struct virt_dma_chan vc; 72 struct virt_dma_chan vc;
67 struct list_head node; 73 struct list_head node;
@@ -72,18 +78,18 @@ struct bcm2835_chan {
72 78
73 int ch; 79 int ch;
74 struct bcm2835_desc *desc; 80 struct bcm2835_desc *desc;
81 struct dma_pool *cb_pool;
75 82
76 void __iomem *chan_base; 83 void __iomem *chan_base;
77 int irq_number; 84 int irq_number;
78}; 85};
79 86
80struct bcm2835_desc { 87struct bcm2835_desc {
88 struct bcm2835_chan *c;
81 struct virt_dma_desc vd; 89 struct virt_dma_desc vd;
82 enum dma_transfer_direction dir; 90 enum dma_transfer_direction dir;
83 91
84 unsigned int control_block_size; 92 struct bcm2835_cb_entry *cb_list;
85 struct bcm2835_dma_cb *control_block_base;
86 dma_addr_t control_block_base_phys;
87 93
88 unsigned int frames; 94 unsigned int frames;
89 size_t size; 95 size_t size;
@@ -143,10 +149,13 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc(
143static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) 149static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
144{ 150{
145 struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd); 151 struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
146 dma_free_coherent(desc->vd.tx.chan->device->dev, 152 int i;
147 desc->control_block_size, 153
148 desc->control_block_base, 154 for (i = 0; i < desc->frames; i++)
149 desc->control_block_base_phys); 155 dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
156 desc->cb_list[i].paddr);
157
158 kfree(desc->cb_list);
150 kfree(desc); 159 kfree(desc);
151} 160}
152 161
@@ -199,7 +208,7 @@ static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
199 208
200 c->desc = d = to_bcm2835_dma_desc(&vd->tx); 209 c->desc = d = to_bcm2835_dma_desc(&vd->tx);
201 210
202 writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR); 211 writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
203 writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); 212 writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
204} 213}
205 214
@@ -232,9 +241,16 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
232static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) 241static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
233{ 242{
234 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 243 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
244 struct device *dev = c->vc.chan.device->dev;
245
246 dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
235 247
236 dev_dbg(c->vc.chan.device->dev, 248 c->cb_pool = dma_pool_create(dev_name(dev), dev,
237 "Allocating DMA channel %d\n", c->ch); 249 sizeof(struct bcm2835_dma_cb), 0, 0);
250 if (!c->cb_pool) {
251 dev_err(dev, "unable to allocate descriptor pool\n");
252 return -ENOMEM;
253 }
238 254
239 return request_irq(c->irq_number, 255 return request_irq(c->irq_number,
240 bcm2835_dma_callback, 0, "DMA IRQ", c); 256 bcm2835_dma_callback, 0, "DMA IRQ", c);
@@ -246,6 +262,7 @@ static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
246 262
247 vchan_free_chan_resources(&c->vc); 263 vchan_free_chan_resources(&c->vc);
248 free_irq(c->irq_number, c); 264 free_irq(c->irq_number, c);
265 dma_pool_destroy(c->cb_pool);
249 266
250 dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch); 267 dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
251} 268}
@@ -261,8 +278,7 @@ static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
261 size_t size; 278 size_t size;
262 279
263 for (size = i = 0; i < d->frames; i++) { 280 for (size = i = 0; i < d->frames; i++) {
264 struct bcm2835_dma_cb *control_block = 281 struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
265 &d->control_block_base[i];
266 size_t this_size = control_block->length; 282 size_t this_size = control_block->length;
267 dma_addr_t dma; 283 dma_addr_t dma;
268 284
@@ -343,6 +359,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
343 dma_addr_t dev_addr; 359 dma_addr_t dev_addr;
344 unsigned int es, sync_type; 360 unsigned int es, sync_type;
345 unsigned int frame; 361 unsigned int frame;
362 int i;
346 363
347 /* Grab configuration */ 364 /* Grab configuration */
348 if (!is_slave_direction(direction)) { 365 if (!is_slave_direction(direction)) {
@@ -374,27 +391,31 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
374 if (!d) 391 if (!d)
375 return NULL; 392 return NULL;
376 393
394 d->c = c;
377 d->dir = direction; 395 d->dir = direction;
378 d->frames = buf_len / period_len; 396 d->frames = buf_len / period_len;
379 397
380 /* Allocate memory for control blocks */ 398 d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL);
381 d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb); 399 if (!d->cb_list) {
382 d->control_block_base = dma_zalloc_coherent(chan->device->dev,
383 d->control_block_size, &d->control_block_base_phys,
384 GFP_NOWAIT);
385
386 if (!d->control_block_base) {
387 kfree(d); 400 kfree(d);
388 return NULL; 401 return NULL;
389 } 402 }
403 /* Allocate memory for control blocks */
404 for (i = 0; i < d->frames; i++) {
405 struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
406
407 cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
408 &cb_entry->paddr);
409 if (!cb_entry->cb)
410 goto error_cb;
411 }
390 412
391 /* 413 /*
392 * Iterate over all frames, create a control block 414 * Iterate over all frames, create a control block
393 * for each frame and link them together. 415 * for each frame and link them together.
394 */ 416 */
395 for (frame = 0; frame < d->frames; frame++) { 417 for (frame = 0; frame < d->frames; frame++) {
396 struct bcm2835_dma_cb *control_block = 418 struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb;
397 &d->control_block_base[frame];
398 419
399 /* Setup addresses */ 420 /* Setup addresses */
400 if (d->dir == DMA_DEV_TO_MEM) { 421 if (d->dir == DMA_DEV_TO_MEM) {
@@ -428,12 +449,21 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
428 * This DMA engine driver currently only supports cyclic DMA. 449 * This DMA engine driver currently only supports cyclic DMA.
429 * Therefore, wrap around at number of frames. 450 * Therefore, wrap around at number of frames.
430 */ 451 */
431 control_block->next = d->control_block_base_phys + 452 control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
432 sizeof(struct bcm2835_dma_cb)
433 * ((frame + 1) % d->frames);
434 } 453 }
435 454
436 return vchan_tx_prep(&c->vc, &d->vd, flags); 455 return vchan_tx_prep(&c->vc, &d->vd, flags);
456error_cb:
457 i--;
458 for (; i >= 0; i--) {
459 struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
460
461 dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
462 }
463
464 kfree(d->cb_list);
465 kfree(d);
466 return NULL;
437} 467}
438 468
439static int bcm2835_dma_slave_config(struct dma_chan *chan, 469static int bcm2835_dma_slave_config(struct dma_chan *chan,
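The bcm2835-dma.c conversion above replaces a single dma_zalloc_coherent() buffer holding every control block with per-frame allocations from a dma_pool, so long cyclic transfers no longer require one large physically contiguous coherent allocation, and each block's bus address comes back through the pool. A condensed sketch of that lifecycle, with hypothetical names and a stand-in control-block layout:

#include <linux/device.h>
#include <linux/dmapool.h>

struct my_cb {                          /* stand-in for bcm2835_dma_cb */
        u32 info, src, dst, length, stride, next, pad[2];
};

static int alloc_cyclic_cbs(struct device *dev, struct dma_pool **pool,
                            struct my_cb **cb, dma_addr_t *paddr, int n)
{
        int i;

        *pool = dma_pool_create(dev_name(dev), dev, sizeof(struct my_cb), 0, 0);
        if (!*pool)
                return -ENOMEM;

        for (i = 0; i < n; i++) {
                cb[i] = dma_pool_zalloc(*pool, GFP_ATOMIC, &paddr[i]);
                if (!cb[i])
                        goto err_free;
        }
        for (i = 0; i < n; i++)         /* cyclic: last block points at first */
                cb[i]->next = paddr[(i + 1) % n];
        return 0;

err_free:
        while (--i >= 0)
                dma_pool_free(*pool, cb[i], paddr[i]);
        dma_pool_destroy(*pool);
        return -ENOMEM;
}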
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 6b03e4e84e6b..16fe773fb846 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -107,7 +107,7 @@
107 107
108/* CCCFG register */ 108/* CCCFG register */
109#define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ 109#define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */
110#define GET_NUM_QDMACH(x) (x & 0x70 >> 4) /* bits 4-6 */ 110#define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */
111#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ 111#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */
112#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ 112#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */
113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ 113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
@@ -1565,7 +1565,7 @@ static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
1565 struct platform_device *tc_pdev; 1565 struct platform_device *tc_pdev;
1566 int ret; 1566 int ret;
1567 1567
1568 if (!tc) 1568 if (!IS_ENABLED(CONFIG_OF) || !tc)
1569 return; 1569 return;
1570 1570
1571 tc_pdev = of_find_device_by_node(tc->node); 1571 tc_pdev = of_find_device_by_node(tc->node);
@@ -1752,16 +1752,14 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
1752 return ret; 1752 return ret;
1753} 1753}
1754 1754
1755static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels) 1755static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
1756{ 1756{
1757 s16 *memcpy_ch = memcpy_channels;
1758
1759 if (!memcpy_channels) 1757 if (!memcpy_channels)
1760 return false; 1758 return false;
1761 while (*memcpy_ch != -1) { 1759 while (*memcpy_channels != -1) {
1762 if (*memcpy_ch == ch_num) 1760 if (*memcpy_channels == ch_num)
1763 return true; 1761 return true;
1764 memcpy_ch++; 1762 memcpy_channels++;
1765 } 1763 }
1766 return false; 1764 return false;
1767} 1765}
@@ -1775,7 +1773,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
1775{ 1773{
1776 struct dma_device *s_ddev = &ecc->dma_slave; 1774 struct dma_device *s_ddev = &ecc->dma_slave;
1777 struct dma_device *m_ddev = NULL; 1775 struct dma_device *m_ddev = NULL;
1778 s16 *memcpy_channels = ecc->info->memcpy_channels; 1776 s32 *memcpy_channels = ecc->info->memcpy_channels;
1779 int i, j; 1777 int i, j;
1780 1778
1781 dma_cap_zero(s_ddev->cap_mask); 1779 dma_cap_zero(s_ddev->cap_mask);
@@ -1996,16 +1994,16 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
1996 prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); 1994 prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
1997 if (prop) { 1995 if (prop) {
1998 const char pname[] = "ti,edma-memcpy-channels"; 1996 const char pname[] = "ti,edma-memcpy-channels";
1999 size_t nelm = sz / sizeof(s16); 1997 size_t nelm = sz / sizeof(s32);
2000 s16 *memcpy_ch; 1998 s32 *memcpy_ch;
2001 1999
2002 memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16), 2000 memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
2003 GFP_KERNEL); 2001 GFP_KERNEL);
2004 if (!memcpy_ch) 2002 if (!memcpy_ch)
2005 return ERR_PTR(-ENOMEM); 2003 return ERR_PTR(-ENOMEM);
2006 2004
2007 ret = of_property_read_u16_array(dev->of_node, pname, 2005 ret = of_property_read_u32_array(dev->of_node, pname,
2008 (u16 *)memcpy_ch, nelm); 2006 (u32 *)memcpy_ch, nelm);
2009 if (ret) 2007 if (ret)
2010 return ERR_PTR(ret); 2008 return ERR_PTR(ret);
2011 2009
@@ -2017,31 +2015,50 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
2017 &sz); 2015 &sz);
2018 if (prop) { 2016 if (prop) {
2019 const char pname[] = "ti,edma-reserved-slot-ranges"; 2017 const char pname[] = "ti,edma-reserved-slot-ranges";
2018 u32 (*tmp)[2];
2020 s16 (*rsv_slots)[2]; 2019 s16 (*rsv_slots)[2];
2021 size_t nelm = sz / sizeof(*rsv_slots); 2020 size_t nelm = sz / sizeof(*tmp);
2022 struct edma_rsv_info *rsv_info; 2021 struct edma_rsv_info *rsv_info;
2022 int i;
2023 2023
2024 if (!nelm) 2024 if (!nelm)
2025 return info; 2025 return info;
2026 2026
2027 tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
2028 if (!tmp)
2029 return ERR_PTR(-ENOMEM);
2030
2027 rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); 2031 rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
2028 if (!rsv_info) 2032 if (!rsv_info) {
2033 kfree(tmp);
2029 return ERR_PTR(-ENOMEM); 2034 return ERR_PTR(-ENOMEM);
2035 }
2030 2036
2031 rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots), 2037 rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
2032 GFP_KERNEL); 2038 GFP_KERNEL);
2033 if (!rsv_slots) 2039 if (!rsv_slots) {
2040 kfree(tmp);
2034 return ERR_PTR(-ENOMEM); 2041 return ERR_PTR(-ENOMEM);
2042 }
2035 2043
2036 ret = of_property_read_u16_array(dev->of_node, pname, 2044 ret = of_property_read_u32_array(dev->of_node, pname,
2037 (u16 *)rsv_slots, nelm * 2); 2045 (u32 *)tmp, nelm * 2);
2038 if (ret) 2046 if (ret) {
2047 kfree(tmp);
2039 return ERR_PTR(ret); 2048 return ERR_PTR(ret);
2049 }
2040 2050
2051 for (i = 0; i < nelm; i++) {
2052 rsv_slots[i][0] = tmp[i][0];
2053 rsv_slots[i][1] = tmp[i][1];
2054 }
2041 rsv_slots[nelm][0] = -1; 2055 rsv_slots[nelm][0] = -1;
2042 rsv_slots[nelm][1] = -1; 2056 rsv_slots[nelm][1] = -1;
2057
2043 info->rsv = rsv_info; 2058 info->rsv = rsv_info;
2044 info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; 2059 info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
2060
2061 kfree(tmp);
2045 } 2062 }
2046 2063
2047 return info; 2064 return info;
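Two separate fixes land in edma.c above. GET_NUM_QDMACH gains parentheses because shift binds tighter than bitwise AND in C, so x & 0x70 >> 4 parses as x & (0x70 >> 4), i.e. x & 0x7, silently returning bits 0-2 instead of bits 4-6. The memcpy-channel and reserved-slot properties also move from u16 to u32 accessors because the devicetree binding stores each entry in a full 32-bit cell, so the 16-bit reads walked the property at the wrong stride. The precedence pitfall in isolation:

/* hypothetical macros mirroring the CCCFG fix */
#define NUM_QDMACH_BUGGY(x)     (x & 0x70 >> 4)         /* == (x & 0x7): bits 0-2 */
#define NUM_QDMACH_FIXED(x)     (((x) & 0x70) >> 4)     /* bits 4-6, as intended */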
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 7058d58ba588..0f6fd42f55ca 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1462,7 +1462,7 @@ err_firmware:
1462 1462
1463#define EVENT_REMAP_CELLS 3 1463#define EVENT_REMAP_CELLS 3
1464 1464
1465static int __init sdma_event_remap(struct sdma_engine *sdma) 1465static int sdma_event_remap(struct sdma_engine *sdma)
1466{ 1466{
1467 struct device_node *np = sdma->dev->of_node; 1467 struct device_node *np = sdma->dev->of_node;
1468 struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0); 1468 struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
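Dropping __init from sdma_event_remap() above matters because the function is called from the probe path: with deferred probe, or a driver bound after boot, probe can run after the kernel has discarded its .init.text, so an __init callee would be a call into freed memory (the classic section-mismatch problem). A sketch of the rule, with hypothetical names:

#include <linux/platform_device.h>

/* must NOT be __init: probe may run after free_initmem() */
static int my_event_remap(struct platform_device *pdev)
{
        return 0;
}

static int my_probe(struct platform_device *pdev)
{
        return my_event_remap(pdev);    /* deferred probe can land here late */
}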
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 068e920ecb68..cddfa8dbf4bd 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -317,6 +317,7 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
317 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); 317 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
318 struct device *dev = mic_dma_ch_to_device(mic_ch); 318 struct device *dev = mic_dma_ch_to_device(mic_ch);
319 int result; 319 int result;
320 struct dma_async_tx_descriptor *tx = NULL;
320 321
321 if (!len && !flags) 322 if (!len && !flags)
322 return NULL; 323 return NULL;
@@ -324,10 +325,13 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
324 spin_lock(&mic_ch->prep_lock); 325 spin_lock(&mic_ch->prep_lock);
325 result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len); 326 result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
326 if (result >= 0) 327 if (result >= 0)
327 return allocate_tx(mic_ch); 328 tx = allocate_tx(mic_ch);
328 dev_err(dev, "Error enqueueing dma, error=%d\n", result); 329
330 if (!tx)
331 dev_err(dev, "Error enqueueing dma, error=%d\n", result);
332
329 spin_unlock(&mic_ch->prep_lock); 333 spin_unlock(&mic_ch->prep_lock);
330 return NULL; 334 return tx;
331} 335}
332 336
333static struct dma_async_tx_descriptor * 337static struct dma_async_tx_descriptor *
@@ -335,13 +339,14 @@ mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
335{ 339{
336 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); 340 struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
337 int ret; 341 int ret;
342 struct dma_async_tx_descriptor *tx = NULL;
338 343
339 spin_lock(&mic_ch->prep_lock); 344 spin_lock(&mic_ch->prep_lock);
340 ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0); 345 ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
341 if (!ret) 346 if (!ret)
342 return allocate_tx(mic_ch); 347 tx = allocate_tx(mic_ch);
343 spin_unlock(&mic_ch->prep_lock); 348 spin_unlock(&mic_ch->prep_lock);
344 return NULL; 349 return tx;
345} 350}
346 351
347/* Return the status of the transaction */ 352/* Return the status of the transaction */
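The mic_x100_dma.c change above closes a lock leak: the success path returned allocate_tx() while still holding prep_lock, so only the failure path ever unlocked it. Collecting the result in a local and leaving through a single unlock site fixes that and keeps the error message off the success path. The shape of the pattern, with a hypothetical allocator:

#include <linux/spinlock.h>

void *try_allocate(void);               /* hypothetical helper */

static DEFINE_SPINLOCK(prep_lock);

static void *prep_tx(void)
{
        void *tx;

        spin_lock(&prep_lock);
        tx = try_allocate();            /* may be NULL on failure */
        spin_unlock(&prep_lock);        /* single exit: runs on every path */
        return tx;
}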
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index ebd8a5f398b0..f1bcc2a163b3 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -679,8 +679,11 @@ static int usb_dmac_runtime_suspend(struct device *dev)
679 struct usb_dmac *dmac = dev_get_drvdata(dev); 679 struct usb_dmac *dmac = dev_get_drvdata(dev);
680 int i; 680 int i;
681 681
682 for (i = 0; i < dmac->n_channels; ++i) 682 for (i = 0; i < dmac->n_channels; ++i) {
683 if (!dmac->channels[i].iomem)
684 break;
683 usb_dmac_chan_halt(&dmac->channels[i]); 685 usb_dmac_chan_halt(&dmac->channels[i]);
686 }
684 687
685 return 0; 688 return 0;
686} 689}
@@ -799,11 +802,10 @@ static int usb_dmac_probe(struct platform_device *pdev)
799 ret = pm_runtime_get_sync(&pdev->dev); 802 ret = pm_runtime_get_sync(&pdev->dev);
800 if (ret < 0) { 803 if (ret < 0) {
801 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); 804 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
802 return ret; 805 goto error_pm;
803 } 806 }
804 807
805 ret = usb_dmac_init(dmac); 808 ret = usb_dmac_init(dmac);
806 pm_runtime_put(&pdev->dev);
807 809
808 if (ret) { 810 if (ret) {
809 dev_err(&pdev->dev, "failed to reset device\n"); 811 dev_err(&pdev->dev, "failed to reset device\n");
@@ -851,10 +853,13 @@ static int usb_dmac_probe(struct platform_device *pdev)
851 if (ret < 0) 853 if (ret < 0)
852 goto error; 854 goto error;
853 855
856 pm_runtime_put(&pdev->dev);
854 return 0; 857 return 0;
855 858
856error: 859error:
857 of_dma_controller_free(pdev->dev.of_node); 860 of_dma_controller_free(pdev->dev.of_node);
861 pm_runtime_put(&pdev->dev);
862error_pm:
858 pm_runtime_disable(&pdev->dev); 863 pm_runtime_disable(&pdev->dev);
859 return ret; 864 return ret;
860} 865}
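The usb-dmac.c probe rework above keeps the runtime-PM reference held across the whole initialization sequence instead of dropping it right after usb_dmac_init(), and the error paths gain dedicated labels so the reference and the enable state are each unwound exactly once. A reduced sketch of that ordering, with a hypothetical setup step:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

int do_setup(struct platform_device *pdev);     /* hypothetical */

static int my_probe(struct platform_device *pdev)
{
        int ret;

        pm_runtime_enable(&pdev->dev);
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
                goto err_pm;

        ret = do_setup(pdev);           /* device stays resumed throughout */
        if (ret)
                goto err;

        pm_runtime_put(&pdev->dev);     /* only once setup has succeeded */
        return 0;

err:
        pm_runtime_put(&pdev->dev);
err_pm:
        pm_runtime_disable(&pdev->dev);
        return ret;
}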
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index a24f5cb877e0..953dc9195937 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -122,12 +122,10 @@ int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
122 } 122 }
123 123
124 ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size); 124 ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size);
125 if (ret)
126 return ret;
127 125
128 release_firmware(fw); 126 release_firmware(fw);
129 127
130 return 0; 128 return ret;
131} 129}
132EXPORT_SYMBOL_GPL(fpga_mgr_firmware_load); 130EXPORT_SYMBOL_GPL(fpga_mgr_firmware_load);
133 131
@@ -256,7 +254,6 @@ int fpga_mgr_register(struct device *dev, const char *name,
256 void *priv) 254 void *priv)
257{ 255{
258 struct fpga_manager *mgr; 256 struct fpga_manager *mgr;
259 const char *dt_label;
260 int id, ret; 257 int id, ret;
261 258
262 if (!mops || !mops->write_init || !mops->write || 259 if (!mops || !mops->write_init || !mops->write ||
@@ -300,11 +297,9 @@ int fpga_mgr_register(struct device *dev, const char *name,
300 mgr->dev.id = id; 297 mgr->dev.id = id;
301 dev_set_drvdata(dev, mgr); 298 dev_set_drvdata(dev, mgr);
302 299
303 dt_label = of_get_property(mgr->dev.of_node, "label", NULL); 300 ret = dev_set_name(&mgr->dev, "fpga%d", id);
304 if (dt_label) 301 if (ret)
305 ret = dev_set_name(&mgr->dev, "%s", dt_label); 302 goto error_device;
306 else
307 ret = dev_set_name(&mgr->dev, "fpga%d", id);
308 303
309 ret = device_add(&mgr->dev); 304 ret = device_add(&mgr->dev);
310 if (ret) 305 if (ret)
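The fpga-mgr.c fix above removes an early return that skipped release_firmware(), leaking the firmware image whenever programming failed; releasing unconditionally and returning ret covers both outcomes. The leak-free pairing in isolation:

#include <linux/firmware.h>

int program_device(const u8 *buf, size_t count);        /* hypothetical */

static int load_image(struct device *dev)
{
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, "image.bin", dev);  /* hypothetical name */
        if (ret)
                return ret;

        ret = program_device(fw->data, fw->size);
        release_firmware(fw);           /* runs on success and failure alike */
        return ret;
}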
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 6ed7c0fb3378..6b186829087c 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -113,13 +113,16 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
113 113
114static int mmio_74xx_gpio_probe(struct platform_device *pdev) 114static int mmio_74xx_gpio_probe(struct platform_device *pdev)
115{ 115{
116 const struct of_device_id *of_id = 116 const struct of_device_id *of_id;
117 of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
118 struct mmio_74xx_gpio_priv *priv; 117 struct mmio_74xx_gpio_priv *priv;
119 struct resource *res; 118 struct resource *res;
120 void __iomem *dat; 119 void __iomem *dat;
121 int err; 120 int err;
122 121
122 of_id = of_match_device(mmio_74xx_gpio_ids, &pdev->dev);
123 if (!of_id)
124 return -ENODEV;
125
123 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 126 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
124 if (!priv) 127 if (!priv)
125 return -ENOMEM; 128 return -ENOMEM;
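gpio-74xx-mmio.c above (and gpio-palmas.c and gpio-syscon.c further down) stops dereferencing the result of of_match_device() unchecked: the call returns NULL when the device was bound without a matching OF node, so the probes now bail out with -ENODEV first. The guarded pattern, with a hypothetical match table:

#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id my_of_ids[] = {
        { .compatible = "acme,widget" },        /* hypothetical */
        { /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
        const struct of_device_id *of_id;

        of_id = of_match_device(my_of_ids, &pdev->dev);
        if (!of_id)
                return -ENODEV;         /* no OF match: nothing to configure */

        /* of_id->data is safe to use from here on */
        return 0;
}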
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index e7e38af11ec9..6e678121ab12 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -113,7 +113,7 @@ static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
113 __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR); 113 __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR);
114 114
115 __raw_writel( 115 __raw_writel(
116 __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & BIT(offset), 116 __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset),
117 ctrl->base + AR71XX_GPIO_REG_OE); 117 ctrl->base + AR71XX_GPIO_REG_OE);
118 118
119 spin_unlock_irqrestore(&ctrl->lock, flags); 119 spin_unlock_irqrestore(&ctrl->lock, flags);
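The gpio-ath79.c fix above corrects a read-modify-write: switching one line to output must clear only that line's bit in the OE register, so the mask has to be ANDed in complemented form; ANDing with BIT(offset) itself zeroed every other line's direction instead. In isolation:

#include <linux/bitops.h>
#include <linux/io.h>

static void clear_one_bit(void __iomem *reg, unsigned int offset)
{
        u32 val = readl(reg);

        /* buggy: writel(val & BIT(offset), reg) keeps only bit 'offset' */
        writel(val & ~BIT(offset), reg);        /* clears only bit 'offset' */
}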
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 72088028d7a9..ea581dc23d44 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -141,9 +141,9 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
141 unsigned long pinmask = bgc->pin2mask(bgc, gpio); 141 unsigned long pinmask = bgc->pin2mask(bgc, gpio);
142 142
143 if (bgc->dir & pinmask) 143 if (bgc->dir & pinmask)
144 return bgc->read_reg(bgc->reg_set) & pinmask; 144 return !!(bgc->read_reg(bgc->reg_set) & pinmask);
145 else 145 else
146 return bgc->read_reg(bgc->reg_dat) & pinmask; 146 return !!(bgc->read_reg(bgc->reg_dat) & pinmask);
147} 147}
148 148
149static int bgpio_get(struct gpio_chip *gc, unsigned int gpio) 149static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
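The gpio-generic.c change above clamps the .get return value: reg & pinmask yields the raw masked register value, and for pin 31 that is 0x80000000, which becomes negative when truncated to the int return type and is then mistaken for an errno by callers. Double negation normalizes it to 0 or 1:

/* hypothetical getter showing the clamp */
static int my_get(unsigned long reg, unsigned long pinmask)
{
        return !!(reg & pinmask);       /* always 0 or 1, never negative */
}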
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 7e4f7c5f999a..e183351d047c 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1122,8 +1122,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
1122 /* MPUIO is a bit different, reading IRQ status clears it */ 1122 /* MPUIO is a bit different, reading IRQ status clears it */
1123 if (bank->is_mpuio) { 1123 if (bank->is_mpuio) {
1124 irqc->irq_ack = dummy_irq_chip.irq_ack; 1124 irqc->irq_ack = dummy_irq_chip.irq_ack;
1125 irqc->irq_mask = irq_gc_mask_set_bit;
1126 irqc->irq_unmask = irq_gc_mask_clr_bit;
1127 if (!bank->regs->wkup_en) 1125 if (!bank->regs->wkup_en)
1128 irqc->irq_set_wake = NULL; 1126 irqc->irq_set_wake = NULL;
1129 } 1127 }
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 5f09ed1700dc..ddd1a00c839d 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -169,6 +169,8 @@ static int palmas_gpio_probe(struct platform_device *pdev)
169 const struct palmas_device_data *dev_data; 169 const struct palmas_device_data *dev_data;
170 170
171 match = of_match_device(of_palmas_gpio_match, &pdev->dev); 171 match = of_match_device(of_palmas_gpio_match, &pdev->dev);
172 if (!match)
173 return -ENODEV;
172 dev_data = match->data; 174 dev_data = match->data;
173 if (!dev_data) 175 if (!dev_data)
174 dev_data = &palmas_dev_data; 176 dev_data = &palmas_dev_data;
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index cd6afee11f84..333d5af4abd1 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -187,11 +187,15 @@ MODULE_DEVICE_TABLE(of, syscon_gpio_ids);
187static int syscon_gpio_probe(struct platform_device *pdev) 187static int syscon_gpio_probe(struct platform_device *pdev)
188{ 188{
189 struct device *dev = &pdev->dev; 189 struct device *dev = &pdev->dev;
190 const struct of_device_id *of_id = of_match_device(syscon_gpio_ids, dev); 190 const struct of_device_id *of_id;
191 struct syscon_gpio_priv *priv; 191 struct syscon_gpio_priv *priv;
192 struct device_node *np = dev->of_node; 192 struct device_node *np = dev->of_node;
193 int ret; 193 int ret;
194 194
195 of_id = of_match_device(syscon_gpio_ids, dev);
196 if (!of_id)
197 return -ENODEV;
198
195 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 199 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
196 if (!priv) 200 if (!priv)
197 return -ENOMEM; 201 return -ENOMEM;
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 027e5f47dd28..896bf29776b0 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -375,6 +375,60 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
375} 375}
376#endif 376#endif
377 377
378#ifdef CONFIG_DEBUG_FS
379
380#include <linux/debugfs.h>
381#include <linux/seq_file.h>
382
383static int dbg_gpio_show(struct seq_file *s, void *unused)
384{
385 int i;
386 int j;
387
388 for (i = 0; i < tegra_gpio_bank_count; i++) {
389 for (j = 0; j < 4; j++) {
390 int gpio = tegra_gpio_compose(i, j, 0);
391 seq_printf(s,
392 "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
393 i, j,
394 tegra_gpio_readl(GPIO_CNF(gpio)),
395 tegra_gpio_readl(GPIO_OE(gpio)),
396 tegra_gpio_readl(GPIO_OUT(gpio)),
397 tegra_gpio_readl(GPIO_IN(gpio)),
398 tegra_gpio_readl(GPIO_INT_STA(gpio)),
399 tegra_gpio_readl(GPIO_INT_ENB(gpio)),
400 tegra_gpio_readl(GPIO_INT_LVL(gpio)));
401 }
402 }
403 return 0;
404}
405
406static int dbg_gpio_open(struct inode *inode, struct file *file)
407{
408 return single_open(file, dbg_gpio_show, &inode->i_private);
409}
410
411static const struct file_operations debug_fops = {
412 .open = dbg_gpio_open,
413 .read = seq_read,
414 .llseek = seq_lseek,
415 .release = single_release,
416};
417
418static void tegra_gpio_debuginit(void)
419{
420 (void) debugfs_create_file("tegra_gpio", S_IRUGO,
421 NULL, NULL, &debug_fops);
422}
423
424#else
425
426static inline void tegra_gpio_debuginit(void)
427{
428}
429
430#endif
431
378static struct irq_chip tegra_gpio_irq_chip = { 432static struct irq_chip tegra_gpio_irq_chip = {
379 .name = "GPIO", 433 .name = "GPIO",
380 .irq_ack = tegra_gpio_irq_ack, 434 .irq_ack = tegra_gpio_irq_ack,
@@ -519,6 +573,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
519 spin_lock_init(&bank->lvl_lock[j]); 573 spin_lock_init(&bank->lvl_lock[j]);
520 } 574 }
521 575
576 tegra_gpio_debuginit();
577
522 return 0; 578 return 0;
523} 579}
524 580
@@ -536,52 +592,3 @@ static int __init tegra_gpio_init(void)
536 return platform_driver_register(&tegra_gpio_driver); 592 return platform_driver_register(&tegra_gpio_driver);
537} 593}
538postcore_initcall(tegra_gpio_init); 594postcore_initcall(tegra_gpio_init);
539
540#ifdef CONFIG_DEBUG_FS
541
542#include <linux/debugfs.h>
543#include <linux/seq_file.h>
544
545static int dbg_gpio_show(struct seq_file *s, void *unused)
546{
547 int i;
548 int j;
549
550 for (i = 0; i < tegra_gpio_bank_count; i++) {
551 for (j = 0; j < 4; j++) {
552 int gpio = tegra_gpio_compose(i, j, 0);
553 seq_printf(s,
554 "%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
555 i, j,
556 tegra_gpio_readl(GPIO_CNF(gpio)),
557 tegra_gpio_readl(GPIO_OE(gpio)),
558 tegra_gpio_readl(GPIO_OUT(gpio)),
559 tegra_gpio_readl(GPIO_IN(gpio)),
560 tegra_gpio_readl(GPIO_INT_STA(gpio)),
561 tegra_gpio_readl(GPIO_INT_ENB(gpio)),
562 tegra_gpio_readl(GPIO_INT_LVL(gpio)));
563 }
564 }
565 return 0;
566}
567
568static int dbg_gpio_open(struct inode *inode, struct file *file)
569{
570 return single_open(file, dbg_gpio_show, &inode->i_private);
571}
572
573static const struct file_operations debug_fops = {
574 .open = dbg_gpio_open,
575 .read = seq_read,
576 .llseek = seq_lseek,
577 .release = single_release,
578};
579
580static int __init tegra_gpio_debuginit(void)
581{
582 (void) debugfs_create_file("tegra_gpio", S_IRUGO,
583 NULL, NULL, &debug_fops);
584 return 0;
585}
586late_initcall(tegra_gpio_debuginit);
587#endif
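The gpio-tegra.c rework above moves the debugfs file creation out of a late_initcall and into a helper invoked at the end of probe, so the file appears only once the banks it dumps have been set up, and an empty inline stub keeps !CONFIG_DEBUG_FS builds compiling without churn at the call site. The stub pattern, with hypothetical names:

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>

/* debug_fops: the seq_file operations defined next to the show routine */
static void my_debuginit(void)
{
        debugfs_create_file("my_dev", S_IRUGO, NULL, NULL, &debug_fops);
}

#else

static inline void my_debuginit(void) {}

#endif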
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e63bebc9ae60..d72ac1fdcd98 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -250,7 +250,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
250 for (i = 0; i != chip->ngpio; ++i) { 250 for (i = 0; i != chip->ngpio; ++i) {
251 struct gpio_desc *gpio = &chip->desc[i]; 251 struct gpio_desc *gpio = &chip->desc[i];
252 252
253 if (!gpio->name) 253 if (!gpio->name || !name)
254 continue; 254 continue;
255 255
256 if (!strcmp(gpio->name, name)) { 256 if (!strcmp(gpio->name, name)) {
@@ -1303,7 +1303,13 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc)
1303 chip = desc->chip; 1303 chip = desc->chip;
1304 offset = gpio_chip_hwgpio(desc); 1304 offset = gpio_chip_hwgpio(desc);
1305 value = chip->get ? chip->get(chip, offset) : -EIO; 1305 value = chip->get ? chip->get(chip, offset) : -EIO;
1306 value = value < 0 ? value : !!value; 1306 /*
1307 * FIXME: fix all drivers to clamp to [0,1] or return negative,
1308 * then change this to:
1309 * value = value < 0 ? value : !!value;
1310 * so we can properly propagate error codes.
1311 */
1312 value = !!value;
1307 trace_gpio_value(desc_to_gpio(desc), 1, value); 1313 trace_gpio_value(desc_to_gpio(desc), 1, value);
1308 return value; 1314 return value;
1309} 1315}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 615ce6d464fb..5a5f04d0902d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -389,7 +389,6 @@ struct amdgpu_clock {
389 * Fences. 389 * Fences.
390 */ 390 */
391struct amdgpu_fence_driver { 391struct amdgpu_fence_driver {
392 struct amdgpu_ring *ring;
393 uint64_t gpu_addr; 392 uint64_t gpu_addr;
394 volatile uint32_t *cpu_addr; 393 volatile uint32_t *cpu_addr;
395 /* sync_seq is protected by ring emission lock */ 394 /* sync_seq is protected by ring emission lock */
@@ -398,7 +397,7 @@ struct amdgpu_fence_driver {
398 bool initialized; 397 bool initialized;
399 struct amdgpu_irq_src *irq_src; 398 struct amdgpu_irq_src *irq_src;
400 unsigned irq_type; 399 unsigned irq_type;
401 struct delayed_work lockup_work; 400 struct timer_list fallback_timer;
402 wait_queue_head_t fence_queue; 401 wait_queue_head_t fence_queue;
403}; 402};
404 403
@@ -497,6 +496,7 @@ struct amdgpu_bo_va_mapping {
497 496
498/* bo virtual addresses in a specific vm */ 497/* bo virtual addresses in a specific vm */
499struct amdgpu_bo_va { 498struct amdgpu_bo_va {
499 struct mutex mutex;
500 /* protected by bo being reserved */ 500 /* protected by bo being reserved */
501 struct list_head bo_list; 501 struct list_head bo_list;
502 struct fence *last_pt_update; 502 struct fence *last_pt_update;
@@ -539,6 +539,7 @@ struct amdgpu_bo {
539 /* Constant after initialization */ 539 /* Constant after initialization */
540 struct amdgpu_device *adev; 540 struct amdgpu_device *adev;
541 struct drm_gem_object gem_base; 541 struct drm_gem_object gem_base;
542 struct amdgpu_bo *parent;
542 543
543 struct ttm_bo_kmap_obj dma_buf_vmap; 544 struct ttm_bo_kmap_obj dma_buf_vmap;
544 pid_t pid; 545 pid_t pid;
@@ -917,8 +918,8 @@ struct amdgpu_ring {
917#define AMDGPU_VM_FAULT_STOP_ALWAYS 2 918#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
918 919
919struct amdgpu_vm_pt { 920struct amdgpu_vm_pt {
920 struct amdgpu_bo *bo; 921 struct amdgpu_bo *bo;
921 uint64_t addr; 922 uint64_t addr;
922}; 923};
923 924
924struct amdgpu_vm_id { 925struct amdgpu_vm_id {
@@ -926,13 +927,9 @@ struct amdgpu_vm_id {
926 uint64_t pd_gpu_addr; 927 uint64_t pd_gpu_addr;
927 /* last flushed PD/PT update */ 928 /* last flushed PD/PT update */
928 struct fence *flushed_updates; 929 struct fence *flushed_updates;
929 /* last use of vmid */
930 struct fence *last_id_use;
931}; 930};
932 931
933struct amdgpu_vm { 932struct amdgpu_vm {
934 struct mutex mutex;
935
936 struct rb_root va; 933 struct rb_root va;
937 934
938 /* protecting invalidated */ 935 /* protecting invalidated */
@@ -957,24 +954,72 @@ struct amdgpu_vm {
957 954
958 /* for id and flush management per ring */ 955 /* for id and flush management per ring */
959 struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; 956 struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
957 /* for interval tree */
958 spinlock_t it_lock;
959 /* protecting freed */
960 spinlock_t freed_lock;
960}; 961};
961 962
962struct amdgpu_vm_manager { 963struct amdgpu_vm_manager {
963 struct fence *active[AMDGPU_NUM_VM]; 964 struct {
964 uint32_t max_pfn; 965 struct fence *active;
966 atomic_long_t owner;
967 } ids[AMDGPU_NUM_VM];
968
969 uint32_t max_pfn;
965 /* number of VMIDs */ 970 /* number of VMIDs */
966 unsigned nvm; 971 unsigned nvm;
967 /* vram base address for page table entry */ 972 /* vram base address for page table entry */
968 u64 vram_base_offset; 973 u64 vram_base_offset;
969 /* is vm enabled? */ 974 /* is vm enabled? */
970 bool enabled; 975 bool enabled;
971 /* for hw to save the PD addr on suspend/resume */
972 uint32_t saved_table_addr[AMDGPU_NUM_VM];
973 /* vm pte handling */ 976 /* vm pte handling */
974 const struct amdgpu_vm_pte_funcs *vm_pte_funcs; 977 const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
975 struct amdgpu_ring *vm_pte_funcs_ring; 978 struct amdgpu_ring *vm_pte_funcs_ring;
976}; 979};
977 980
981void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
982int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
983void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
984struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
985 struct amdgpu_vm *vm,
986 struct list_head *head);
987int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
988 struct amdgpu_sync *sync);
989void amdgpu_vm_flush(struct amdgpu_ring *ring,
990 struct amdgpu_vm *vm,
991 struct fence *updates);
992void amdgpu_vm_fence(struct amdgpu_device *adev,
993 struct amdgpu_vm *vm,
994 struct fence *fence);
995uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
996int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
997 struct amdgpu_vm *vm);
998int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
999 struct amdgpu_vm *vm);
1000int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1001 struct amdgpu_sync *sync);
1002int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1003 struct amdgpu_bo_va *bo_va,
1004 struct ttm_mem_reg *mem);
1005void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
1006 struct amdgpu_bo *bo);
1007struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
1008 struct amdgpu_bo *bo);
1009struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
1010 struct amdgpu_vm *vm,
1011 struct amdgpu_bo *bo);
1012int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1013 struct amdgpu_bo_va *bo_va,
1014 uint64_t addr, uint64_t offset,
1015 uint64_t size, uint32_t flags);
1016int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1017 struct amdgpu_bo_va *bo_va,
1018 uint64_t addr);
1019void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1020 struct amdgpu_bo_va *bo_va);
1021int amdgpu_vm_free_job(struct amdgpu_job *job);
1022
978/* 1023/*
979 * context related structures 1024 * context related structures
980 */ 1025 */
@@ -1211,6 +1256,7 @@ struct amdgpu_cs_parser {
1211 /* relocations */ 1256 /* relocations */
1212 struct amdgpu_bo_list_entry *vm_bos; 1257 struct amdgpu_bo_list_entry *vm_bos;
1213 struct list_head validated; 1258 struct list_head validated;
1259 struct fence *fence;
1214 1260
1215 struct amdgpu_ib *ibs; 1261 struct amdgpu_ib *ibs;
1216 uint32_t num_ibs; 1262 uint32_t num_ibs;
@@ -1226,7 +1272,7 @@ struct amdgpu_job {
1226 struct amdgpu_device *adev; 1272 struct amdgpu_device *adev;
1227 struct amdgpu_ib *ibs; 1273 struct amdgpu_ib *ibs;
1228 uint32_t num_ibs; 1274 uint32_t num_ibs;
1229 struct mutex job_lock; 1275 void *owner;
1230 struct amdgpu_user_fence uf; 1276 struct amdgpu_user_fence uf;
1231 int (*free_job)(struct amdgpu_job *job); 1277 int (*free_job)(struct amdgpu_job *job);
1232}; 1278};
@@ -2257,11 +2303,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
2257bool amdgpu_card_posted(struct amdgpu_device *adev); 2303bool amdgpu_card_posted(struct amdgpu_device *adev);
2258void amdgpu_update_display_priority(struct amdgpu_device *adev); 2304void amdgpu_update_display_priority(struct amdgpu_device *adev);
2259bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); 2305bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
2260struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
2261 struct drm_file *filp,
2262 struct amdgpu_ctx *ctx,
2263 struct amdgpu_ib *ibs,
2264 uint32_t num_ibs);
2265 2306
2266int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); 2307int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
2267int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, 2308int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
@@ -2319,49 +2360,6 @@ long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
2319 unsigned long arg); 2360 unsigned long arg);
2320 2361
2321/* 2362/*
2322 * vm
2323 */
2324int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
2325void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
2326struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
2327 struct amdgpu_vm *vm,
2328 struct list_head *head);
2329int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
2330 struct amdgpu_sync *sync);
2331void amdgpu_vm_flush(struct amdgpu_ring *ring,
2332 struct amdgpu_vm *vm,
2333 struct fence *updates);
2334void amdgpu_vm_fence(struct amdgpu_device *adev,
2335 struct amdgpu_vm *vm,
2336 struct amdgpu_fence *fence);
2337uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
2338int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
2339 struct amdgpu_vm *vm);
2340int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
2341 struct amdgpu_vm *vm);
2342int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
2343 struct amdgpu_vm *vm, struct amdgpu_sync *sync);
2344int amdgpu_vm_bo_update(struct amdgpu_device *adev,
2345 struct amdgpu_bo_va *bo_va,
2346 struct ttm_mem_reg *mem);
2347void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
2348 struct amdgpu_bo *bo);
2349struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
2350 struct amdgpu_bo *bo);
2351struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
2352 struct amdgpu_vm *vm,
2353 struct amdgpu_bo *bo);
2354int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2355 struct amdgpu_bo_va *bo_va,
2356 uint64_t addr, uint64_t offset,
2357 uint64_t size, uint32_t flags);
2358int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
2359 struct amdgpu_bo_va *bo_va,
2360 uint64_t addr);
2361void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
2362 struct amdgpu_bo_va *bo_va);
2363int amdgpu_vm_free_job(struct amdgpu_job *job);
2364/*
2365 * functions used by amdgpu_encoder.c 2363 * functions used by amdgpu_encoder.c
2366 */ 2364 */
2367struct amdgpu_afmt_acr { 2365struct amdgpu_afmt_acr {
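The amdgpu.h churn above is mostly mechanical (the vm function prototypes move next to the structures they operate on), but the amdgpu_vm_manager change is structural: per-VMID state that lived in parallel arrays (active[], saved_table_addr[]) is gathered into one anonymous struct per slot, with an atomic owner field so a VMID can be claimed without the per-VM mutex this series deletes. A reduced sketch of the shape, names hypothetical:

#include <linux/atomic.h>

struct fence;                           /* opaque here */

#define NUM_VM_IDS 16                   /* AMDGPU_NUM_VM in the driver */

struct vmid_slot {
        struct fence *active;           /* last fence to use this VMID */
        atomic_long_t owner;            /* client currently holding it */
};

struct vmid_manager {
        struct vmid_slot ids[NUM_VM_IDS];
};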
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dfc4d02c7a38..4f352ec9dec4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
127 return 0; 127 return 0;
128} 128}
129 129
130struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
131 struct drm_file *filp,
132 struct amdgpu_ctx *ctx,
133 struct amdgpu_ib *ibs,
134 uint32_t num_ibs)
135{
136 struct amdgpu_cs_parser *parser;
137 int i;
138
139 parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
140 if (!parser)
141 return NULL;
142
143 parser->adev = adev;
144 parser->filp = filp;
145 parser->ctx = ctx;
146 parser->ibs = ibs;
147 parser->num_ibs = num_ibs;
148 for (i = 0; i < num_ibs; i++)
149 ibs[i].ctx = ctx;
150
151 return parser;
152}
153
154int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) 130int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
155{ 131{
156 union drm_amdgpu_cs *cs = data; 132 union drm_amdgpu_cs *cs = data;
@@ -246,6 +222,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
246 } 222 }
247 223
248 p->uf.bo = gem_to_amdgpu_bo(gobj); 224 p->uf.bo = gem_to_amdgpu_bo(gobj);
225 amdgpu_bo_ref(p->uf.bo);
226 drm_gem_object_unreference_unlocked(gobj);
249 p->uf.offset = fence_data->offset; 227 p->uf.offset = fence_data->offset;
250 } else { 228 } else {
251 ret = -EINVAL; 229 ret = -EINVAL;
@@ -463,8 +441,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
463 return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; 441 return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
464} 442}
465 443
466static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff) 444/**
445 * cs_parser_fini() - clean parser states
446 * @parser: parser structure holding parsing context.
447 * @error: error number
448 *
449 * If error is set then unvalidate buffer, otherwise just free memory
450 * used by parsing context.
451 **/
452static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
467{ 453{
454 unsigned i;
455
468 if (!error) { 456 if (!error) {
469 /* Sort the buffer list from the smallest to largest buffer, 457 /* Sort the buffer list from the smallest to largest buffer,
470 * which affects the order of buffers in the LRU list. 458 * which affects the order of buffers in the LRU list.
@@ -479,17 +467,14 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err
479 list_sort(NULL, &parser->validated, cmp_size_smaller_first); 467 list_sort(NULL, &parser->validated, cmp_size_smaller_first);
480 468
481 ttm_eu_fence_buffer_objects(&parser->ticket, 469 ttm_eu_fence_buffer_objects(&parser->ticket,
482 &parser->validated, 470 &parser->validated,
483 &parser->ibs[parser->num_ibs-1].fence->base); 471 parser->fence);
484 } else if (backoff) { 472 } else if (backoff) {
485 ttm_eu_backoff_reservation(&parser->ticket, 473 ttm_eu_backoff_reservation(&parser->ticket,
486 &parser->validated); 474 &parser->validated);
487 } 475 }
488} 476 fence_put(parser->fence);
489 477
490static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
491{
492 unsigned i;
493 if (parser->ctx) 478 if (parser->ctx)
494 amdgpu_ctx_put(parser->ctx); 479 amdgpu_ctx_put(parser->ctx);
495 if (parser->bo_list) 480 if (parser->bo_list)
@@ -499,31 +484,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
499 for (i = 0; i < parser->nchunks; i++) 484 for (i = 0; i < parser->nchunks; i++)
500 drm_free_large(parser->chunks[i].kdata); 485 drm_free_large(parser->chunks[i].kdata);
501 kfree(parser->chunks); 486 kfree(parser->chunks);
502 if (!amdgpu_enable_scheduler) 487 if (parser->ibs)
503 { 488 for (i = 0; i < parser->num_ibs; i++)
504 if (parser->ibs) 489 amdgpu_ib_free(parser->adev, &parser->ibs[i]);
505 for (i = 0; i < parser->num_ibs; i++) 490 kfree(parser->ibs);
506 amdgpu_ib_free(parser->adev, &parser->ibs[i]); 491 if (parser->uf.bo)
507 kfree(parser->ibs); 492 amdgpu_bo_unref(&parser->uf.bo);
508 if (parser->uf.bo)
509 drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
510 }
511
512 kfree(parser);
513}
514
515/**
516 * cs_parser_fini() - clean parser states
517 * @parser: parser structure holding parsing context.
518 * @error: error number
519 *
520 * If error is set than unvalidate buffer, otherwise just free memory
521 * used by parsing context.
522 **/
523static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
524{
525 amdgpu_cs_parser_fini_early(parser, error, backoff);
526 amdgpu_cs_parser_fini_late(parser);
527} 493}
528 494
529static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, 495static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -610,15 +576,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
610 } 576 }
611 577
612 r = amdgpu_bo_vm_update_pte(parser, vm); 578 r = amdgpu_bo_vm_update_pte(parser, vm);
613 if (r) { 579 if (!r)
614 goto out; 580 amdgpu_cs_sync_rings(parser);
615 }
616 amdgpu_cs_sync_rings(parser);
617 if (!amdgpu_enable_scheduler)
618 r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
619 parser->filp);
620 581
621out:
622 return r; 582 return r;
623} 583}
624 584
@@ -818,7 +778,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job)
818 amdgpu_ib_free(job->adev, &job->ibs[i]); 778 amdgpu_ib_free(job->adev, &job->ibs[i]);
819 kfree(job->ibs); 779 kfree(job->ibs);
820 if (job->uf.bo) 780 if (job->uf.bo)
821 drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base); 781 amdgpu_bo_unref(&job->uf.bo);
822 return 0; 782 return 0;
823} 783}
824 784
@@ -826,38 +786,35 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
826{ 786{
827 struct amdgpu_device *adev = dev->dev_private; 787 struct amdgpu_device *adev = dev->dev_private;
828 union drm_amdgpu_cs *cs = data; 788 union drm_amdgpu_cs *cs = data;
829 struct amdgpu_fpriv *fpriv = filp->driver_priv; 789 struct amdgpu_cs_parser parser = {};
830 struct amdgpu_vm *vm = &fpriv->vm;
831 struct amdgpu_cs_parser *parser;
832 bool reserved_buffers = false; 790 bool reserved_buffers = false;
833 int i, r; 791 int i, r;
834 792
835 if (!adev->accel_working) 793 if (!adev->accel_working)
836 return -EBUSY; 794 return -EBUSY;
837 795
838 parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0); 796 parser.adev = adev;
839 if (!parser) 797 parser.filp = filp;
840 return -ENOMEM; 798
841 r = amdgpu_cs_parser_init(parser, data); 799 r = amdgpu_cs_parser_init(&parser, data);
842 if (r) { 800 if (r) {
843 DRM_ERROR("Failed to initialize parser !\n"); 801 DRM_ERROR("Failed to initialize parser !\n");
844 amdgpu_cs_parser_fini(parser, r, false); 802 amdgpu_cs_parser_fini(&parser, r, false);
845 r = amdgpu_cs_handle_lockup(adev, r); 803 r = amdgpu_cs_handle_lockup(adev, r);
846 return r; 804 return r;
847 } 805 }
848 mutex_lock(&vm->mutex); 806 r = amdgpu_cs_parser_relocs(&parser);
849 r = amdgpu_cs_parser_relocs(parser);
850 if (r == -ENOMEM) 807 if (r == -ENOMEM)
851 DRM_ERROR("Not enough memory for command submission!\n"); 808 DRM_ERROR("Not enough memory for command submission!\n");
852 else if (r && r != -ERESTARTSYS) 809 else if (r && r != -ERESTARTSYS)
853 DRM_ERROR("Failed to process the buffer list %d!\n", r); 810 DRM_ERROR("Failed to process the buffer list %d!\n", r);
854 else if (!r) { 811 else if (!r) {
855 reserved_buffers = true; 812 reserved_buffers = true;
856 r = amdgpu_cs_ib_fill(adev, parser); 813 r = amdgpu_cs_ib_fill(adev, &parser);
857 } 814 }
858 815
859 if (!r) { 816 if (!r) {
860 r = amdgpu_cs_dependencies(adev, parser); 817 r = amdgpu_cs_dependencies(adev, &parser);
861 if (r) 818 if (r)
862 DRM_ERROR("Failed in the dependencies handling %d!\n", r); 819 DRM_ERROR("Failed in the dependencies handling %d!\n", r);
863 } 820 }
@@ -865,63 +822,71 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
865 if (r) 822 if (r)
866 goto out; 823 goto out;
867 824
868 for (i = 0; i < parser->num_ibs; i++) 825 for (i = 0; i < parser.num_ibs; i++)
869 trace_amdgpu_cs(parser, i); 826 trace_amdgpu_cs(&parser, i);
870 827
871 r = amdgpu_cs_ib_vm_chunk(adev, parser); 828 r = amdgpu_cs_ib_vm_chunk(adev, &parser);
872 if (r) 829 if (r)
873 goto out; 830 goto out;
874 831
875 if (amdgpu_enable_scheduler && parser->num_ibs) { 832 if (amdgpu_enable_scheduler && parser.num_ibs) {
833 struct amdgpu_ring * ring = parser.ibs->ring;
834 struct amd_sched_fence *fence;
876 struct amdgpu_job *job; 835 struct amdgpu_job *job;
877 struct amdgpu_ring * ring = parser->ibs->ring; 836
878 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); 837 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
879 if (!job) { 838 if (!job) {
880 r = -ENOMEM; 839 r = -ENOMEM;
881 goto out; 840 goto out;
882 } 841 }
842
883 job->base.sched = &ring->sched; 843 job->base.sched = &ring->sched;
884 job->base.s_entity = &parser->ctx->rings[ring->idx].entity; 844 job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
885 job->adev = parser->adev; 845 job->adev = parser.adev;
886 job->ibs = parser->ibs; 846 job->owner = parser.filp;
887 job->num_ibs = parser->num_ibs; 847 job->free_job = amdgpu_cs_free_job;
888 job->base.owner = parser->filp; 848
889 mutex_init(&job->job_lock); 849 job->ibs = parser.ibs;
850 job->num_ibs = parser.num_ibs;
851 parser.ibs = NULL;
852 parser.num_ibs = 0;
853
890 if (job->ibs[job->num_ibs - 1].user) { 854 if (job->ibs[job->num_ibs - 1].user) {
891 memcpy(&job->uf, &parser->uf, 855 job->uf = parser.uf;
892 sizeof(struct amdgpu_user_fence));
893 job->ibs[job->num_ibs - 1].user = &job->uf; 856 job->ibs[job->num_ibs - 1].user = &job->uf;
857 parser.uf.bo = NULL;
894 } 858 }
895 859
896 job->free_job = amdgpu_cs_free_job; 860 fence = amd_sched_fence_create(job->base.s_entity,
897 mutex_lock(&job->job_lock); 861 parser.filp);
898 r = amd_sched_entity_push_job(&job->base); 862 if (!fence) {
899 if (r) { 863 r = -ENOMEM;
900 mutex_unlock(&job->job_lock);
901 amdgpu_cs_free_job(job); 864 amdgpu_cs_free_job(job);
902 kfree(job); 865 kfree(job);
903 goto out; 866 goto out;
904 } 867 }
905 cs->out.handle = 868 job->base.s_fence = fence;
906 amdgpu_ctx_add_fence(parser->ctx, ring, 869 parser.fence = fence_get(&fence->base);
907 &job->base.s_fence->base);
908 parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
909 870
910 list_sort(NULL, &parser->validated, cmp_size_smaller_first); 871 cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
911 ttm_eu_fence_buffer_objects(&parser->ticket, 872 &fence->base);
912 &parser->validated, 873 job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
913 &job->base.s_fence->base);
914 874
915 mutex_unlock(&job->job_lock); 875 trace_amdgpu_cs_ioctl(job);
916 amdgpu_cs_parser_fini_late(parser); 876 amd_sched_entity_push_job(&job->base);
917 mutex_unlock(&vm->mutex); 877
918 return 0; 878 } else {
879 struct amdgpu_fence *fence;
880
881 r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
882 parser.filp);
883 fence = parser.ibs[parser.num_ibs - 1].fence;
884 parser.fence = fence_get(&fence->base);
885 cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
919 } 886 }
920 887
921 cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
922out: 888out:
923 amdgpu_cs_parser_fini(parser, r, reserved_buffers); 889 amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
924 mutex_unlock(&vm->mutex);
925 r = amdgpu_cs_handle_lockup(adev, r); 890 r = amdgpu_cs_handle_lockup(adev, r);
926 return r; 891 return r;
927} 892}
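amdgpu_cs_ioctl() above trades the kzalloc'd parser for an on-stack struct amdgpu_cs_parser parser = {}, removing the create helper outright, and when the IB array is handed to the scheduler job it nulls the parser's pointers so the now-unified amdgpu_cs_parser_fini() cannot free them a second time. That move-then-clear handoff in miniature, with hypothetical types:

#include <linux/slab.h>

struct holder {
        void *buf;
};

static void move_buf(struct holder *dst, struct holder *src)
{
        dst->buf = src->buf;
        src->buf = NULL;                /* src no longer owns the buffer */
}

static void cleanup(struct holder *h)
{
        kfree(h->buf);                  /* kfree(NULL) is a no-op */
        h->buf = NULL;
}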
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index e173a5a02f0d..5580d3420c3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -73,6 +73,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
73 struct drm_crtc *crtc = &amdgpuCrtc->base; 73 struct drm_crtc *crtc = &amdgpuCrtc->base;
74 unsigned long flags; 74 unsigned long flags;
75 unsigned i; 75 unsigned i;
76 int vpos, hpos, stat, min_udelay;
77 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
76 78
77 amdgpu_flip_wait_fence(adev, &work->excl); 79 amdgpu_flip_wait_fence(adev, &work->excl);
78 for (i = 0; i < work->shared_count; ++i) 80 for (i = 0; i < work->shared_count; ++i)
@@ -81,6 +83,41 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
81 /* We borrow the event spin lock for protecting flip_status */ 83 /* We borrow the event spin lock for protecting flip_status */
82 spin_lock_irqsave(&crtc->dev->event_lock, flags); 84 spin_lock_irqsave(&crtc->dev->event_lock, flags);
83 85
86 /* If this happens to execute within the "virtually extended" vblank
87 * interval before the start of the real vblank interval then it needs
88 * to delay programming the mmio flip until the real vblank is entered.
89 * This prevents completing a flip too early due to the way we fudge
90 * our vblank counter and vblank timestamps in order to work around the
91 * problem that the hw fires vblank interrupts before actual start of
92 * vblank (when line buffer refilling is done for a frame). It
93 * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
94 * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
95 *
96 * In practice this won't execute very often unless on very fast
97 * machines because the time window for this to happen is very small.
98 */
99 for (;;) {
100 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
101 * start in hpos, and to the "fudged earlier" vblank start in
102 * vpos.
103 */
104 stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
105 GET_DISTANCE_TO_VBLANKSTART,
106 &vpos, &hpos, NULL, NULL,
107 &crtc->hwmode);
108
109 if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
110 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
111 !(vpos >= 0 && hpos <= 0))
112 break;
113
114 /* Sleep at least until estimated real start of hw vblank */
115 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
116 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
117 usleep_range(min_udelay, 2 * min_udelay);
118 spin_lock_irqsave(&crtc->dev->event_lock, flags);
119 };
120
84 /* do the flip (mmio) */ 121 /* do the flip (mmio) */
85 adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); 122 adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
86 /* set the flip status */ 123 /* set the flip status */
@@ -109,7 +146,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
109 } else 146 } else
110 DRM_ERROR("failed to reserve buffer after flip\n"); 147 DRM_ERROR("failed to reserve buffer after flip\n");
111 148
112 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 149 amdgpu_bo_unref(&work->old_rbo);
113 kfree(work->shared); 150 kfree(work->shared);
114 kfree(work); 151 kfree(work);
115} 152}
@@ -148,8 +185,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
148 obj = old_amdgpu_fb->obj; 185 obj = old_amdgpu_fb->obj;
149 186
150 /* take a reference to the old object */ 187 /* take a reference to the old object */
151 drm_gem_object_reference(obj);
152 work->old_rbo = gem_to_amdgpu_bo(obj); 188 work->old_rbo = gem_to_amdgpu_bo(obj);
189 amdgpu_bo_ref(work->old_rbo);
153 190
154 new_amdgpu_fb = to_amdgpu_framebuffer(fb); 191 new_amdgpu_fb = to_amdgpu_framebuffer(fb);
155 obj = new_amdgpu_fb->obj; 192 obj = new_amdgpu_fb->obj;
@@ -222,7 +259,7 @@ pflip_cleanup:
222 amdgpu_bo_unreserve(new_rbo); 259 amdgpu_bo_unreserve(new_rbo);
223 260
224cleanup: 261cleanup:
225 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 262 amdgpu_bo_unref(&work->old_rbo);
226 fence_put(work->excl); 263 fence_put(work->excl);
227 for (i = 0; i < work->shared_count; ++i) 264 for (i = 0; i < work->shared_count; ++i)
228 fence_put(work->shared[i]); 265 fence_put(work->shared[i]);
@@ -712,6 +749,15 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
712 * \param dev Device to query. 749 * \param dev Device to query.
713 * \param pipe Crtc to query. 750 * \param pipe Crtc to query.
714 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). 751 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
752 * For driver internal use only also supports these flags:
753 *
754 * USE_REAL_VBLANKSTART to use the real start of vblank instead
755 * of a fudged earlier start of vblank.
756 *
757 * GET_DISTANCE_TO_VBLANKSTART to return distance to the
758 * fudged earlier start of vblank in *vpos and the distance
759 * to true start of vblank in *hpos.
760 *
715 * \param *vpos Location where vertical scanout position should be stored. 761 * \param *vpos Location where vertical scanout position should be stored.
716 * \param *hpos Location where horizontal scanout position should go. 762 * \param *hpos Location where horizontal scanout position should go.
717 * \param *stime Target location for timestamp taken immediately before 763 * \param *stime Target location for timestamp taken immediately before
@@ -776,10 +822,40 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
776 vbl_end = 0; 822 vbl_end = 0;
777 } 823 }
778 824
825 /* Called from driver internal vblank counter query code? */
826 if (flags & GET_DISTANCE_TO_VBLANKSTART) {
827 /* Caller wants distance from real vbl_start in *hpos */
828 *hpos = *vpos - vbl_start;
829 }
830
831 /* Fudge vblank to start a few scanlines earlier to handle the
832 * problem that vblank irqs fire a few scanlines before start
833 * of vblank. Some driver internal callers need the true vblank
834 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
835 *
836 * The cause of the "early" vblank irq is that the irq is triggered
837 * by the line buffer logic when the line buffer read position enters
838 * the vblank, whereas our crtc scanout position naturally lags the
839 * line buffer read position.
840 */
841 if (!(flags & USE_REAL_VBLANKSTART))
842 vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
843
779 /* Test scanout position against vblank region. */ 844 /* Test scanout position against vblank region. */
780 if ((*vpos < vbl_start) && (*vpos >= vbl_end)) 845 if ((*vpos < vbl_start) && (*vpos >= vbl_end))
781 in_vbl = false; 846 in_vbl = false;
782 847
848 /* In vblank? */
849 if (in_vbl)
850 ret |= DRM_SCANOUTPOS_IN_VBLANK;
851
852 /* Called from driver internal vblank counter query code? */
853 if (flags & GET_DISTANCE_TO_VBLANKSTART) {
854 /* Caller wants distance from fudged earlier vbl_start */
855 *vpos -= vbl_start;
856 return ret;
857 }
858
783 /* Check if inside vblank area and apply corrective offsets: 859 /* Check if inside vblank area and apply corrective offsets:
784 * vpos will then be >=0 in video scanout area, but negative 860 * vpos will then be >=0 in video scanout area, but negative
785 * within vblank area, counting down the number of lines until 861 * within vblank area, counting down the number of lines until
@@ -795,32 +871,6 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
795 /* Correct for shifted end of vbl at vbl_end. */ 871 /* Correct for shifted end of vbl at vbl_end. */
796 *vpos = *vpos - vbl_end; 872 *vpos = *vpos - vbl_end;
797 873
798 /* In vblank? */
799 if (in_vbl)
800 ret |= DRM_SCANOUTPOS_IN_VBLANK;
801
802 /* Is vpos outside nominal vblank area, but less than
803 * 1/100 of a frame height away from start of vblank?
804 * If so, assume this isn't a massively delayed vblank
805 * interrupt, but a vblank interrupt that fired a few
806 * microseconds before true start of vblank. Compensate
807 * by adding a full frame duration to the final timestamp.
808 * Happens, e.g., on ATI R500, R600.
809 *
810 * We only do this if DRM_CALLED_FROM_VBLIRQ.
811 */
812 if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
813 vbl_start = mode->crtc_vdisplay;
814 vtotal = mode->crtc_vtotal;
815
816 if (vbl_start - *vpos < vtotal / 100) {
817 *vpos -= vtotal;
818
819 /* Signal this correction as "applied". */
820 ret |= 0x8;
821 }
822 }
823
824 return ret; 874 return ret;
825} 875}
826 876
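The hunk above splits the scanout query into three cases: with GET_DISTANCE_TO_VBLANKSTART the caller receives the distance to the true start of vblank in *hpos and to the fudged, earlier start in *vpos, and without USE_REAL_VBLANKSTART the nominal vblank start is pulled forward by lb_vblank_lead_lines to match the early-firing irq. A minimal standalone C sketch of that control flow; apply_vblank_flags and all the numbers are hypothetical stand-ins, not the driver function:

#include <stdio.h>

#define USE_REAL_VBLANKSTART        (1u << 30)
#define GET_DISTANCE_TO_VBLANKSTART (1u << 31)

/* vpos starts as the raw vertical scanout position, vbl_start is
 * the nominal first vblank line, lead_lines models
 * lb_vblank_lead_lines. */
static void apply_vblank_flags(unsigned flags, int lead_lines,
                               int vbl_start, int *vpos, int *hpos)
{
    /* Distance to the true (unfudged) start of vblank. */
    if (flags & GET_DISTANCE_TO_VBLANKSTART)
        *hpos = *vpos - vbl_start;

    /* Fudge vblank to start earlier unless the caller asked for
     * the real start, mirroring the early vblank irq. */
    if (!(flags & USE_REAL_VBLANKSTART))
        vbl_start -= lead_lines;

    /* Distance to the fudged, earlier start of vblank. */
    if (flags & GET_DISTANCE_TO_VBLANKSTART)
        *vpos -= vbl_start;
}

int main(void)
{
    int vpos = 1080, hpos = 0;

    apply_vblank_flags(GET_DISTANCE_TO_VBLANKSTART, 3, 1084,
                       &vpos, &hpos);
    printf("vpos=%d hpos=%d\n", vpos, hpos); /* vpos=-1 hpos=-4 */
    return 0;
}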
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 257d72205bb5..3671f9f220bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -47,6 +47,9 @@
 47 * that the relevant GPU caches have been flushed. 47 * that the relevant GPU caches have been flushed.
48 */ 48 */
49 49
50static struct kmem_cache *amdgpu_fence_slab;
51static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
52
50/** 53/**
51 * amdgpu_fence_write - write a fence value 54 * amdgpu_fence_write - write a fence value
52 * 55 *
@@ -85,24 +88,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
85} 88}
86 89
87/** 90/**
88 * amdgpu_fence_schedule_check - schedule lockup check
89 *
90 * @ring: pointer to struct amdgpu_ring
91 *
92 * Queues a delayed work item to check for lockups.
93 */
94static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
95{
96 /*
97 * Do not reset the timer here with mod_delayed_work,
98 * this can livelock in an interaction with TTM delayed destroy.
99 */
100 queue_delayed_work(system_power_efficient_wq,
101 &ring->fence_drv.lockup_work,
102 AMDGPU_FENCE_JIFFIES_TIMEOUT);
103}
104
105/**
106 * amdgpu_fence_emit - emit a fence on the requested ring 91 * amdgpu_fence_emit - emit a fence on the requested ring
107 * 92 *
108 * @ring: ring the fence is associated with 93 * @ring: ring the fence is associated with
@@ -118,7 +103,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
118 struct amdgpu_device *adev = ring->adev; 103 struct amdgpu_device *adev = ring->adev;
119 104
120 /* we are protected by the ring emission mutex */ 105 /* we are protected by the ring emission mutex */
121 *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); 106 *fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
122 if ((*fence) == NULL) { 107 if ((*fence) == NULL) {
123 return -ENOMEM; 108 return -ENOMEM;
124 } 109 }
@@ -132,11 +117,23 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
132 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, 117 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
133 (*fence)->seq, 118 (*fence)->seq,
134 AMDGPU_FENCE_FLAG_INT); 119 AMDGPU_FENCE_FLAG_INT);
135 trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
136 return 0; 120 return 0;
137} 121}
138 122
139/** 123/**
124 * amdgpu_fence_schedule_fallback - schedule fallback check
125 *
126 * @ring: pointer to struct amdgpu_ring
127 *
 128 * Start a timer as a fallback to our interrupts.
129 */
130static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
131{
132 mod_timer(&ring->fence_drv.fallback_timer,
133 jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
134}
135
136/**
140 * amdgpu_fence_activity - check for fence activity 137 * amdgpu_fence_activity - check for fence activity
141 * 138 *
142 * @ring: pointer to struct amdgpu_ring 139 * @ring: pointer to struct amdgpu_ring
@@ -202,45 +199,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
202 } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); 199 } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
203 200
204 if (seq < last_emitted) 201 if (seq < last_emitted)
205 amdgpu_fence_schedule_check(ring); 202 amdgpu_fence_schedule_fallback(ring);
206 203
207 return wake; 204 return wake;
208} 205}
209 206
210/** 207/**
211 * amdgpu_fence_check_lockup - check for hardware lockup 208 * amdgpu_fence_process - process a fence
212 * 209 *
213 * @work: delayed work item 210 * @adev: amdgpu_device pointer
211 * @ring: ring index the fence is associated with
214 * 212 *
215 * Checks for fence activity and if there is none probe 213 * Checks the current fence value and wakes the fence queue
 216 * the hardware if a lockup occurred. 214 * if the sequence number has increased (all asics).
217 */ 215 */
218static void amdgpu_fence_check_lockup(struct work_struct *work) 216void amdgpu_fence_process(struct amdgpu_ring *ring)
219{ 217{
220 struct amdgpu_fence_driver *fence_drv;
221 struct amdgpu_ring *ring;
222
223 fence_drv = container_of(work, struct amdgpu_fence_driver,
224 lockup_work.work);
225 ring = fence_drv->ring;
226
227 if (amdgpu_fence_activity(ring)) 218 if (amdgpu_fence_activity(ring))
228 wake_up_all(&ring->fence_drv.fence_queue); 219 wake_up_all(&ring->fence_drv.fence_queue);
229} 220}
230 221
231/** 222/**
232 * amdgpu_fence_process - process a fence 223 * amdgpu_fence_fallback - fallback for hardware interrupts
233 * 224 *
234 * @adev: amdgpu_device pointer 225 * @work: delayed work item
235 * @ring: ring index the fence is associated with
236 * 226 *
237 * Checks the current fence value and wakes the fence queue 227 * Checks for fence activity.
238 * if the sequence number has increased (all asics).
239 */ 228 */
240void amdgpu_fence_process(struct amdgpu_ring *ring) 229static void amdgpu_fence_fallback(unsigned long arg)
241{ 230{
242 if (amdgpu_fence_activity(ring)) 231 struct amdgpu_ring *ring = (void *)arg;
243 wake_up_all(&ring->fence_drv.fence_queue); 232
233 amdgpu_fence_process(ring);
244} 234}
245 235
246/** 236/**
@@ -290,7 +280,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
290 if (atomic64_read(&ring->fence_drv.last_seq) >= seq) 280 if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
291 return 0; 281 return 0;
292 282
293 amdgpu_fence_schedule_check(ring); 283 amdgpu_fence_schedule_fallback(ring);
294 wait_event(ring->fence_drv.fence_queue, ( 284 wait_event(ring->fence_drv.fence_queue, (
295 (signaled = amdgpu_fence_seq_signaled(ring, seq)))); 285 (signaled = amdgpu_fence_seq_signaled(ring, seq))));
296 286
@@ -491,9 +481,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
491 atomic64_set(&ring->fence_drv.last_seq, 0); 481 atomic64_set(&ring->fence_drv.last_seq, 0);
492 ring->fence_drv.initialized = false; 482 ring->fence_drv.initialized = false;
493 483
494 INIT_DELAYED_WORK(&ring->fence_drv.lockup_work, 484 setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
495 amdgpu_fence_check_lockup); 485 (unsigned long)ring);
496 ring->fence_drv.ring = ring;
497 486
498 init_waitqueue_head(&ring->fence_drv.fence_queue); 487 init_waitqueue_head(&ring->fence_drv.fence_queue);
499 488
@@ -536,6 +525,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
536 */ 525 */
537int amdgpu_fence_driver_init(struct amdgpu_device *adev) 526int amdgpu_fence_driver_init(struct amdgpu_device *adev)
538{ 527{
528 if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
529 amdgpu_fence_slab = kmem_cache_create(
530 "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
531 SLAB_HWCACHE_ALIGN, NULL);
532 if (!amdgpu_fence_slab)
533 return -ENOMEM;
534 }
539 if (amdgpu_debugfs_fence_init(adev)) 535 if (amdgpu_debugfs_fence_init(adev))
540 dev_err(adev->dev, "fence debugfs file creation failed\n"); 536 dev_err(adev->dev, "fence debugfs file creation failed\n");
541 537
@@ -554,9 +550,12 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
554{ 550{
555 int i, r; 551 int i, r;
556 552
553 if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
554 kmem_cache_destroy(amdgpu_fence_slab);
557 mutex_lock(&adev->ring_lock); 555 mutex_lock(&adev->ring_lock);
558 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 556 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
559 struct amdgpu_ring *ring = adev->rings[i]; 557 struct amdgpu_ring *ring = adev->rings[i];
558
560 if (!ring || !ring->fence_drv.initialized) 559 if (!ring || !ring->fence_drv.initialized)
561 continue; 560 continue;
562 r = amdgpu_fence_wait_empty(ring); 561 r = amdgpu_fence_wait_empty(ring);
@@ -568,6 +567,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
568 amdgpu_irq_put(adev, ring->fence_drv.irq_src, 567 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
569 ring->fence_drv.irq_type); 568 ring->fence_drv.irq_type);
570 amd_sched_fini(&ring->sched); 569 amd_sched_fini(&ring->sched);
570 del_timer_sync(&ring->fence_drv.fallback_timer);
571 ring->fence_drv.initialized = false; 571 ring->fence_drv.initialized = false;
572 } 572 }
573 mutex_unlock(&adev->ring_lock); 573 mutex_unlock(&adev->ring_lock);
@@ -751,18 +751,25 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
751 fence->fence_wake.func = amdgpu_fence_check_signaled; 751 fence->fence_wake.func = amdgpu_fence_check_signaled;
752 __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake); 752 __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
753 fence_get(f); 753 fence_get(f);
754 amdgpu_fence_schedule_check(ring); 754 if (!timer_pending(&ring->fence_drv.fallback_timer))
755 amdgpu_fence_schedule_fallback(ring);
755 FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); 756 FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
756 return true; 757 return true;
757} 758}
758 759
760static void amdgpu_fence_release(struct fence *f)
761{
762 struct amdgpu_fence *fence = to_amdgpu_fence(f);
763 kmem_cache_free(amdgpu_fence_slab, fence);
764}
765
759const struct fence_ops amdgpu_fence_ops = { 766const struct fence_ops amdgpu_fence_ops = {
760 .get_driver_name = amdgpu_fence_get_driver_name, 767 .get_driver_name = amdgpu_fence_get_driver_name,
761 .get_timeline_name = amdgpu_fence_get_timeline_name, 768 .get_timeline_name = amdgpu_fence_get_timeline_name,
762 .enable_signaling = amdgpu_fence_enable_signaling, 769 .enable_signaling = amdgpu_fence_enable_signaling,
763 .signaled = amdgpu_fence_is_signaled, 770 .signaled = amdgpu_fence_is_signaled,
764 .wait = fence_default_wait, 771 .wait = fence_default_wait,
765 .release = NULL, 772 .release = amdgpu_fence_release,
766}; 773};
767 774
768/* 775/*
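Fences now come out of a shared kmem_cache guarded by amdgpu_fence_slab_ref: the first amdgpu_fence_driver_init() call creates the slab, the last amdgpu_fence_driver_fini() destroys it, and amdgpu_fence_release() returns each fence to it. A rough userspace C11 analogue of that first-ref-creates / last-ref-destroys pattern, with a plain heap block standing in for the slab:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the shared kmem_cache. */
static void *shared_pool;
static atomic_int shared_pool_ref;

static int pool_init(void)
{
    /* First user creates the shared resource. */
    if (atomic_fetch_add(&shared_pool_ref, 1) == 0) {
        shared_pool = malloc(4096);
        if (!shared_pool)
            return -1;
    }
    return 0;
}

static void pool_fini(void)
{
    /* Last user tears it down. */
    if (atomic_fetch_sub(&shared_pool_ref, 1) == 1) {
        free(shared_pool);
        shared_pool = NULL;
    }
}

int main(void)
{
    pool_init();                 /* device A */
    pool_init();                 /* device B */
    pool_fini();                 /* B still holds a reference */
    printf("pool %s\n", shared_pool ? "alive" : "gone");
    pool_fini();                 /* last reference: freed */
    printf("pool %s\n", shared_pool ? "alive" : "gone");
    return 0;
}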
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 087332858853..9c253c535d26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
115 struct amdgpu_vm *vm = &fpriv->vm; 115 struct amdgpu_vm *vm = &fpriv->vm;
116 struct amdgpu_bo_va *bo_va; 116 struct amdgpu_bo_va *bo_va;
117 int r; 117 int r;
118 mutex_lock(&vm->mutex);
119 r = amdgpu_bo_reserve(rbo, false); 118 r = amdgpu_bo_reserve(rbo, false);
120 if (r) { 119 if (r)
121 mutex_unlock(&vm->mutex);
122 return r; 120 return r;
123 }
124 121
125 bo_va = amdgpu_vm_bo_find(vm, rbo); 122 bo_va = amdgpu_vm_bo_find(vm, rbo);
126 if (!bo_va) { 123 if (!bo_va) {
@@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
129 ++bo_va->ref_count; 126 ++bo_va->ref_count;
130 } 127 }
131 amdgpu_bo_unreserve(rbo); 128 amdgpu_bo_unreserve(rbo);
132 mutex_unlock(&vm->mutex);
133 return 0; 129 return 0;
134} 130}
135 131
@@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
142 struct amdgpu_vm *vm = &fpriv->vm; 138 struct amdgpu_vm *vm = &fpriv->vm;
143 struct amdgpu_bo_va *bo_va; 139 struct amdgpu_bo_va *bo_va;
144 int r; 140 int r;
145 mutex_lock(&vm->mutex);
146 r = amdgpu_bo_reserve(rbo, true); 141 r = amdgpu_bo_reserve(rbo, true);
147 if (r) { 142 if (r) {
148 mutex_unlock(&vm->mutex);
149 dev_err(adev->dev, "leaking bo va because " 143 dev_err(adev->dev, "leaking bo va because "
150 "we fail to reserve bo (%d)\n", r); 144 "we fail to reserve bo (%d)\n", r);
151 return; 145 return;
@@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
157 } 151 }
158 } 152 }
159 amdgpu_bo_unreserve(rbo); 153 amdgpu_bo_unreserve(rbo);
160 mutex_unlock(&vm->mutex);
161} 154}
162 155
163static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) 156static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -242,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
242 AMDGPU_GEM_USERPTR_REGISTER)) 235 AMDGPU_GEM_USERPTR_REGISTER))
243 return -EINVAL; 236 return -EINVAL;
244 237
245 if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) || 238 if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
246 !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) { 239 !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
240 !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
247 241
248 /* if we want to write to it we must require anonymous 242 /* if we want to write to it we must require anonymous
 249 memory and install an MMU notifier */ 243 memory and install an MMU notifier */
@@ -483,6 +477,17 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
483 if (domain == AMDGPU_GEM_DOMAIN_CPU) 477 if (domain == AMDGPU_GEM_DOMAIN_CPU)
484 goto error_unreserve; 478 goto error_unreserve;
485 } 479 }
480 list_for_each_entry(entry, &duplicates, head) {
481 domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
482 /* if anything is swapped out don't swap it in here,
483 just abort and wait for the next CS */
484 if (domain == AMDGPU_GEM_DOMAIN_CPU)
485 goto error_unreserve;
486 }
487
488 r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
489 if (r)
490 goto error_unreserve;
486 491
487 r = amdgpu_vm_clear_freed(adev, bo_va->vm); 492 r = amdgpu_vm_clear_freed(adev, bo_va->vm);
488 if (r) 493 if (r)
@@ -512,6 +517,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
512 struct amdgpu_fpriv *fpriv = filp->driver_priv; 517 struct amdgpu_fpriv *fpriv = filp->driver_priv;
513 struct amdgpu_bo *rbo; 518 struct amdgpu_bo *rbo;
514 struct amdgpu_bo_va *bo_va; 519 struct amdgpu_bo_va *bo_va;
520 struct ttm_validate_buffer tv, tv_pd;
521 struct ww_acquire_ctx ticket;
522 struct list_head list, duplicates;
515 uint32_t invalid_flags, va_flags = 0; 523 uint32_t invalid_flags, va_flags = 0;
516 int r = 0; 524 int r = 0;
517 525
@@ -547,19 +555,28 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
547 gobj = drm_gem_object_lookup(dev, filp, args->handle); 555 gobj = drm_gem_object_lookup(dev, filp, args->handle);
548 if (gobj == NULL) 556 if (gobj == NULL)
549 return -ENOENT; 557 return -ENOENT;
550 mutex_lock(&fpriv->vm.mutex);
551 rbo = gem_to_amdgpu_bo(gobj); 558 rbo = gem_to_amdgpu_bo(gobj);
552 r = amdgpu_bo_reserve(rbo, false); 559 INIT_LIST_HEAD(&list);
560 INIT_LIST_HEAD(&duplicates);
561 tv.bo = &rbo->tbo;
562 tv.shared = true;
563 list_add(&tv.head, &list);
564
565 if (args->operation == AMDGPU_VA_OP_MAP) {
566 tv_pd.bo = &fpriv->vm.page_directory->tbo;
567 tv_pd.shared = true;
568 list_add(&tv_pd.head, &list);
569 }
570 r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
553 if (r) { 571 if (r) {
554 mutex_unlock(&fpriv->vm.mutex);
555 drm_gem_object_unreference_unlocked(gobj); 572 drm_gem_object_unreference_unlocked(gobj);
556 return r; 573 return r;
557 } 574 }
558 575
559 bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo); 576 bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
560 if (!bo_va) { 577 if (!bo_va) {
561 amdgpu_bo_unreserve(rbo); 578 ttm_eu_backoff_reservation(&ticket, &list);
562 mutex_unlock(&fpriv->vm.mutex); 579 drm_gem_object_unreference_unlocked(gobj);
563 return -ENOENT; 580 return -ENOENT;
564 } 581 }
565 582
@@ -581,10 +598,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
581 default: 598 default:
582 break; 599 break;
583 } 600 }
584 601 ttm_eu_backoff_reservation(&ticket, &list);
585 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) 602 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
586 amdgpu_gem_va_update_vm(adev, bo_va, args->operation); 603 amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
587 mutex_unlock(&fpriv->vm.mutex); 604
588 drm_gem_object_unreference_unlocked(gobj); 605 drm_gem_object_unreference_unlocked(gobj);
589 return r; 606 return r;
590} 607}
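The reworked userptr check above only enforces the anonymous-memory and MMU-notifier requirements when the mapping is writable; read-only userptrs are now exempt. A small standalone C sketch of just that predicate, with illustrative flag values rather than the real uapi bits:

#include <stdbool.h>
#include <stdio.h>

#define USERPTR_READONLY (1u << 0) /* illustrative values */
#define USERPTR_ANONONLY (1u << 1)
#define USERPTR_REGISTER (1u << 2)

/* True when the stricter path must run: the caller wants write
 * access but did not promise anonymous memory plus a registered
 * MMU notifier. */
static bool needs_anon_and_notifier(unsigned flags)
{
    return !(flags & USERPTR_READONLY) &&
           (!(flags & USERPTR_ANONONLY) ||
            !(flags & USERPTR_REGISTER));
}

int main(void)
{
    printf("%d\n", needs_anon_and_notifier(USERPTR_READONLY)); /* 0 */
    printf("%d\n", needs_anon_and_notifier(USERPTR_ANONONLY)); /* 1 */
    printf("%d\n", needs_anon_and_notifier(USERPTR_ANONONLY |
                                           USERPTR_REGISTER)); /* 0 */
    return 0;
}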
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index e65987743871..9e25edafa721 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -62,7 +62,7 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
62 int r; 62 int r;
63 63
64 if (size) { 64 if (size) {
65 r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, 65 r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
66 &ib->sa_bo, size, 256); 66 &ib->sa_bo, size, 256);
67 if (r) { 67 if (r) {
68 dev_err(adev->dev, "failed to get a new IB (%d)\n", r); 68 dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -216,7 +216,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
216 } 216 }
217 217
218 if (ib->vm) 218 if (ib->vm)
219 amdgpu_vm_fence(adev, ib->vm, ib->fence); 219 amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);
220 220
221 amdgpu_ring_unlock_commit(ring); 221 amdgpu_ring_unlock_commit(ring);
222 return 0; 222 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 1618e2294a16..e23843f4d877 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -611,13 +611,59 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
611u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) 611u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
612{ 612{
613 struct amdgpu_device *adev = dev->dev_private; 613 struct amdgpu_device *adev = dev->dev_private;
614 int vpos, hpos, stat;
615 u32 count;
614 616
615 if (pipe >= adev->mode_info.num_crtc) { 617 if (pipe >= adev->mode_info.num_crtc) {
616 DRM_ERROR("Invalid crtc %u\n", pipe); 618 DRM_ERROR("Invalid crtc %u\n", pipe);
617 return -EINVAL; 619 return -EINVAL;
618 } 620 }
619 621
620 return amdgpu_display_vblank_get_counter(adev, pipe); 622 /* The hw increments its frame counter at start of vsync, not at start
623 * of vblank, as is required by DRM core vblank counter handling.
624 * Cook the hw count here to make it appear to the caller as if it
625 * incremented at start of vblank. We measure distance to start of
626 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
627 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
628 * result by 1 to give the proper appearance to caller.
629 */
630 if (adev->mode_info.crtcs[pipe]) {
 631 /* Repeat the readout if needed to provide a stable result if
632 * we cross start of vsync during the queries.
633 */
634 do {
635 count = amdgpu_display_vblank_get_counter(adev, pipe);
636 /* Ask amdgpu_get_crtc_scanoutpos to return vpos as
637 * distance to start of vblank, instead of regular
638 * vertical scanout pos.
639 */
640 stat = amdgpu_get_crtc_scanoutpos(
641 dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
642 &vpos, &hpos, NULL, NULL,
643 &adev->mode_info.crtcs[pipe]->base.hwmode);
644 } while (count != amdgpu_display_vblank_get_counter(adev, pipe));
645
646 if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
647 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
648 DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
649 } else {
650 DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
651 pipe, vpos);
652
653 /* Bump counter if we are at >= leading edge of vblank,
654 * but before vsync where vpos would turn negative and
655 * the hw counter really increments.
656 */
657 if (vpos >= 0)
658 count++;
659 }
660 } else {
661 /* Fallback to use value as is. */
662 count = amdgpu_display_vblank_get_counter(adev, pipe);
663 DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
664 }
665
666 return count;
621} 667}
622 668
623/** 669/**
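The cooked counter boils down to: read the hw frame counter and the vblank-relative scanout position, retry if the counter moved underneath the query, and add one while scanning between start of vblank and start of vsync. A toy, runnable C model of that loop, with stub readout functions standing in for the register accesses:

#include <stdio.h>

/* Stubs standing in for the real register reads; assumed stable
 * here so the retry loop exits immediately. */
static unsigned hw_frame_counter(void) { return 41; }
static int vpos_from_vblank_start(void) { return 2; }

static unsigned cooked_vblank_count(void)
{
    unsigned count;
    int vpos;

    /* Repeat the readout if we crossed start of vsync during the
     * queries, so count and vpos stay consistent. */
    do {
        count = hw_frame_counter();
        vpos = vpos_from_vblank_start();
    } while (count != hw_frame_counter());

    /* Between start of vblank (vpos >= 0) and start of vsync the
     * hw counter has not incremented yet; pretend it already has. */
    if (vpos >= 0)
        count++;
    return count;
}

int main(void)
{
    printf("cooked count: %u\n", cooked_vblank_count()); /* 42 */
    return 0;
}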
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index b62c1710cab6..064ebb347074 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -407,6 +407,7 @@ struct amdgpu_crtc {
407 u32 line_time; 407 u32 line_time;
408 u32 wm_low; 408 u32 wm_low;
409 u32 wm_high; 409 u32 wm_high;
410 u32 lb_vblank_lead_lines;
410 struct drm_display_mode hw_mode; 411 struct drm_display_mode hw_mode;
411}; 412};
412 413
@@ -528,6 +529,10 @@ struct amdgpu_framebuffer {
528#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ 529#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
529 ((em) == ATOM_ENCODER_MODE_DP_MST)) 530 ((em) == ATOM_ENCODER_MODE_DP_MST))
530 531
532/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
533#define USE_REAL_VBLANKSTART (1 << 30)
534#define GET_DISTANCE_TO_VBLANKSTART (1 << 31)
535
531void amdgpu_link_encoder_connector(struct drm_device *dev); 536void amdgpu_link_encoder_connector(struct drm_device *dev);
532 537
533struct drm_connector * 538struct drm_connector *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 0d524384ff79..c3ce103b6a33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -100,6 +100,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
100 list_del_init(&bo->list); 100 list_del_init(&bo->list);
101 mutex_unlock(&bo->adev->gem.mutex); 101 mutex_unlock(&bo->adev->gem.mutex);
102 drm_gem_object_release(&bo->gem_base); 102 drm_gem_object_release(&bo->gem_base);
103 amdgpu_bo_unref(&bo->parent);
103 kfree(bo->metadata); 104 kfree(bo->metadata);
104 kfree(bo); 105 kfree(bo);
105} 106}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 3c2ff4567798..ea756e77b023 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -189,10 +189,9 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
189 struct amdgpu_sa_manager *sa_manager); 189 struct amdgpu_sa_manager *sa_manager);
190int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, 190int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
191 struct amdgpu_sa_manager *sa_manager); 191 struct amdgpu_sa_manager *sa_manager);
192int amdgpu_sa_bo_new(struct amdgpu_device *adev, 192int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
193 struct amdgpu_sa_manager *sa_manager, 193 struct amdgpu_sa_bo **sa_bo,
194 struct amdgpu_sa_bo **sa_bo, 194 unsigned size, unsigned align);
195 unsigned size, unsigned align);
196void amdgpu_sa_bo_free(struct amdgpu_device *adev, 195void amdgpu_sa_bo_free(struct amdgpu_device *adev,
197 struct amdgpu_sa_bo **sa_bo, 196 struct amdgpu_sa_bo **sa_bo,
198 struct fence *fence); 197 struct fence *fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 0212b31dc194..8b88edb0434b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -311,8 +311,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
311 return false; 311 return false;
312} 312}
313 313
314int amdgpu_sa_bo_new(struct amdgpu_device *adev, 314int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
315 struct amdgpu_sa_manager *sa_manager,
316 struct amdgpu_sa_bo **sa_bo, 315 struct amdgpu_sa_bo **sa_bo,
317 unsigned size, unsigned align) 316 unsigned size, unsigned align)
318{ 317{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index dcf4a8aca680..438c05254695 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -26,6 +26,7 @@
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include "amdgpu.h" 28#include "amdgpu.h"
29#include "amdgpu_trace.h"
29 30
30static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) 31static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
31{ 32{
@@ -44,11 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
44 return NULL; 45 return NULL;
45 } 46 }
46 job = to_amdgpu_job(sched_job); 47 job = to_amdgpu_job(sched_job);
47 mutex_lock(&job->job_lock); 48 trace_amdgpu_sched_run_job(job);
48 r = amdgpu_ib_schedule(job->adev, 49 r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
49 job->num_ibs,
50 job->ibs,
51 job->base.owner);
52 if (r) { 50 if (r) {
53 DRM_ERROR("Error scheduling IBs (%d)\n", r); 51 DRM_ERROR("Error scheduling IBs (%d)\n", r);
54 goto err; 52 goto err;
@@ -61,8 +59,6 @@ err:
61 if (job->free_job) 59 if (job->free_job)
62 job->free_job(job); 60 job->free_job(job);
63 61
64 mutex_unlock(&job->job_lock);
65 fence_put(&job->base.s_fence->base);
66 kfree(job); 62 kfree(job);
67 return fence ? &fence->base : NULL; 63 return fence ? &fence->base : NULL;
68} 64}
@@ -88,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
88 return -ENOMEM; 84 return -ENOMEM;
89 job->base.sched = &ring->sched; 85 job->base.sched = &ring->sched;
90 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; 86 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
87 job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
88 if (!job->base.s_fence) {
89 kfree(job);
90 return -ENOMEM;
91 }
92 *f = fence_get(&job->base.s_fence->base);
93
91 job->adev = adev; 94 job->adev = adev;
92 job->ibs = ibs; 95 job->ibs = ibs;
93 job->num_ibs = num_ibs; 96 job->num_ibs = num_ibs;
94 job->base.owner = owner; 97 job->owner = owner;
95 mutex_init(&job->job_lock);
96 job->free_job = free_job; 98 job->free_job = free_job;
97 mutex_lock(&job->job_lock); 99 amd_sched_entity_push_job(&job->base);
98 r = amd_sched_entity_push_job(&job->base);
99 if (r) {
100 mutex_unlock(&job->job_lock);
101 kfree(job);
102 return r;
103 }
104 *f = fence_get(&job->base.s_fence->base);
105 mutex_unlock(&job->job_lock);
106 } else { 100 } else {
107 r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); 101 r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
108 if (r) 102 if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
index ff3ca52ec6fe..1caaf201b708 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
@@ -40,7 +40,7 @@ int amdgpu_semaphore_create(struct amdgpu_device *adev,
40 if (*semaphore == NULL) { 40 if (*semaphore == NULL) {
41 return -ENOMEM; 41 return -ENOMEM;
42 } 42 }
43 r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, 43 r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
44 &(*semaphore)->sa_bo, 8, 8); 44 &(*semaphore)->sa_bo, 8, 8);
45 if (r) { 45 if (r) {
46 kfree(*semaphore); 46 kfree(*semaphore);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a6697fd05217..dd005c336c97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -302,8 +302,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
302 return -EINVAL; 302 return -EINVAL;
303 } 303 }
304 304
305 if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores || 305 if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
306 (count >= AMDGPU_NUM_SYNCS)) { 306 r = fence_wait(&fence->base, true);
307 if (r)
308 return r;
309 continue;
310 }
311
312 if (count >= AMDGPU_NUM_SYNCS) {
307 /* not enough room, wait manually */ 313 /* not enough room, wait manually */
308 r = fence_wait(&fence->base, false); 314 r = fence_wait(&fence->base, false);
309 if (r) 315 if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 76ecbaf72a2e..8f9834ab1bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs,
48 __entry->fences) 48 __entry->fences)
49); 49);
50 50
51TRACE_EVENT(amdgpu_cs_ioctl,
52 TP_PROTO(struct amdgpu_job *job),
53 TP_ARGS(job),
54 TP_STRUCT__entry(
55 __field(struct amdgpu_device *, adev)
56 __field(struct amd_sched_job *, sched_job)
57 __field(struct amdgpu_ib *, ib)
58 __field(struct fence *, fence)
59 __field(char *, ring_name)
60 __field(u32, num_ibs)
61 ),
62
63 TP_fast_assign(
64 __entry->adev = job->adev;
65 __entry->sched_job = &job->base;
66 __entry->ib = job->ibs;
67 __entry->fence = &job->base.s_fence->base;
68 __entry->ring_name = job->ibs[0].ring->name;
69 __entry->num_ibs = job->num_ibs;
70 ),
71 TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
72 __entry->adev, __entry->sched_job, __entry->ib,
73 __entry->fence, __entry->ring_name, __entry->num_ibs)
74);
75
76TRACE_EVENT(amdgpu_sched_run_job,
77 TP_PROTO(struct amdgpu_job *job),
78 TP_ARGS(job),
79 TP_STRUCT__entry(
80 __field(struct amdgpu_device *, adev)
81 __field(struct amd_sched_job *, sched_job)
82 __field(struct amdgpu_ib *, ib)
83 __field(struct fence *, fence)
84 __field(char *, ring_name)
85 __field(u32, num_ibs)
86 ),
87
88 TP_fast_assign(
89 __entry->adev = job->adev;
90 __entry->sched_job = &job->base;
91 __entry->ib = job->ibs;
92 __entry->fence = &job->base.s_fence->base;
93 __entry->ring_name = job->ibs[0].ring->name;
94 __entry->num_ibs = job->num_ibs;
95 ),
96 TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
97 __entry->adev, __entry->sched_job, __entry->ib,
98 __entry->fence, __entry->ring_name, __entry->num_ibs)
99);
100
101
51TRACE_EVENT(amdgpu_vm_grab_id, 102TRACE_EVENT(amdgpu_vm_grab_id,
52 TP_PROTO(unsigned vmid, int ring), 103 TP_PROTO(unsigned vmid, int ring),
53 TP_ARGS(vmid, ring), 104 TP_ARGS(vmid, ring),
@@ -196,49 +247,6 @@ TRACE_EVENT(amdgpu_bo_list_set,
196 TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) 247 TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
197); 248);
198 249
199DECLARE_EVENT_CLASS(amdgpu_fence_request,
200
201 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
202
203 TP_ARGS(dev, ring, seqno),
204
205 TP_STRUCT__entry(
206 __field(u32, dev)
207 __field(int, ring)
208 __field(u32, seqno)
209 ),
210
211 TP_fast_assign(
212 __entry->dev = dev->primary->index;
213 __entry->ring = ring;
214 __entry->seqno = seqno;
215 ),
216
217 TP_printk("dev=%u, ring=%d, seqno=%u",
218 __entry->dev, __entry->ring, __entry->seqno)
219);
220
221DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,
222
223 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
224
225 TP_ARGS(dev, ring, seqno)
226);
227
228DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,
229
230 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
231
232 TP_ARGS(dev, ring, seqno)
233);
234
235DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,
236
237 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
238
239 TP_ARGS(dev, ring, seqno)
240);
241
242DECLARE_EVENT_CLASS(amdgpu_semaphore_request, 250DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
243 251
244 TP_PROTO(int ring, struct amdgpu_semaphore *sem), 252 TP_PROTO(int ring, struct amdgpu_semaphore *sem),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 81bb8e9fc26d..8a1752ff3d8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -587,9 +587,13 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
587 uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); 587 uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
588 int r; 588 int r;
589 589
590 if (gtt->userptr) 590 if (gtt->userptr) {
591 amdgpu_ttm_tt_pin_userptr(ttm); 591 r = amdgpu_ttm_tt_pin_userptr(ttm);
592 592 if (r) {
593 DRM_ERROR("failed to pin userptr\n");
594 return r;
595 }
596 }
593 gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); 597 gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
594 if (!ttm->num_pages) { 598 if (!ttm->num_pages) {
595 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", 599 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
@@ -797,11 +801,12 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
797 if (mem && mem->mem_type != TTM_PL_SYSTEM) 801 if (mem && mem->mem_type != TTM_PL_SYSTEM)
798 flags |= AMDGPU_PTE_VALID; 802 flags |= AMDGPU_PTE_VALID;
799 803
800 if (mem && mem->mem_type == TTM_PL_TT) 804 if (mem && mem->mem_type == TTM_PL_TT) {
801 flags |= AMDGPU_PTE_SYSTEM; 805 flags |= AMDGPU_PTE_SYSTEM;
802 806
803 if (!ttm || ttm->caching_state == tt_cached) 807 if (ttm->caching_state == tt_cached)
804 flags |= AMDGPU_PTE_SNOOPED; 808 flags |= AMDGPU_PTE_SNOOPED;
809 }
805 810
806 if (adev->asic_type >= CHIP_TOPAZ) 811 if (adev->asic_type >= CHIP_TOPAZ)
807 flags |= AMDGPU_PTE_EXECUTABLE; 812 flags |= AMDGPU_PTE_EXECUTABLE;
@@ -1073,10 +1078,10 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
1073 ret = drm_mm_dump_table(m, mm); 1078 ret = drm_mm_dump_table(m, mm);
1074 spin_unlock(&glob->lru_lock); 1079 spin_unlock(&glob->lru_lock);
1075 if (ttm_pl == TTM_PL_VRAM) 1080 if (ttm_pl == TTM_PL_VRAM)
1076 seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n", 1081 seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
1077 adev->mman.bdev.man[ttm_pl].size, 1082 adev->mman.bdev.man[ttm_pl].size,
1078 atomic64_read(&adev->vram_usage) >> 20, 1083 (u64)atomic64_read(&adev->vram_usage) >> 20,
1079 atomic64_read(&adev->vram_vis_usage) >> 20); 1084 (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
1080 return ret; 1085 return ret;
1081} 1086}
1082 1087
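The restructured pte flag computation nests the snoop test under the TTM_PL_TT case, so AMDGPU_PTE_SNOOPED is only requested for cached GTT placements and the old NULL-ttm special case goes away. A standalone C sketch of the resulting truth table, with illustrative bit values:

#include <stdio.h>

#define PTE_VALID   (1u << 0) /* illustrative bit values */
#define PTE_SYSTEM  (1u << 1)
#define PTE_SNOOPED (1u << 2)

enum placement { PL_SYSTEM, PL_TT, PL_VRAM };
enum caching   { TT_UNCACHED, TT_CACHED };

static unsigned pte_flags(int have_mem, enum placement pl,
                          enum caching caching)
{
    unsigned flags = 0;

    if (have_mem && pl != PL_SYSTEM)
        flags |= PTE_VALID;

    if (have_mem && pl == PL_TT) {
        flags |= PTE_SYSTEM;
        if (caching == TT_CACHED)
            flags |= PTE_SNOOPED;
    }
    return flags;
}

int main(void)
{
    printf("%#x\n", pte_flags(1, PL_TT, TT_CACHED));   /* 0x7 */
    printf("%#x\n", pte_flags(1, PL_VRAM, TT_CACHED)); /* 0x1 */
    return 0;
}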
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 03f0c3bae516..a745eeeb5d82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -392,7 +392,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
392 ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ 392 ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
393 ib->ptr[ib->length_dw++] = handle; 393 ib->ptr[ib->length_dw++] = handle;
394 394
395 ib->ptr[ib->length_dw++] = 0x00000030; /* len */ 395 if ((ring->adev->vce.fw_version >> 24) >= 52)
396 ib->ptr[ib->length_dw++] = 0x00000040; /* len */
397 else
398 ib->ptr[ib->length_dw++] = 0x00000030; /* len */
396 ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */ 399 ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
397 ib->ptr[ib->length_dw++] = 0x00000000; 400 ib->ptr[ib->length_dw++] = 0x00000000;
398 ib->ptr[ib->length_dw++] = 0x00000042; 401 ib->ptr[ib->length_dw++] = 0x00000042;
@@ -404,6 +407,12 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
404 ib->ptr[ib->length_dw++] = 0x00000100; 407 ib->ptr[ib->length_dw++] = 0x00000100;
405 ib->ptr[ib->length_dw++] = 0x0000000c; 408 ib->ptr[ib->length_dw++] = 0x0000000c;
406 ib->ptr[ib->length_dw++] = 0x00000000; 409 ib->ptr[ib->length_dw++] = 0x00000000;
410 if ((ring->adev->vce.fw_version >> 24) >= 52) {
411 ib->ptr[ib->length_dw++] = 0x00000000;
412 ib->ptr[ib->length_dw++] = 0x00000000;
413 ib->ptr[ib->length_dw++] = 0x00000000;
414 ib->ptr[ib->length_dw++] = 0x00000000;
415 }
407 416
408 ib->ptr[ib->length_dw++] = 0x00000014; /* len */ 417 ib->ptr[ib->length_dw++] = 0x00000014; /* len */
409 ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ 418 ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
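The VCE hunk keys off the firmware major version packed into the top byte of fw_version: firmware 52 and newer expects a 0x40-byte session create block (four extra zero dwords), older firmware the original 0x30 bytes. A tiny C illustration of that selection, with made-up version words:

#include <stdio.h>

/* Major version lives in bits 31:24 of the packed version word. */
static unsigned create_len(unsigned fw_version)
{
    return (fw_version >> 24) >= 52 ? 0x40 : 0x30;
}

int main(void)
{
    unsigned fw_old = 40u << 24; /* hypothetical 40.x firmware */
    unsigned fw_new = 52u << 24; /* hypothetical 52.x firmware */

    printf("%#x %#x\n", create_len(fw_old), create_len(fw_new));
    /* -> 0x30 0x40 */
    return 0;
}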
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 633a32a48560..b53d273eb7a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
143 unsigned i; 143 unsigned i;
144 144
145 /* check if the id is still valid */ 145 /* check if the id is still valid */
146 if (vm_id->id && vm_id->last_id_use && 146 if (vm_id->id) {
147 vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) { 147 unsigned id = vm_id->id;
148 trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); 148 long owner;
149 return 0; 149
150 owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
151 if (owner == (long)vm) {
152 trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
153 return 0;
154 }
150 } 155 }
151 156
 152 /* we definitely need to flush */ 157 /* we definitely need to flush */
@@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
154 159
155 /* skip over VMID 0, since it is the system VM */ 160 /* skip over VMID 0, since it is the system VM */
156 for (i = 1; i < adev->vm_manager.nvm; ++i) { 161 for (i = 1; i < adev->vm_manager.nvm; ++i) {
157 struct fence *fence = adev->vm_manager.active[i]; 162 struct fence *fence = adev->vm_manager.ids[i].active;
158 struct amdgpu_ring *fring; 163 struct amdgpu_ring *fring;
159 164
160 if (fence == NULL) { 165 if (fence == NULL) {
@@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
176 if (choices[i]) { 181 if (choices[i]) {
177 struct fence *fence; 182 struct fence *fence;
178 183
179 fence = adev->vm_manager.active[choices[i]]; 184 fence = adev->vm_manager.ids[choices[i]].active;
180 vm_id->id = choices[i]; 185 vm_id->id = choices[i];
181 186
182 trace_amdgpu_vm_grab_id(choices[i], ring->idx); 187 trace_amdgpu_vm_grab_id(choices[i], ring->idx);
@@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
207 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); 212 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
208 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; 213 struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
209 struct fence *flushed_updates = vm_id->flushed_updates; 214 struct fence *flushed_updates = vm_id->flushed_updates;
210 bool is_earlier = false; 215 bool is_later;
211 216
212 if (flushed_updates && updates) { 217 if (!flushed_updates)
213 BUG_ON(flushed_updates->context != updates->context); 218 is_later = true;
214 is_earlier = (updates->seqno - flushed_updates->seqno <= 219 else if (!updates)
215 INT_MAX) ? true : false; 220 is_later = false;
216 } 221 else
217 222 is_later = fence_is_later(updates, flushed_updates);
218 if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
219 is_earlier) {
220 223
224 if (pd_addr != vm_id->pd_gpu_addr || is_later) {
221 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); 225 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
222 if (is_earlier) { 226 if (is_later) {
223 vm_id->flushed_updates = fence_get(updates); 227 vm_id->flushed_updates = fence_get(updates);
224 fence_put(flushed_updates); 228 fence_put(flushed_updates);
225 } 229 }
226 if (!flushed_updates)
227 vm_id->flushed_updates = fence_get(updates);
228 vm_id->pd_gpu_addr = pd_addr; 230 vm_id->pd_gpu_addr = pd_addr;
229 amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); 231 amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
230 } 232 }
@@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
244 */ 246 */
245void amdgpu_vm_fence(struct amdgpu_device *adev, 247void amdgpu_vm_fence(struct amdgpu_device *adev,
246 struct amdgpu_vm *vm, 248 struct amdgpu_vm *vm,
247 struct amdgpu_fence *fence) 249 struct fence *fence)
248{ 250{
249 unsigned ridx = fence->ring->idx; 251 struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
250 unsigned vm_id = vm->ids[ridx].id; 252 unsigned vm_id = vm->ids[ring->idx].id;
251
252 fence_put(adev->vm_manager.active[vm_id]);
253 adev->vm_manager.active[vm_id] = fence_get(&fence->base);
254 253
255 fence_put(vm->ids[ridx].last_id_use); 254 fence_put(adev->vm_manager.ids[vm_id].active);
256 vm->ids[ridx].last_id_use = fence_get(&fence->base); 255 adev->vm_manager.ids[vm_id].active = fence_get(fence);
256 atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
257} 257}
258 258
259/** 259/**
@@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job)
332 * 332 *
333 * @adev: amdgpu_device pointer 333 * @adev: amdgpu_device pointer
334 * @bo: bo to clear 334 * @bo: bo to clear
335 *
 336 * The bo must be reserved before calling this function.
335 */ 337 */
336static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, 338static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
337 struct amdgpu_bo *bo) 339 struct amdgpu_bo *bo)
@@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
343 uint64_t addr; 345 uint64_t addr;
344 int r; 346 int r;
345 347
346 r = amdgpu_bo_reserve(bo, false);
347 if (r)
348 return r;
349
350 r = reservation_object_reserve_shared(bo->tbo.resv); 348 r = reservation_object_reserve_shared(bo->tbo.resv);
351 if (r) 349 if (r)
352 return r; 350 return r;
353 351
354 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 352 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
355 if (r) 353 if (r)
356 goto error_unreserve; 354 goto error;
357 355
358 addr = amdgpu_bo_gpu_offset(bo); 356 addr = amdgpu_bo_gpu_offset(bo);
359 entries = amdgpu_bo_size(bo) / 8; 357 entries = amdgpu_bo_size(bo) / 8;
360 358
361 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); 359 ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
362 if (!ib) 360 if (!ib)
363 goto error_unreserve; 361 goto error;
364 362
365 r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); 363 r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
366 if (r) 364 if (r)
@@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
378 if (!r) 376 if (!r)
379 amdgpu_bo_fence(bo, fence, true); 377 amdgpu_bo_fence(bo, fence, true);
380 fence_put(fence); 378 fence_put(fence);
381 if (amdgpu_enable_scheduler) { 379 if (amdgpu_enable_scheduler)
382 amdgpu_bo_unreserve(bo);
383 return 0; 380 return 0;
384 } 381
385error_free: 382error_free:
386 amdgpu_ib_free(adev, ib); 383 amdgpu_ib_free(adev, ib);
387 kfree(ib); 384 kfree(ib);
388 385
389error_unreserve: 386error:
390 amdgpu_bo_unreserve(bo);
391 return r; 387 return r;
392} 388}
393 389
@@ -889,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
889 struct amdgpu_bo_va_mapping *mapping; 885 struct amdgpu_bo_va_mapping *mapping;
890 int r; 886 int r;
891 887
888 spin_lock(&vm->freed_lock);
892 while (!list_empty(&vm->freed)) { 889 while (!list_empty(&vm->freed)) {
893 mapping = list_first_entry(&vm->freed, 890 mapping = list_first_entry(&vm->freed,
894 struct amdgpu_bo_va_mapping, list); 891 struct amdgpu_bo_va_mapping, list);
895 list_del(&mapping->list); 892 list_del(&mapping->list);
896 893 spin_unlock(&vm->freed_lock);
897 r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); 894 r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
898 kfree(mapping); 895 kfree(mapping);
899 if (r) 896 if (r)
900 return r; 897 return r;
901 898
899 spin_lock(&vm->freed_lock);
902 } 900 }
901 spin_unlock(&vm->freed_lock);
902
903 return 0; 903 return 0;
904 904
905} 905}
@@ -926,8 +926,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
926 bo_va = list_first_entry(&vm->invalidated, 926 bo_va = list_first_entry(&vm->invalidated,
927 struct amdgpu_bo_va, vm_status); 927 struct amdgpu_bo_va, vm_status);
928 spin_unlock(&vm->status_lock); 928 spin_unlock(&vm->status_lock);
929 929 mutex_lock(&bo_va->mutex);
930 r = amdgpu_vm_bo_update(adev, bo_va, NULL); 930 r = amdgpu_vm_bo_update(adev, bo_va, NULL);
931 mutex_unlock(&bo_va->mutex);
931 if (r) 932 if (r)
932 return r; 933 return r;
933 934
@@ -971,7 +972,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
971 INIT_LIST_HEAD(&bo_va->valids); 972 INIT_LIST_HEAD(&bo_va->valids);
972 INIT_LIST_HEAD(&bo_va->invalids); 973 INIT_LIST_HEAD(&bo_va->invalids);
973 INIT_LIST_HEAD(&bo_va->vm_status); 974 INIT_LIST_HEAD(&bo_va->vm_status);
974 975 mutex_init(&bo_va->mutex);
975 list_add_tail(&bo_va->bo_list, &bo->va); 976 list_add_tail(&bo_va->bo_list, &bo->va);
976 977
977 return bo_va; 978 return bo_va;
@@ -989,7 +990,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 989 * Add a mapping of the BO at the specified addr into the VM. 990 * Add a mapping of the BO at the specified addr into the VM.
990 * Returns 0 for success, error for failure. 991 * Returns 0 for success, error for failure.
991 * 992 *
992 * Object has to be reserved and gets unreserved by this function! 993 * Object has to be reserved and unreserved outside!
993 */ 994 */
994int amdgpu_vm_bo_map(struct amdgpu_device *adev, 995int amdgpu_vm_bo_map(struct amdgpu_device *adev,
995 struct amdgpu_bo_va *bo_va, 996 struct amdgpu_bo_va *bo_va,
@@ -1005,30 +1006,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1005 1006
1006 /* validate the parameters */ 1007 /* validate the parameters */
1007 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || 1008 if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
1008 size == 0 || size & AMDGPU_GPU_PAGE_MASK) { 1009 size == 0 || size & AMDGPU_GPU_PAGE_MASK)
1009 amdgpu_bo_unreserve(bo_va->bo);
1010 return -EINVAL; 1010 return -EINVAL;
1011 }
1012 1011
 1013 /* make sure the object fits at this offset */ 1012 /* make sure the object fits at this offset */
1014 eaddr = saddr + size; 1013 eaddr = saddr + size;
1015 if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) { 1014 if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
1016 amdgpu_bo_unreserve(bo_va->bo);
1017 return -EINVAL; 1015 return -EINVAL;
1018 }
1019 1016
1020 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; 1017 last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
1021 if (last_pfn > adev->vm_manager.max_pfn) { 1018 if (last_pfn > adev->vm_manager.max_pfn) {
1022 dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", 1019 dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
1023 last_pfn, adev->vm_manager.max_pfn); 1020 last_pfn, adev->vm_manager.max_pfn);
1024 amdgpu_bo_unreserve(bo_va->bo);
1025 return -EINVAL; 1021 return -EINVAL;
1026 } 1022 }
1027 1023
1028 saddr /= AMDGPU_GPU_PAGE_SIZE; 1024 saddr /= AMDGPU_GPU_PAGE_SIZE;
1029 eaddr /= AMDGPU_GPU_PAGE_SIZE; 1025 eaddr /= AMDGPU_GPU_PAGE_SIZE;
1030 1026
1027 spin_lock(&vm->it_lock);
1031 it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); 1028 it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
1029 spin_unlock(&vm->it_lock);
1032 if (it) { 1030 if (it) {
1033 struct amdgpu_bo_va_mapping *tmp; 1031 struct amdgpu_bo_va_mapping *tmp;
1034 tmp = container_of(it, struct amdgpu_bo_va_mapping, it); 1032 tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1036,14 +1034,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1036 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " 1034 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1037 "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, 1035 "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
1038 tmp->it.start, tmp->it.last + 1); 1036 tmp->it.start, tmp->it.last + 1);
1039 amdgpu_bo_unreserve(bo_va->bo);
1040 r = -EINVAL; 1037 r = -EINVAL;
1041 goto error; 1038 goto error;
1042 } 1039 }
1043 1040
1044 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); 1041 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
1045 if (!mapping) { 1042 if (!mapping) {
1046 amdgpu_bo_unreserve(bo_va->bo);
1047 r = -ENOMEM; 1043 r = -ENOMEM;
1048 goto error; 1044 goto error;
1049 } 1045 }
@@ -1054,8 +1050,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1054 mapping->offset = offset; 1050 mapping->offset = offset;
1055 mapping->flags = flags; 1051 mapping->flags = flags;
1056 1052
1053 mutex_lock(&bo_va->mutex);
1057 list_add(&mapping->list, &bo_va->invalids); 1054 list_add(&mapping->list, &bo_va->invalids);
1055 mutex_unlock(&bo_va->mutex);
1056 spin_lock(&vm->it_lock);
1058 interval_tree_insert(&mapping->it, &vm->va); 1057 interval_tree_insert(&mapping->it, &vm->va);
1058 spin_unlock(&vm->it_lock);
1059 trace_amdgpu_vm_bo_map(bo_va, mapping); 1059 trace_amdgpu_vm_bo_map(bo_va, mapping);
1060 1060
1061 /* Make sure the page tables are allocated */ 1061 /* Make sure the page tables are allocated */
@@ -1067,8 +1067,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1067 if (eaddr > vm->max_pde_used) 1067 if (eaddr > vm->max_pde_used)
1068 vm->max_pde_used = eaddr; 1068 vm->max_pde_used = eaddr;
1069 1069
1070 amdgpu_bo_unreserve(bo_va->bo);
1071
1072 /* walk over the address space and allocate the page tables */ 1070 /* walk over the address space and allocate the page tables */
1073 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { 1071 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1074 struct reservation_object *resv = vm->page_directory->tbo.resv; 1072 struct reservation_object *resv = vm->page_directory->tbo.resv;
@@ -1077,16 +1075,19 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1077 if (vm->page_tables[pt_idx].bo) 1075 if (vm->page_tables[pt_idx].bo)
1078 continue; 1076 continue;
1079 1077
1080 ww_mutex_lock(&resv->lock, NULL);
1081 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, 1078 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1082 AMDGPU_GPU_PAGE_SIZE, true, 1079 AMDGPU_GPU_PAGE_SIZE, true,
1083 AMDGPU_GEM_DOMAIN_VRAM, 1080 AMDGPU_GEM_DOMAIN_VRAM,
1084 AMDGPU_GEM_CREATE_NO_CPU_ACCESS, 1081 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1085 NULL, resv, &pt); 1082 NULL, resv, &pt);
1086 ww_mutex_unlock(&resv->lock);
1087 if (r) 1083 if (r)
1088 goto error_free; 1084 goto error_free;
1089 1085
 1086 /* Keep a reference to the page directory so the page
 1087 * tables are not freed in the wrong order.
1088 */
1089 pt->parent = amdgpu_bo_ref(vm->page_directory);
1090
1090 r = amdgpu_vm_clear_bo(adev, pt); 1091 r = amdgpu_vm_clear_bo(adev, pt);
1091 if (r) { 1092 if (r) {
1092 amdgpu_bo_unref(&pt); 1093 amdgpu_bo_unref(&pt);
@@ -1101,7 +1102,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1101 1102
1102error_free: 1103error_free:
1103 list_del(&mapping->list); 1104 list_del(&mapping->list);
1105 spin_lock(&vm->it_lock);
1104 interval_tree_remove(&mapping->it, &vm->va); 1106 interval_tree_remove(&mapping->it, &vm->va);
1107 spin_unlock(&vm->it_lock);
1105 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1108 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1106 kfree(mapping); 1109 kfree(mapping);
1107 1110
@@ -1119,7 +1122,7 @@ error:
 1119 * Remove a mapping of the BO at the specified addr from the VM. 1122 * Remove a mapping of the BO at the specified addr from the VM.
1120 * Returns 0 for success, error for failure. 1123 * Returns 0 for success, error for failure.
1121 * 1124 *
1122 * Object has to be reserved and gets unreserved by this function! 1125 * Object has to be reserved and unreserved outside!
1123 */ 1126 */
1124int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, 1127int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1125 struct amdgpu_bo_va *bo_va, 1128 struct amdgpu_bo_va *bo_va,
@@ -1130,7 +1133,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1130 bool valid = true; 1133 bool valid = true;
1131 1134
1132 saddr /= AMDGPU_GPU_PAGE_SIZE; 1135 saddr /= AMDGPU_GPU_PAGE_SIZE;
1133 1136 mutex_lock(&bo_va->mutex);
1134 list_for_each_entry(mapping, &bo_va->valids, list) { 1137 list_for_each_entry(mapping, &bo_va->valids, list) {
1135 if (mapping->it.start == saddr) 1138 if (mapping->it.start == saddr)
1136 break; 1139 break;
@@ -1145,20 +1148,24 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
1145 } 1148 }
1146 1149
1147 if (&mapping->list == &bo_va->invalids) { 1150 if (&mapping->list == &bo_va->invalids) {
1148 amdgpu_bo_unreserve(bo_va->bo); 1151 mutex_unlock(&bo_va->mutex);
1149 return -ENOENT; 1152 return -ENOENT;
1150 } 1153 }
1151 } 1154 }
1152 1155 mutex_unlock(&bo_va->mutex);
1153 list_del(&mapping->list); 1156 list_del(&mapping->list);
1157 spin_lock(&vm->it_lock);
1154 interval_tree_remove(&mapping->it, &vm->va); 1158 interval_tree_remove(&mapping->it, &vm->va);
1159 spin_unlock(&vm->it_lock);
1155 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1160 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1156 1161
1157 if (valid) 1162 if (valid) {
1163 spin_lock(&vm->freed_lock);
1158 list_add(&mapping->list, &vm->freed); 1164 list_add(&mapping->list, &vm->freed);
1159 else 1165 spin_unlock(&vm->freed_lock);
1166 } else {
1160 kfree(mapping); 1167 kfree(mapping);
1161 amdgpu_bo_unreserve(bo_va->bo); 1168 }
1162 1169
1163 return 0; 1170 return 0;
1164} 1171}
@@ -1187,17 +1194,23 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
1187 1194
1188 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { 1195 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
1189 list_del(&mapping->list); 1196 list_del(&mapping->list);
1197 spin_lock(&vm->it_lock);
1190 interval_tree_remove(&mapping->it, &vm->va); 1198 interval_tree_remove(&mapping->it, &vm->va);
1199 spin_unlock(&vm->it_lock);
1191 trace_amdgpu_vm_bo_unmap(bo_va, mapping); 1200 trace_amdgpu_vm_bo_unmap(bo_va, mapping);
1201 spin_lock(&vm->freed_lock);
1192 list_add(&mapping->list, &vm->freed); 1202 list_add(&mapping->list, &vm->freed);
1203 spin_unlock(&vm->freed_lock);
1193 } 1204 }
1194 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { 1205 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
1195 list_del(&mapping->list); 1206 list_del(&mapping->list);
1207 spin_lock(&vm->it_lock);
1196 interval_tree_remove(&mapping->it, &vm->va); 1208 interval_tree_remove(&mapping->it, &vm->va);
1209 spin_unlock(&vm->it_lock);
1197 kfree(mapping); 1210 kfree(mapping);
1198 } 1211 }
1199
1200 fence_put(bo_va->last_pt_update); 1212 fence_put(bo_va->last_pt_update);
1213 mutex_destroy(&bo_va->mutex);
1201 kfree(bo_va); 1214 kfree(bo_va);
1202} 1215}
1203 1216
@@ -1241,15 +1254,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1241 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1254 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1242 vm->ids[i].id = 0; 1255 vm->ids[i].id = 0;
1243 vm->ids[i].flushed_updates = NULL; 1256 vm->ids[i].flushed_updates = NULL;
1244 vm->ids[i].last_id_use = NULL;
1245 } 1257 }
1246 mutex_init(&vm->mutex);
1247 vm->va = RB_ROOT; 1258 vm->va = RB_ROOT;
1248 spin_lock_init(&vm->status_lock); 1259 spin_lock_init(&vm->status_lock);
1249 INIT_LIST_HEAD(&vm->invalidated); 1260 INIT_LIST_HEAD(&vm->invalidated);
1250 INIT_LIST_HEAD(&vm->cleared); 1261 INIT_LIST_HEAD(&vm->cleared);
1251 INIT_LIST_HEAD(&vm->freed); 1262 INIT_LIST_HEAD(&vm->freed);
1252 1263 spin_lock_init(&vm->it_lock);
1264 spin_lock_init(&vm->freed_lock);
1253 pd_size = amdgpu_vm_directory_size(adev); 1265 pd_size = amdgpu_vm_directory_size(adev);
1254 pd_entries = amdgpu_vm_num_pdes(adev); 1266 pd_entries = amdgpu_vm_num_pdes(adev);
1255 1267
@@ -1269,8 +1281,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1269 NULL, NULL, &vm->page_directory); 1281 NULL, NULL, &vm->page_directory);
1270 if (r) 1282 if (r)
1271 return r; 1283 return r;
1272 1284 r = amdgpu_bo_reserve(vm->page_directory, false);
1285 if (r) {
1286 amdgpu_bo_unref(&vm->page_directory);
1287 vm->page_directory = NULL;
1288 return r;
1289 }
1273 r = amdgpu_vm_clear_bo(adev, vm->page_directory); 1290 r = amdgpu_vm_clear_bo(adev, vm->page_directory);
1291 amdgpu_bo_unreserve(vm->page_directory);
1274 if (r) { 1292 if (r) {
1275 amdgpu_bo_unref(&vm->page_directory); 1293 amdgpu_bo_unref(&vm->page_directory);
1276 vm->page_directory = NULL; 1294 vm->page_directory = NULL;
@@ -1313,11 +1331,27 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1313 1331
1314 amdgpu_bo_unref(&vm->page_directory); 1332 amdgpu_bo_unref(&vm->page_directory);
1315 fence_put(vm->page_directory_fence); 1333 fence_put(vm->page_directory_fence);
1316
1317 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1334 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1335 unsigned id = vm->ids[i].id;
1336
1337 atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
1338 (long)vm, 0);
1318 fence_put(vm->ids[i].flushed_updates); 1339 fence_put(vm->ids[i].flushed_updates);
1319 fence_put(vm->ids[i].last_id_use);
1320 } 1340 }
1321 1341
1322 mutex_destroy(&vm->mutex); 1342}
1343
1344/**
1345 * amdgpu_vm_manager_fini - cleanup VM manager
1346 *
1347 * @adev: amdgpu_device pointer
1348 *
1349 * Cleanup the VM manager and free resources.
1350 */
1351void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
1352{
1353 unsigned i;
1354
1355 for (i = 0; i < AMDGPU_NUM_VM; ++i)
1356 fence_put(adev->vm_manager.ids[i].active);
1323} 1357}
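amdgpu_vm_flush() now asks fence_is_later() whether the updates fence postdates the last flushed one, instead of open-coding 'updates->seqno - flushed_updates->seqno <= INT_MAX'. Both rest on the same wraparound-safe sequence comparison; a runnable C sketch of the idea (a simplified model, not the kernel helper itself):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Wraparound-safe "a is later than b" for sequence numbers from
 * the same context: the unsigned difference stays below INT_MAX
 * when a is ahead, even across a 32-bit wrap. */
static bool seqno_later(unsigned a, unsigned b)
{
    return a != b && a - b < (unsigned)INT_MAX;
}

int main(void)
{
    printf("%d\n", seqno_later(10, 5));           /* 1 */
    printf("%d\n", seqno_later(5, 10));           /* 0 */
    /* 3 counts as later than UINT_MAX - 2: a wrap occurred. */
    printf("%d\n", seqno_later(3, UINT_MAX - 2)); /* 1 */
    return 0;
}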
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index a1a35a5df8e7..57a2e347f04d 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6569,12 +6569,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
 		cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
-		cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+		cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
 		WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
 		cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
-		cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+		cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
 		WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
 		break;
 	default:
@@ -6586,12 +6586,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
 		cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
-		cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+		cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
 		WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
 		cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
-		cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+		cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
 		WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
 		break;
 	default:
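
Both hunks swap the AND/OR applied to the *_MASK bits: judging by the field names, a set THERM_INT*_MASK bit masks (suppresses) the interrupt, so DISABLE must set the bit and ENABLE must clear it; the old code had the polarity backwards. A tiny sketch of that convention, with an invented bit position rather than the real register layout:

    #include <stdio.h>
    #include <stdint.h>

    #define THERM_INTH_MASK (1u << 24)      /* illustrative position, not the real field */

    int main(void)
    {
        uint32_t cg_thermal_int = 0;

        cg_thermal_int |= THERM_INTH_MASK;  /* DISABLE: set the mask bit, IRQ suppressed */
        printf("disabled: 0x%08x\n", cg_thermal_int);

        cg_thermal_int &= ~THERM_INTH_MASK; /* ENABLE: clear the mask bit, IRQ delivered */
        printf("enabled:  0x%08x\n", cg_thermal_int);
        return 0;
    }
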
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index cb0f7747e3dc..4dcc8fba5792 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1250,7 +1250,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 	u32 pixel_period;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
-	u32 tmp, wm_mask;
+	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 		pixel_period = 1000000 / (u32)mode->clock;
@@ -1333,6 +1333,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 		    (adev->mode_info.disp_priority == 2)) {
 			DRM_DEBUG_KMS("force priority to high\n");
 		}
+		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -1357,6 +1358,8 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
 	amdgpu_crtc->line_time = line_time;
 	amdgpu_crtc->wm_high = latency_watermark_a;
 	amdgpu_crtc->wm_low = latency_watermark_b;
+	/* Save number of lines the linebuffer leads before the scanout */
+	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 5af3721851d6..8f1e51128b33 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1238,7 +1238,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 	u32 pixel_period;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
-	u32 tmp, wm_mask;
+	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 		pixel_period = 1000000 / (u32)mode->clock;
@@ -1321,6 +1321,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 		    (adev->mode_info.disp_priority == 2)) {
 			DRM_DEBUG_KMS("force priority to high\n");
 		}
+		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -1345,6 +1346,8 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
 	amdgpu_crtc->line_time = line_time;
 	amdgpu_crtc->wm_high = latency_watermark_a;
 	amdgpu_crtc->wm_low = latency_watermark_b;
+	/* Save number of lines the linebuffer leads before the scanout */
+	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 4f7b49a6dc50..42d954dc436d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1193,7 +1193,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 	u32 pixel_period;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
-	u32 tmp, wm_mask;
+	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
 		pixel_period = 1000000 / (u32)mode->clock;
@@ -1276,6 +1276,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 		    (adev->mode_info.disp_priority == 2)) {
 			DRM_DEBUG_KMS("force priority to high\n");
 		}
+		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -1302,6 +1303,8 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
 	amdgpu_crtc->line_time = line_time;
 	amdgpu_crtc->wm_high = latency_watermark_a;
 	amdgpu_crtc->wm_low = latency_watermark_b;
+	/* Save number of lines the linebuffer leads before the scanout */
+	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /**
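
The same three-line change lands in DCE 8, 10 and 11: the number of scanout lines the line buffer runs ahead of the CRTC is DIV_ROUND_UP(lb_size, mode->crtc_hdisplay), saved so later vblank code can account for the lead. The rounding is the usual ceiling-division trick; a worked example with illustrative numbers:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned lb_size = 3840;    /* assumed line-buffer capacity in pixels */

        /* 3840 / 1920 fits exactly 2 lines; 3840 / 1366 is 2.81, rounded up to 3 */
        printf("1920 wide: %u lead lines\n", DIV_ROUND_UP(lb_size, 1920u));
        printf("1366 wide: %u lead lines\n", DIV_ROUND_UP(lb_size, 1366u));
        return 0;
    }
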
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6776cf756d40..e1dcab98e249 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -268,7 +268,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
 	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
-	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
 	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
@@ -296,10 +295,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
-	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
-	mmPCIE_DATA, 0x000f0000, 0x00000000,
-	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
-	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 };
 
@@ -1000,7 +995,7 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
 		adev->gfx.config.max_cu_per_sh = 16;
 		adev->gfx.config.max_sh_per_se = 1;
 		adev->gfx.config.max_backends_per_se = 4;
-		adev->gfx.config.max_texture_channel_caches = 8;
+		adev->gfx.config.max_texture_channel_caches = 16;
 		adev->gfx.config.max_gprs = 256;
 		adev->gfx.config.max_gs_threads = 32;
 		adev->gfx.config.max_hw_contexts = 8;
@@ -1613,6 +1608,296 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
 			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
 		}
 	case CHIP_FIJI:
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 1:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 2:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 3:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 4:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 5:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 6:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 7:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 8:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+				break;
+			case 9:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 10:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 11:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+				break;
+			case 12:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+				break;
+			case 13:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 14:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 15:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 16:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+				break;
+			case 17:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+				break;
+			case 18:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 19:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 20:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 21:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 22:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 23:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 24:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 25:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 26:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 27:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 28:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 29:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+				break;
+			case 30:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+				break;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+			WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+		}
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 1:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 2:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 3:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 4:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 5:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 6:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 8:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 9:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 10:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 11:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 12:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 13:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 14:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						NUM_BANKS(ADDR_SURF_4_BANK));
+				break;
+			case 7:
+				/* unused idx */
+				continue;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
+			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
+		}
+		break;
 	case CHIP_TONGA:
 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
 			switch (reg_offset) {
@@ -2971,10 +3256,13 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
 	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
 	switch (adev->asic_type) {
 	case CHIP_TONGA:
-	case CHIP_FIJI:
 		amdgpu_ring_write(ring, 0x16000012);
 		amdgpu_ring_write(ring, 0x0000002A);
 		break;
+	case CHIP_FIJI:
+		amdgpu_ring_write(ring, 0x3a00161a);
+		amdgpu_ring_write(ring, 0x0000002e);
+		break;
 	case CHIP_TOPAZ:
 	case CHIP_CARRIZO:
 		amdgpu_ring_write(ring, 0x00000002);
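
The Fiji table added above follows the same encoding idiom as the Tonga one it was previously sharing: each 32-bit tile-mode register is assembled by OR-ing shifted bit-fields such as ARRAY_MODE() and PIPE_CONFIG(). A compact sketch of that idiom with invented shift values (the real ones come from the GCA register headers):

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical encoders in the style of ARRAY_MODE()/PIPE_CONFIG();
     * the shifts below are placeholders, not the hardware layout. */
    #define ARRAY_MODE(x)  ((uint32_t)(x) << 2)
    #define PIPE_CONFIG(x) ((uint32_t)(x) << 6)
    #define TILE_SPLIT(x)  ((uint32_t)(x) << 11)

    int main(void)
    {
        uint32_t gb_tile_moden = ARRAY_MODE(4) | PIPE_CONFIG(16) | TILE_SPLIT(2);
        printf("gb_tile_moden = 0x%08x\n", gb_tile_moden);
        return 0;
    }
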
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 85bbcdc73fff..ed8abb58a785 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -40,7 +40,7 @@
 static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 
-MODULE_FIRMWARE("radeon/boniare_mc.bin");
+MODULE_FIRMWARE("radeon/bonaire_mc.bin");
 MODULE_FIRMWARE("radeon/hawaii_mc.bin");
 
 /**
@@ -501,6 +501,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
 	WREG32(mmVM_L2_CNTL, tmp);
 	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
@@ -512,7 +513,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 	WREG32(mmVM_L2_CNTL3, tmp);
 	/* setup context0 */
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
@@ -960,12 +961,10 @@ static int gmc_v7_0_sw_init(void *handle)
 
 static int gmc_v7_0_sw_fini(void *handle)
 {
-	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->vm_manager.enabled) {
-		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			fence_put(adev->vm_manager.active[i]);
+		amdgpu_vm_manager_fini(adev);
 		gmc_v7_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
@@ -1010,12 +1009,10 @@ static int gmc_v7_0_hw_fini(void *handle)
 
 static int gmc_v7_0_suspend(void *handle)
 {
-	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->vm_manager.enabled) {
-		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			fence_put(adev->vm_manager.active[i]);
+		amdgpu_vm_manager_fini(adev);
 		gmc_v7_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1bcc4e74e3b4..d39028440814 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -629,6 +629,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
 	WREG32(mmVM_L2_CNTL, tmp);
 	tmp = RREG32(mmVM_L2_CNTL2);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
@@ -656,7 +657,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	WREG32(mmVM_L2_CNTL4, tmp);
 	/* setup context0 */
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
@@ -979,12 +980,10 @@ static int gmc_v8_0_sw_init(void *handle)
 
 static int gmc_v8_0_sw_fini(void *handle)
 {
-	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->vm_manager.enabled) {
-		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			fence_put(adev->vm_manager.active[i]);
+		amdgpu_vm_manager_fini(adev);
 		gmc_v8_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
@@ -1031,12 +1030,10 @@ static int gmc_v8_0_hw_fini(void *handle)
 
 static int gmc_v8_0_suspend(void *handle)
 {
-	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->vm_manager.enabled) {
-		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			fence_put(adev->vm_manager.active[i]);
+		amdgpu_vm_manager_fini(adev);
 		gmc_v8_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
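
Both GMC generations receive the same pair of fixes: the L2 is told to page defaulted faults out to system memory, and VM_CONTEXT0_PAGE_TABLE_END_ADDR is now written as gtt_end >> 12 rather than one page less, aligning the register with an inclusive-end convention. The shifts are plain 4 KiB page-number arithmetic; an illustrative computation with an assumed GTT range:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Illustrative GTT range: 64 MiB starting at 0. */
        uint64_t gtt_start = 0x0000000, gtt_end = 0x4000000;

        /* The registers take 4 KiB page numbers, hence the >> 12. */
        printf("START_ADDR = 0x%llx\n", (unsigned long long)(gtt_start >> 12));
        printf("END_ADDR (old, last page) = 0x%llx\n",
               (unsigned long long)((gtt_end >> 12) - 1));
        printf("END_ADDR (new)            = 0x%llx\n",
               (unsigned long long)(gtt_end >> 12));
        return 0;
    }
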
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6a52db6ad8d7..370c6c9d81c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -40,6 +40,9 @@
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
 
 #define VCE_V3_0_FW_SIZE	(384 * 1024)
 #define VCE_V3_0_STACK_SIZE	(64 * 1024)
@@ -130,9 +133,11 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 
 	/* set BUSY flag */
 	WREG32_P(mmVCE_STATUS, 1, ~1);
-
-	WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
-		~VCE_VCPU_CNTL__CLK_EN_MASK);
+	if (adev->asic_type >= CHIP_STONEY)
+		WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
+	else
+		WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
+			~VCE_VCPU_CNTL__CLK_EN_MASK);
 
 	WREG32_P(mmVCE_SOFT_RESET,
 		VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
@@ -391,8 +396,12 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
 	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
 	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
 	WREG32(mmVCE_LMI_VM_CTRL, 0);
-
-	WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
+	if (adev->asic_type >= CHIP_STONEY) {
+		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
+		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
+		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
+	} else
+		WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
 	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
 	size = VCE_V3_0_FW_SIZE;
 	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
@@ -576,6 +585,11 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
 		struct amdgpu_iv_entry *entry)
 {
 	DRM_DEBUG("IH: VCE\n");
+
+	WREG32_P(mmVCE_SYS_INT_STATUS,
+		VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
+		~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
+
 	switch (entry->src_data) {
 	case 0:
 		amdgpu_fence_process(&adev->vce.ring[0]);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 144f50acc971..c89dc777768f 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job,
 	TP_ARGS(sched_job),
 	TP_STRUCT__entry(
 		__field(struct amd_sched_entity *, entity)
+		__field(struct amd_sched_job *, sched_job)
+		__field(struct fence *, fence)
 		__field(const char *, name)
 		__field(u32, job_count)
 		__field(int, hw_job_count)
@@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job,
 
 	TP_fast_assign(
 		__entry->entity = sched_job->s_entity;
+		__entry->sched_job = sched_job;
+		__entry->fence = &sched_job->s_fence->base;
 		__entry->name = sched_job->sched->name;
 		__entry->job_count = kfifo_len(
 			&sched_job->s_entity->job_queue) / sizeof(sched_job);
 		__entry->hw_job_count = atomic_read(
 			&sched_job->sched->hw_rq_count);
 		),
-	TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
-		__entry->entity, __entry->name, __entry->job_count,
-		__entry->hw_job_count)
+	TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d",
+		__entry->entity, __entry->sched_job, __entry->fence, __entry->name,
+		__entry->job_count, __entry->hw_job_count)
 );
+
+TRACE_EVENT(amd_sched_process_job,
+	    TP_PROTO(struct amd_sched_fence *fence),
+	    TP_ARGS(fence),
+	    TP_STRUCT__entry(
+		    __field(struct fence *, fence)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->fence = &fence->base;
+		    ),
+	    TP_printk("fence=%p signaled", __entry->fence)
+);
+
 #endif
 
 /* This part must be outside protection */
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 89619a5a4289..3a4820e863ec 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -30,10 +30,12 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_sched_trace.h"
 
-static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
+struct kmem_cache *sched_fence_slab;
+atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
+
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
@@ -61,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 }
 
 /**
- * Select next job from a specified run queue with round robin policy.
- * Return NULL if nothing available.
+ * Select an entity which could provide a job to run
+ *
+ * @rq		The run queue to check.
+ *
+ * Try to find a ready entity, returns NULL if none found.
  */
-static struct amd_sched_job *
-amd_sched_rq_select_job(struct amd_sched_rq *rq)
+static struct amd_sched_entity *
+amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 {
 	struct amd_sched_entity *entity;
-	struct amd_sched_job *sched_job;
 
 	spin_lock(&rq->lock);
 
 	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
-			sched_job = amd_sched_entity_pop_job(entity);
-			if (sched_job) {
+			if (amd_sched_entity_is_ready(entity)) {
 				rq->current_entity = entity;
 				spin_unlock(&rq->lock);
-				return sched_job;
+				return entity;
 			}
 		}
 	}
 
 	list_for_each_entry(entity, &rq->entities, list) {
 
-		sched_job = amd_sched_entity_pop_job(entity);
-		if (sched_job) {
+		if (amd_sched_entity_is_ready(entity)) {
 			rq->current_entity = entity;
 			spin_unlock(&rq->lock);
-			return sched_job;
+			return entity;
 		}
 
 		if (entity == rq->current_entity)
@@ -174,6 +176,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
 }
 
 /**
+ * Check if entity is ready
+ *
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if entity could provide a job.
+ */
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+{
+	if (kfifo_is_empty(&entity->job_queue))
+		return false;
+
+	if (ACCESS_ONCE(entity->dependency))
+		return false;
+
+	return true;
+}
+
+/**
  * Destroy a context entity
  *
  * @sched       Pointer to scheduler instance
@@ -208,32 +228,53 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
 	amd_sched_wakeup(entity->sched);
 }
 
+static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
+{
+	struct amd_gpu_scheduler *sched = entity->sched;
+	struct fence * fence = entity->dependency;
+	struct amd_sched_fence *s_fence;
+
+	if (fence->context == entity->fence_context) {
+		/* We can ignore fences from ourself */
+		fence_put(entity->dependency);
+		return false;
+	}
+
+	s_fence = to_amd_sched_fence(fence);
+	if (s_fence && s_fence->sched == sched) {
+		/* Fence is from the same scheduler */
+		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
+			/* Ignore it when it is already scheduled */
+			fence_put(entity->dependency);
+			return false;
+		}
+
+		/* Wait for fence to be scheduled */
+		entity->cb.func = amd_sched_entity_wakeup;
+		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
+		return true;
+	}
+
+	if (!fence_add_callback(entity->dependency, &entity->cb,
+				amd_sched_entity_wakeup))
+		return true;
+
+	fence_put(entity->dependency);
+	return false;
+}
+
 static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 {
 	struct amd_gpu_scheduler *sched = entity->sched;
 	struct amd_sched_job *sched_job;
 
-	if (ACCESS_ONCE(entity->dependency))
-		return NULL;
-
 	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
 		return NULL;
 
-	while ((entity->dependency = sched->ops->dependency(sched_job))) {
-
-		if (entity->dependency->context == entity->fence_context) {
-			/* We can ignore fences from ourself */
-			fence_put(entity->dependency);
-			continue;
-		}
-
-		if (fence_add_callback(entity->dependency, &entity->cb,
-				       amd_sched_entity_wakeup))
-			fence_put(entity->dependency);
-		else
+	while ((entity->dependency = sched->ops->dependency(sched_job)))
+		if (amd_sched_entity_add_dependency_cb(entity))
 			return NULL;
-	}
 
 	return sched_job;
 }
@@ -247,6 +288,7 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
  */
 static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 {
+	struct amd_gpu_scheduler *sched = sched_job->sched;
 	struct amd_sched_entity *entity = sched_job->s_entity;
 	bool added, first = false;
 
@@ -261,7 +303,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 
 	/* first job wakes up scheduler */
 	if (first)
-		amd_sched_wakeup(sched_job->sched);
+		amd_sched_wakeup(sched);
 
 	return added;
 }
@@ -273,22 +315,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 *
 * Returns 0 for success, negative error code otherwise.
 */
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
 	struct amd_sched_entity *entity = sched_job->s_entity;
-	struct amd_sched_fence *fence = amd_sched_fence_create(
-		entity, sched_job->owner);
-
-	if (!fence)
-		return -ENOMEM;
-
-	fence_get(&fence->base);
-	sched_job->s_fence = fence;
 
+	trace_amd_sched_job(sched_job);
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
-	trace_amd_sched_job(sched_job);
-	return 0;
 }
 
 /**
@@ -310,22 +343,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
 }
 
 /**
- * Select next to run
+ * Select next entity to process
 */
-static struct amd_sched_job *
-amd_sched_select_job(struct amd_gpu_scheduler *sched)
+static struct amd_sched_entity *
+amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 {
-	struct amd_sched_job *sched_job;
+	struct amd_sched_entity *entity;
 
 	if (!amd_sched_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
-	if (sched_job == NULL)
-		sched_job = amd_sched_rq_select_job(&sched->sched_rq);
+	entity = amd_sched_rq_select_entity(&sched->kernel_rq);
+	if (entity == NULL)
+		entity = amd_sched_rq_select_entity(&sched->sched_rq);
 
-	return sched_job;
+	return entity;
 }
 
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -343,6 +376,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 		list_del_init(&s_fence->list);
 		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
 	}
+	trace_amd_sched_process_job(s_fence);
 	fence_put(&s_fence->base);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
@@ -386,13 +420,16 @@ static int amd_sched_main(void *param)
 		unsigned long flags;
 
 		wait_event_interruptible(sched->wake_up_worker,
-			kthread_should_stop() ||
-			(sched_job = amd_sched_select_job(sched)));
+			(entity = amd_sched_select_entity(sched)) ||
+			kthread_should_stop());
 
+		if (!entity)
+			continue;
+
+		sched_job = amd_sched_entity_pop_job(entity);
 		if (!sched_job)
 			continue;
 
-		entity = sched_job->s_entity;
 		s_fence = sched_job->s_fence;
 
 		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
@@ -405,6 +442,7 @@ static int amd_sched_main(void *param)
 
 		atomic_inc(&sched->hw_rq_count);
 		fence = sched->ops->run_job(sched_job);
+		amd_sched_fence_scheduled(s_fence);
 		if (fence) {
 			r = fence_add_callback(fence, &s_fence->cb,
 					       amd_sched_process_job);
@@ -450,6 +488,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
 	atomic_set(&sched->hw_rq_count, 0);
+	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
+		sched_fence_slab = kmem_cache_create(
+			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+			SLAB_HWCACHE_ALIGN, NULL);
+		if (!sched_fence_slab)
+			return -ENOMEM;
+	}
 
 	/* Each scheduler will run on a seperate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -470,4 +515,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
+	if (atomic_dec_and_test(&sched_fence_slab_ref))
+		kmem_cache_destroy(sched_fence_slab);
 }
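
The scheduler now owns a process-wide slab for fences, created by the first amd_sched_init() and destroyed by the last amd_sched_fini(), with a plain atomic reference count deciding who does which. A user-space sketch of that first-in/last-out idiom (malloc stands in for kmem_cache_create(), and the names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *fence_slab;
    static atomic_int fence_slab_ref;

    static int sched_init(void)
    {
        /* atomic_inc_return(&ref) == 1 in the kernel patch */
        if (atomic_fetch_add(&fence_slab_ref, 1) == 0) {
            fence_slab = malloc(4096);
            if (!fence_slab)
                return -1;
        }
        return 0;
    }

    static void sched_fini(void)
    {
        /* atomic_dec_and_test(&ref) in the kernel patch */
        if (atomic_fetch_sub(&fence_slab_ref, 1) == 1) {
            free(fence_slab);
            fence_slab = NULL;
        }
    }

    int main(void)
    {
        sched_init();               /* first user creates the shared cache */
        sched_init();               /* second user just bumps the count */
        sched_fini();
        sched_fini();               /* last user frees the cache */
        printf("slab = %p\n", fence_slab);
        return 0;
    }
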
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 929e9aced041..a0f0ae53aacd 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -27,9 +27,14 @@
 #include <linux/kfifo.h>
 #include <linux/fence.h>
 
+#define AMD_SCHED_FENCE_SCHEDULED_BIT	FENCE_FLAG_USER_BITS
+
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
 
+extern struct kmem_cache *sched_fence_slab;
+extern atomic_t sched_fence_slab_ref;
+
 /**
  * A scheduler entity is a wrapper around a job queue or a group
  * of other entities. Entities take turns emitting jobs from their
@@ -65,6 +70,7 @@ struct amd_sched_rq {
 struct amd_sched_fence {
 	struct fence			base;
 	struct fence_cb			cb;
+	struct list_head		scheduled_cb;
 	struct amd_gpu_scheduler	*sched;
 	spinlock_t			lock;
 	void				*owner;
@@ -76,7 +82,6 @@ struct amd_sched_job {
 	struct amd_gpu_scheduler	*sched;
 	struct amd_sched_entity		*s_entity;
 	struct amd_sched_fence		*s_fence;
-	void				*owner;
 };
 
 extern const struct fence_ops amd_sched_fence_ops;
@@ -128,11 +133,11 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  uint32_t jobs);
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity);
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 
 struct amd_sched_fence *amd_sched_fence_create(
 	struct amd_sched_entity *s_entity, void *owner);
+void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
 void amd_sched_fence_signal(struct amd_sched_fence *fence);
 
-
 #endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index d802638094f4..87c78eecea64 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -32,9 +32,11 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
 	struct amd_sched_fence *fence = NULL;
 	unsigned seq;
 
-	fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
+	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
 	if (fence == NULL)
 		return NULL;
+
+	INIT_LIST_HEAD(&fence->scheduled_cb);
 	fence->owner = owner;
 	fence->sched = s_entity->sched;
 	spin_lock_init(&fence->lock);
@@ -55,6 +57,17 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence)
 		FENCE_TRACE(&fence->base, "was already signaled\n");
 }
 
+void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
+{
+	struct fence_cb *cur, *tmp;
+
+	set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
+	list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
+		list_del_init(&cur->node);
+		cur->func(&s_fence->base, cur);
+	}
+}
+
 static const char *amd_sched_fence_get_driver_name(struct fence *fence)
 {
 	return "amd_sched";
@@ -71,11 +84,17 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
 	return true;
 }
 
+static void amd_sched_fence_release(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+	kmem_cache_free(sched_fence_slab, fence);
+}
+
 const struct fence_ops amd_sched_fence_ops = {
 	.get_driver_name = amd_sched_fence_get_driver_name,
 	.get_timeline_name = amd_sched_fence_get_timeline_name,
 	.enable_signaling = amd_sched_fence_enable_signaling,
 	.signaled = NULL,
 	.wait = fence_default_wait,
-	.release = NULL,
+	.release = amd_sched_fence_release,
 };
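
amd_sched_fence_scheduled() above sets the SCHEDULED bit and then drains the scheduled_cb list, unlinking each callback node before invoking it, which is what lets entities wait for "scheduled" rather than "signaled". A simplified sketch of that drain-then-call pattern (an array replaces the kernel's list_for_each_entry_safe() walk):

    #include <stdio.h>

    typedef void (*sched_cb)(void *ctx);

    /* Unlink each callback before calling it, as list_del_init() does in
     * the kernel version, so a callback may re-register itself safely. */
    static void flush_scheduled_cbs(sched_cb *cbs, void **ctxs, int n)
    {
        for (int i = 0; i < n; i++) {
            sched_cb cb = cbs[i];
            cbs[i] = NULL;
            if (cb)
                cb(ctxs[i]);
        }
    }

    static void wakeup(void *ctx)
    {
        printf("wake entity %s\n", (const char *)ctx);
    }

    int main(void)
    {
        sched_cb cbs[2] = { wakeup, wakeup };
        void *ctxs[2] = { "A", "B" };
        flush_scheduled_cbs(cbs, ctxs, 2);
        return 0;
    }
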
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 7bb3845d9974..aeee083c7f95 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1432,6 +1432,45 @@ static int atomic_set_prop(struct drm_atomic_state *state,
 	return ret;
 }
 
+/**
+ * drm_atomic_update_old_fb -- Unset old_fb pointers and set plane->fb pointers.
+ *
+ * @dev: drm device to check.
+ * @plane_mask: plane mask for planes that were updated.
+ * @ret: return value, can be -EDEADLK for a retry.
+ *
+ * Before doing an update plane->old_fb is set to plane->fb,
+ * but before dropping the locks old_fb needs to be set to NULL
+ * and plane->fb updated. This is a common operation for each
+ * atomic update, so this call is split off as a helper.
+ */
+void drm_atomic_clean_old_fb(struct drm_device *dev,
+			     unsigned plane_mask,
+			     int ret)
+{
+	struct drm_plane *plane;
+
+	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
+	 * locks (ie. while it is still safe to deref plane->state). We
+	 * need to do this here because the driver entry points cannot
+	 * distinguish between legacy and atomic ioctls.
+	 */
+	drm_for_each_plane_mask(plane, dev, plane_mask) {
+		if (ret == 0) {
+			struct drm_framebuffer *new_fb = plane->state->fb;
+			if (new_fb)
+				drm_framebuffer_reference(new_fb);
+			plane->fb = new_fb;
+			plane->crtc = plane->state->crtc;
+
+			if (plane->old_fb)
+				drm_framebuffer_unreference(plane->old_fb);
+		}
+		plane->old_fb = NULL;
+	}
+}
+EXPORT_SYMBOL(drm_atomic_clean_old_fb);
+
 int drm_mode_atomic_ioctl(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv)
 {
@@ -1446,7 +1485,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	struct drm_plane *plane;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
-	unsigned plane_mask = 0;
+	unsigned plane_mask;
 	int ret = 0;
 	unsigned int i, j;
 
@@ -1486,6 +1525,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
 
 retry:
+	plane_mask = 0;
 	copied_objs = 0;
 	copied_props = 0;
 
@@ -1576,24 +1616,7 @@ retry:
 	}
 
 out:
-	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
-	 * locks (ie. while it is still safe to deref plane->state). We
-	 * need to do this here because the driver entry points cannot
-	 * distinguish between legacy and atomic ioctls.
-	 */
-	drm_for_each_plane_mask(plane, dev, plane_mask) {
-		if (ret == 0) {
-			struct drm_framebuffer *new_fb = plane->state->fb;
-			if (new_fb)
-				drm_framebuffer_reference(new_fb);
-			plane->fb = new_fb;
-			plane->crtc = plane->state->crtc;
-
-			if (plane->old_fb)
-				drm_framebuffer_unreference(plane->old_fb);
-		}
-		plane->old_fb = NULL;
-	}
+	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
 	if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
 		/*
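
The new drm_atomic_clean_old_fb() exists so the atomic ioctl and the fbdev restore path further down share one old_fb fixup; callers accumulate a bitmask of touched planes while building the state and pass it in on every exit path. The mask bookkeeping itself is ordinary bit twiddling, as in this illustrative fragment with made-up plane indices:

    #include <stdio.h>

    int main(void)
    {
        unsigned plane_mask = 0;

        /* Each touched plane contributes its index bit; in the driver this
         * is 1 << drm_plane_index(plane). Indices here are made up. */
        plane_mask |= 1u << 0;      /* primary plane */
        plane_mask |= 1u << 2;      /* cursor plane */

        /* drm_atomic_clean_old_fb() then visits exactly the flagged planes. */
        for (unsigned i = 0; i < 32; i++)
            if (plane_mask & (1u << i))
                printf("fix up plane %u\n", i);
        return 0;
    }
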
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 0c6f62168776..e5aec45bf985 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -210,6 +210,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
 		return -EINVAL;
 	}
 
+	if (!drm_encoder_crtc_ok(new_encoder, connector_state->crtc)) {
+		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d]\n",
+				 new_encoder->base.id,
+				 new_encoder->name,
+				 connector_state->crtc->base.id);
+		return -EINVAL;
+	}
+
 	if (new_encoder == connector_state->best_encoder) {
 		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
 				 connector->base.id,
@@ -1553,6 +1561,9 @@ retry:
 		goto fail;
 	}
 
+	if (plane_state->crtc && (plane == plane->crtc->cursor))
+		plane_state->state->legacy_cursor_update = true;
+
 	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
 	if (ret != 0)
 		goto fail;
@@ -1605,9 +1616,6 @@ int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
 	plane_state->src_h = 0;
 	plane_state->src_w = 0;
 
-	if (plane->crtc && (plane == plane->crtc->cursor))
-		plane_state->state->legacy_cursor_update = true;
-
 	return 0;
 }
 
@@ -1741,6 +1749,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
 	struct drm_crtc_state *crtc_state;
 	struct drm_plane_state *primary_state;
 	struct drm_crtc *crtc = set->crtc;
+	int hdisplay, vdisplay;
 	int ret;
 
 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -1783,19 +1792,21 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
 	if (ret != 0)
 		return ret;
 
+	drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
+
 	drm_atomic_set_fb_for_plane(primary_state, set->fb);
 	primary_state->crtc_x = 0;
 	primary_state->crtc_y = 0;
-	primary_state->crtc_h = set->mode->vdisplay;
-	primary_state->crtc_w = set->mode->hdisplay;
+	primary_state->crtc_h = vdisplay;
+	primary_state->crtc_w = hdisplay;
 	primary_state->src_x = set->x << 16;
 	primary_state->src_y = set->y << 16;
 	if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
-		primary_state->src_h = set->mode->hdisplay << 16;
-		primary_state->src_w = set->mode->vdisplay << 16;
+		primary_state->src_h = hdisplay << 16;
+		primary_state->src_w = vdisplay << 16;
 	} else {
-		primary_state->src_h = set->mode->vdisplay << 16;
-		primary_state->src_w = set->mode->hdisplay << 16;
+		primary_state->src_h = vdisplay << 16;
+		primary_state->src_w = hdisplay << 16;
 	}
 
 commit:
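The __drm_atomic_helper_set_config() hunk above fetches the active display size once through drm_crtc_get_hv_timing() and reuses it, instead of reading set->mode->hdisplay/vdisplay in four places; the swap for 90/270 degree rotation is preserved. As a minimal user-space sketch of that swap (toy types, illustrative only — not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct mode { int hdisplay, vdisplay; };

/* Mirror of the plane-state setup: src_* are 16.16 fixed point, and a
 * 90/270 degree rotated primary plane samples width/height swapped. */
static void map_primary(const struct mode *m, bool rotated_90_270,
                        int *src_w, int *src_h)
{
	if (rotated_90_270) {
		*src_h = m->hdisplay << 16;
		*src_w = m->vdisplay << 16;
	} else {
		*src_h = m->vdisplay << 16;
		*src_w = m->hdisplay << 16;
	}
}

int main(void)
{
	struct mode m = { 1920, 1080 };
	int w, h;

	map_primary(&m, true, &w, &h);
	printf("rotated source: %dx%d\n", w >> 16, h >> 16); /* 1080x1920 */
	return 0;
}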
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 9362609df38a..7dd6728dd092 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -160,6 +160,11 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
 
+	if (!file_priv->allowed_master) {
+		ret = drm_new_set_master(dev, file_priv);
+		goto out_unlock;
+	}
+
 	file_priv->minor->master = drm_master_get(file_priv->master);
 	file_priv->is_master = 1;
 	if (dev->driver->master_set) {
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index e673c13c7391..69cbab5e5c81 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -342,6 +342,7 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
 	struct drm_plane *plane;
 	struct drm_atomic_state *state;
 	int i, ret;
+	unsigned plane_mask;
 
 	state = drm_atomic_state_alloc(dev);
 	if (!state)
@@ -349,11 +350,10 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
 
 	state->acquire_ctx = dev->mode_config.acquire_ctx;
 retry:
+	plane_mask = 0;
 	drm_for_each_plane(plane, dev) {
 		struct drm_plane_state *plane_state;
 
-		plane->old_fb = plane->fb;
-
 		plane_state = drm_atomic_get_plane_state(state, plane);
 		if (IS_ERR(plane_state)) {
 			ret = PTR_ERR(plane_state);
@@ -362,6 +362,9 @@ retry:
 
 		plane_state->rotation = BIT(DRM_ROTATE_0);
 
+		plane->old_fb = plane->fb;
+		plane_mask |= 1 << drm_plane_index(plane);
+
 		/* disable non-primary: */
 		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
 			continue;
@@ -382,19 +385,7 @@ retry:
 	ret = drm_atomic_commit(state);
 
 fail:
-	drm_for_each_plane(plane, dev) {
-		if (ret == 0) {
-			struct drm_framebuffer *new_fb = plane->state->fb;
-			if (new_fb)
-				drm_framebuffer_reference(new_fb);
-			plane->fb = new_fb;
-			plane->crtc = plane->state->crtc;
-
-			if (plane->old_fb)
-				drm_framebuffer_unreference(plane->old_fb);
-		}
-		plane->old_fb = NULL;
-	}
+	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
 	if (ret == -EDEADLK)
 		goto backoff;
@@ -1236,7 +1227,9 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_atomic_state *state;
+	struct drm_plane *plane;
 	int i, ret;
+	unsigned plane_mask;
 
 	state = drm_atomic_state_alloc(dev);
 	if (!state)
@@ -1244,19 +1237,22 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
 
 	state->acquire_ctx = dev->mode_config.acquire_ctx;
retry:
+	plane_mask = 0;
 	for(i = 0; i < fb_helper->crtc_count; i++) {
 		struct drm_mode_set *mode_set;
 
 		mode_set = &fb_helper->crtc_info[i].mode_set;
 
-		mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb;
-
 		mode_set->x = var->xoffset;
 		mode_set->y = var->yoffset;
 
 		ret = __drm_atomic_helper_set_config(mode_set, state);
 		if (ret != 0)
 			goto fail;
+
+		plane = mode_set->crtc->primary;
+		plane_mask |= drm_plane_index(plane);
+		plane->old_fb = plane->fb;
 	}
 
 	ret = drm_atomic_commit(state);
@@ -1268,26 +1264,7 @@ retry:
 
 
 fail:
-	for(i = 0; i < fb_helper->crtc_count; i++) {
-		struct drm_mode_set *mode_set;
-		struct drm_plane *plane;
-
-		mode_set = &fb_helper->crtc_info[i].mode_set;
-		plane = mode_set->crtc->primary;
-
-		if (ret == 0) {
-			struct drm_framebuffer *new_fb = plane->state->fb;
-
-			if (new_fb)
-				drm_framebuffer_reference(new_fb);
-			plane->fb = new_fb;
-			plane->crtc = plane->state->crtc;
-
-			if (plane->old_fb)
-				drm_framebuffer_unreference(plane->old_fb);
-		}
-		plane->old_fb = NULL;
-	}
+	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
 	if (ret == -EDEADLK)
 		goto backoff;
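Both fbdev paths above now hand the legacy plane->fb / plane->old_fb bookkeeping to drm_atomic_clean_old_fb() instead of open-coding it twice. A toy model of the rule that helper enforces (plain C with fake refcounts, just to make the invariant explicit; not the kernel implementation): on success the plane adopts the new framebuffer and drops the old legacy reference, on failure nothing is touched, and old_fb is always cleared so the cleanup cannot run twice.

#include <assert.h>
#include <stddef.h>

struct fb { int refcount; };
struct plane { struct fb *fb, *old_fb, *state_fb; };

static void fb_ref(struct fb *fb)   { if (fb) fb->refcount++; }
static void fb_unref(struct fb *fb) { if (fb) fb->refcount--; }

static void clean_old_fb(struct plane *p, int ret)
{
	if (ret == 0) {
		fb_ref(p->state_fb);   /* new fb gains the legacy reference */
		p->fb = p->state_fb;
		fb_unref(p->old_fb);   /* old legacy reference is dropped */
	}
	p->old_fb = NULL;              /* never run the cleanup twice */
}

int main(void)
{
	struct fb old = { .refcount = 1 }, new = { .refcount = 1 };
	struct plane p = { .fb = &old, .old_fb = &old, .state_fb = &new };

	clean_old_fb(&p, 0);
	assert(old.refcount == 0 && new.refcount == 2 && p.fb == &new);
	return 0;
}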
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index c59ce4d0ef75..6b5625e66119 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -126,6 +126,60 @@ static int drm_cpu_valid(void)
 }
 
 /**
+ * drm_new_set_master - Allocate a new master object and become master for the
+ * associated master realm.
+ *
+ * @dev: The associated device.
+ * @fpriv: File private identifying the client.
+ *
+ * This function must be called with dev::struct_mutex held.
+ * Returns negative error code on failure. Zero on success.
+ */
+int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
+{
+	struct drm_master *old_master;
+	int ret;
+
+	lockdep_assert_held_once(&dev->master_mutex);
+
+	/* create a new master */
+	fpriv->minor->master = drm_master_create(fpriv->minor);
+	if (!fpriv->minor->master)
+		return -ENOMEM;
+
+	/* take another reference for the copy in the local file priv */
+	old_master = fpriv->master;
+	fpriv->master = drm_master_get(fpriv->minor->master);
+
+	if (dev->driver->master_create) {
+		ret = dev->driver->master_create(dev, fpriv->master);
+		if (ret)
+			goto out_err;
+	}
+	if (dev->driver->master_set) {
+		ret = dev->driver->master_set(dev, fpriv, true);
+		if (ret)
+			goto out_err;
+	}
+
+	fpriv->is_master = 1;
+	fpriv->allowed_master = 1;
+	fpriv->authenticated = 1;
+	if (old_master)
+		drm_master_put(&old_master);
+
+	return 0;
+
+out_err:
+	/* drop both references and restore old master on failure */
+	drm_master_put(&fpriv->minor->master);
+	drm_master_put(&fpriv->master);
+	fpriv->master = old_master;
+
+	return ret;
+}
+
+/**
  * Called whenever a process opens /dev/drm.
  *
  * \param filp file pointer.
@@ -189,35 +243,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 	mutex_lock(&dev->master_mutex);
 	if (drm_is_primary_client(priv) && !priv->minor->master) {
 		/* create a new master */
-		priv->minor->master = drm_master_create(priv->minor);
-		if (!priv->minor->master) {
-			ret = -ENOMEM;
+		ret = drm_new_set_master(dev, priv);
+		if (ret)
 			goto out_close;
-		}
-
-		priv->is_master = 1;
-		/* take another reference for the copy in the local file priv */
-		priv->master = drm_master_get(priv->minor->master);
-		priv->authenticated = 1;
-
-		if (dev->driver->master_create) {
-			ret = dev->driver->master_create(dev, priv->master);
-			if (ret) {
-				/* drop both references if this fails */
-				drm_master_put(&priv->minor->master);
-				drm_master_put(&priv->master);
-				goto out_close;
-			}
-		}
-		if (dev->driver->master_set) {
-			ret = dev->driver->master_set(dev, priv, true);
-			if (ret) {
-				/* drop both references if this fails */
-				drm_master_put(&priv->minor->master);
-				drm_master_put(&priv->master);
-				goto out_close;
-			}
-		}
 	} else if (drm_is_primary_client(priv)) {
 		/* get a reference to the master */
 		priv->master = drm_master_get(priv->minor->master);
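A usage sketch for the new helper (hypothetical caller, not part of this series): both drm_open_helper() and drm_setmaster_ioctl() now funnel master creation through drm_new_set_master(), always under dev->master_mutex so the lockdep assertion inside the helper holds.

static int foo_become_master(struct drm_device *dev, struct drm_file *fpriv)
{
	int ret = 0;

	mutex_lock(&dev->master_mutex);
	/* only clients that have never been master need the full setup */
	if (!fpriv->allowed_master)
		ret = drm_new_set_master(dev, fpriv);
	mutex_unlock(&dev->master_mutex);

	return ret;
}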
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 2151ea551d3b..607f493ae801 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -980,7 +980,8 @@ static void send_vblank_event(struct drm_device *dev,
 		struct drm_pending_vblank_event *e,
 		unsigned long seq, struct timeval *now)
 {
-	WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+	assert_spin_locked(&dev->event_lock);
+
 	e->event.sequence = seq;
 	e->event.tv_sec = now->tv_sec;
 	e->event.tv_usec = now->tv_usec;
@@ -993,6 +994,57 @@ static void send_vblank_event(struct drm_device *dev,
 }
 
 /**
+ * drm_arm_vblank_event - arm vblank event after pageflip
+ * @dev: DRM device
+ * @pipe: CRTC index
+ * @e: the event to prepare to send
+ *
+ * A lot of drivers need to generate vblank events for the very next vblank
+ * interrupt. For example when the page flip interrupt happens when the page
+ * flip gets armed, but not when it actually executes within the next vblank
+ * period. This helper function implements exactly the required vblank arming
+ * behaviour.
+ *
+ * Caller must hold event lock. Caller must also hold a vblank reference for
+ * the event @e, which will be dropped when the next vblank arrives.
+ *
+ * This is the legacy version of drm_crtc_arm_vblank_event().
+ */
+void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
+			  struct drm_pending_vblank_event *e)
+{
+	assert_spin_locked(&dev->event_lock);
+
+	e->pipe = pipe;
+	e->event.sequence = drm_vblank_count(dev, pipe);
+	list_add_tail(&e->base.link, &dev->vblank_event_list);
+}
+EXPORT_SYMBOL(drm_arm_vblank_event);
+
+/**
+ * drm_crtc_arm_vblank_event - arm vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * A lot of drivers need to generate vblank events for the very next vblank
+ * interrupt. For example when the page flip interrupt happens when the page
+ * flip gets armed, but not when it actually executes within the next vblank
+ * period. This helper function implements exactly the required vblank arming
+ * behaviour.
+ *
+ * Caller must hold event lock. Caller must also hold a vblank reference for
+ * the event @e, which will be dropped when the next vblank arrives.
+ *
+ * This is the native KMS version of drm_arm_vblank_event().
+ */
+void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+			       struct drm_pending_vblank_event *e)
+{
+	drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
+
+/**
  * drm_send_vblank_event - helper to send vblank event after pageflip
  * @dev: DRM device
  * @pipe: CRTC index
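A sketch of how a driver's flip path might use the new arming helpers (the foo_* name is hypothetical): the event is armed when the flip is queued, and the DRM core then delivers it on the next vblank, so the driver no longer has to call drm_crtc_send_vblank_event() by hand from its flip-done interrupt.

static void foo_queue_flip_event(struct drm_crtc *crtc,
				 struct drm_pending_vblank_event *event)
{
	unsigned long flags;

	/* the vblank reference is consumed when the armed event fires */
	WARN_ON(drm_crtc_vblank_get(crtc) != 0);

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	drm_crtc_arm_vblank_event(crtc, event);
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}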
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index a18164f2f6d2..f8b5fcfa91a2 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -229,7 +229,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
 		mode_flags |= DRM_MODE_FLAG_3D_MASK;
 
 	list_for_each_entry(mode, &connector->modes, head) {
-		mode->status = drm_mode_validate_basic(mode);
+		if (mode->status == MODE_OK)
+			mode->status = drm_mode_validate_basic(mode);
 
 		if (mode->status == MODE_OK)
 			mode->status = drm_mode_validate_size(mode, maxX, maxY);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a3b22bdacd44..8aab974b0564 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2734,6 +2734,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
 		return "AUX_C";
 	case POWER_DOMAIN_AUX_D:
 		return "AUX_D";
+	case POWER_DOMAIN_GMBUS:
+		return "GMBUS";
 	case POWER_DOMAIN_INIT:
 		return "INIT";
 	default:
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8afda459a26e..a01e51581c4c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -199,6 +199,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_AUX_B,
 	POWER_DOMAIN_AUX_C,
 	POWER_DOMAIN_AUX_D,
+	POWER_DOMAIN_GMBUS,
 	POWER_DOMAIN_INIT,
 
 	POWER_DOMAIN_NUM,
@@ -351,6 +352,8 @@ enum intel_dpll_id {
 	/* hsw/bdw */
 	DPLL_ID_WRPLL1 = 0,
 	DPLL_ID_WRPLL2 = 1,
+	DPLL_ID_SPLL = 2,
+
 	/* skl */
 	DPLL_ID_SKL_DPLL1 = 0,
 	DPLL_ID_SKL_DPLL2 = 1,
@@ -367,6 +370,7 @@ struct intel_dpll_hw_state {
 
 	/* hsw, bdw */
 	uint32_t wrpll;
+	uint32_t spll;
 
 	/* skl */
 	/*
@@ -2648,6 +2652,7 @@ struct i915_params {
 	int enable_cmd_parser;
 	/* leave bools at the end to not create holes */
 	bool enable_hangcheck;
+	bool fastboot;
 	bool prefault_disable;
 	bool load_detect_test;
 	bool reset;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5cf4a1998273..32e6aade6223 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1210,8 +1210,16 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	if (i915_gem_request_completed(req, true))
 		return 0;
 
-	timeout_expire = timeout ?
-		jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
+	timeout_expire = 0;
+	if (timeout) {
+		if (WARN_ON(*timeout < 0))
+			return -EINVAL;
+
+		if (*timeout == 0)
+			return -ETIME;
+
+		timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+	}
 
 	if (INTEL_INFO(dev_priv)->gen >= 6)
 		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
@@ -3809,6 +3817,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_caching *args = data;
 	struct drm_i915_gem_object *obj;
 	enum i915_cache_level level;
@@ -3837,9 +3846,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	intel_runtime_pm_get(dev_priv);
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
-		return ret;
+		goto rpm_put;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (&obj->base == NULL) {
@@ -3852,6 +3863,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 	drm_gem_object_unreference(&obj->base);
unlock:
 	mutex_unlock(&dev->struct_mutex);
+rpm_put:
+	intel_runtime_pm_put(dev_priv);
+
 	return ret;
 }
 
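The __i915_wait_request() hunk above changes the timeout contract: NULL still means wait indefinitely, but a negative value is now rejected up front and a zero value returns -ETIME immediately instead of being treated as "no deadline". A small user-space model of just that decision (simplified from the diff, illustrative only):

#include <errno.h>
#include <stdio.h>

static int pick_expiry(const long *timeout_ns, long now, long *expire)
{
	*expire = 0;                    /* 0 == wait without a deadline */
	if (timeout_ns) {
		if (*timeout_ns < 0)
			return -EINVAL; /* WARN_ON(*timeout < 0) in the diff */
		if (*timeout_ns == 0)
			return -ETIME;  /* nothing left to wait for */
		*expire = now + *timeout_ns;
	}
	return 0;
}

int main(void)
{
	long t = 0, expire;
	printf("%d\n", pick_expiry(&t, 1000, &expire));              /* -ETIME */
	t = 500;
	printf("%d %ld\n", pick_expiry(&t, 1000, &expire), expire);  /* 0 1500 */
	return 0;
}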
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8c688a5f1589..02ceb7a4b481 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -141,8 +141,6 @@ static void i915_gem_context_clean(struct intel_context *ctx)
 	if (!ppgtt)
 		return;
 
-	WARN_ON(!list_empty(&ppgtt->base.active_list));
-
 	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
 				 mm_list) {
 		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 40a10b25956c..f010391b87f5 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -642,11 +642,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	}
 
 	/* check for L-shaped memory aka modified enhanced addressing */
-	if (IS_GEN4(dev)) {
-		uint32_t ddc2 = I915_READ(DCC2);
-
-		if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
-			dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+	if (IS_GEN4(dev) &&
+	    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+		swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+		swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 	}
 
 	if (dcc == 0xffffffff) {
@@ -675,16 +674,35 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 		 * matching, which was the case for the swizzling required in
 		 * the table above, or from the 1-ch value being less than
 		 * the minimum size of a rank.
+		 *
+		 * Reports indicate that the swizzling actually
+		 * varies depending upon page placement inside the
+		 * channels, i.e. we see swizzled pages where the
+		 * banks of memory are paired and unswizzled on the
+		 * uneven portion, so leave that as unknown.
 		 */
-		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
-			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
-			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
-		} else {
+		if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 			swizzle_y = I915_BIT_6_SWIZZLE_9;
 		}
 	}
 
+	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
+	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
+		/* Userspace likes to explode if it sees unknown swizzling,
+		 * so lie. We will finish the lie when reporting through
+		 * the get-tiling-ioctl by reporting the physical swizzle
+		 * mode as unknown instead.
+		 *
+		 * As we don't strictly know what the swizzling is, it may be
+		 * bit17 dependent, and so we need to also prevent the pages
+		 * from being moved.
+		 */
+		dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	}
+
 	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
 	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
 }
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 96bb23865eac..4be13a5eb932 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -40,6 +40,7 @@ struct i915_params i915 __read_mostly = {
 	.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
 	.disable_power_well = -1,
 	.enable_ips = 1,
+	.fastboot = 0,
 	.prefault_disable = 0,
 	.load_detect_test = 0,
 	.reset = true,
@@ -133,6 +134,10 @@ MODULE_PARM_DESC(disable_power_well,
 module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
 MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
+module_param_named(fastboot, i915.fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot,
+	"Try to skip unnecessary mode sets at boot time (default: false)");
+
 module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
 MODULE_PARM_DESC(prefault_disable,
 	"Disable page prefaulting for pread/pwrite/reloc (default:false). "
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b84aaa0bb48a..6a2c76e367a5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -138,18 +138,6 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
 	pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
 }
 
-static void hsw_crt_pre_enable(struct intel_encoder *encoder)
-{
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
-	I915_WRITE(SPLL_CTL,
-		   SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
-	POSTING_READ(SPLL_CTL);
-	udelay(20);
-}
-
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -216,19 +204,6 @@ static void pch_post_disable_crt(struct intel_encoder *encoder)
 	intel_disable_crt(encoder);
 }
 
-static void hsw_crt_post_disable(struct intel_encoder *encoder)
-{
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t val;
-
-	DRM_DEBUG_KMS("Disabling SPLL\n");
-	val = I915_READ(SPLL_CTL);
-	WARN_ON(!(val & SPLL_PLL_ENABLE));
-	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
-	POSTING_READ(SPLL_CTL);
-}
-
 static void intel_enable_crt(struct intel_encoder *encoder)
 {
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -280,6 +255,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 	if (HAS_DDI(dev)) {
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
 		pipe_config->port_clock = 135000 * 2;
+
+		pipe_config->dpll_hw_state.wrpll = 0;
+		pipe_config->dpll_hw_state.spll =
+			SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
 	}
 
 	return true;
@@ -860,8 +839,6 @@ void intel_crt_init(struct drm_device *dev)
 	if (HAS_DDI(dev)) {
 		crt->base.get_config = hsw_crt_get_config;
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
-		crt->base.pre_enable = hsw_crt_pre_enable;
-		crt->base.post_disable = hsw_crt_post_disable;
 	} else {
 		crt->base.get_config = intel_crt_get_config;
 		crt->base.get_hw_state = intel_crt_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b25e99a432fb..a6752a61d99f 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1286,6 +1286,18 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
 		}
 
 		crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+	} else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) {
+		struct drm_atomic_state *state = crtc_state->base.state;
+		struct intel_shared_dpll_config *spll =
+			&intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL];
+
+		if (spll->crtc_mask &&
+		    WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll))
+			return false;
+
+		crtc_state->shared_dpll = DPLL_ID_SPLL;
+		spll->hw_state.spll = crtc_state->dpll_hw_state.spll;
+		spll->crtc_mask |= 1 << intel_crtc->pipe;
 	}
 
 	return true;
@@ -2437,7 +2449,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
 	}
 }
 
-static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
 			       struct intel_shared_dpll *pll)
 {
 	I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
@@ -2445,9 +2457,17 @@ static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
 	udelay(20);
 }
 
-static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
-				struct intel_shared_dpll *pll)
+static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
+				struct intel_shared_dpll *pll)
 {
+	I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
+	POSTING_READ(SPLL_CTL);
+	udelay(20);
+}
+
+static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll *pll)
+{
 	uint32_t val;
 
 	val = I915_READ(WRPLL_CTL(pll->id));
@@ -2455,9 +2475,19 @@ static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
 	POSTING_READ(WRPLL_CTL(pll->id));
 }
 
-static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
-				     struct intel_shared_dpll *pll,
-				     struct intel_dpll_hw_state *hw_state)
+static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+
+	val = I915_READ(SPLL_CTL);
+	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+	POSTING_READ(SPLL_CTL);
+}
+
+static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
+				       struct intel_shared_dpll *pll,
+				       struct intel_dpll_hw_state *hw_state)
 {
 	uint32_t val;
 
@@ -2470,25 +2500,50 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	return val & WRPLL_PLL_ENABLE;
 }
 
+static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
+				      struct intel_shared_dpll *pll,
+				      struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(SPLL_CTL);
+	hw_state->spll = val;
+
+	return val & SPLL_PLL_ENABLE;
+}
+
+
 static const char * const hsw_ddi_pll_names[] = {
 	"WRPLL 1",
 	"WRPLL 2",
+	"SPLL"
 };
 
 static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
 {
 	int i;
 
-	dev_priv->num_shared_dpll = 2;
+	dev_priv->num_shared_dpll = 3;
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+	for (i = 0; i < 2; i++) {
 		dev_priv->shared_dplls[i].id = i;
 		dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
-		dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
-		dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
+		dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable;
+		dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable;
 		dev_priv->shared_dplls[i].get_hw_state =
-			hsw_ddi_pll_get_hw_state;
+			hsw_ddi_wrpll_get_hw_state;
 	}
+
+	/* SPLL is special, but needs to be initialized anyway.. */
+	dev_priv->shared_dplls[i].id = i;
+	dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+	dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable;
+	dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable;
+	dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state;
+
 }
 
 static const char * const skl_ddi_pll_names[] = {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f62ffc04c21d..62211abe4922 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2646,11 +2646,13 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
 		return;
 
valid_fb:
-	plane_state->src_x = plane_state->src_y = 0;
+	plane_state->src_x = 0;
+	plane_state->src_y = 0;
 	plane_state->src_w = fb->width << 16;
 	plane_state->src_h = fb->height << 16;
 
-	plane_state->crtc_x = plane_state->src_y = 0;
+	plane_state->crtc_x = 0;
+	plane_state->crtc_y = 0;
 	plane_state->crtc_w = fb->width;
 	plane_state->crtc_h = fb->height;
 
@@ -4237,6 +4239,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 	struct intel_shared_dpll *pll;
 	struct intel_shared_dpll_config *shared_dpll;
 	enum intel_dpll_id i;
+	int max = dev_priv->num_shared_dpll;
 
 	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
 
@@ -4271,9 +4274,11 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 		WARN_ON(shared_dpll[i].crtc_mask);
 
 		goto found;
-	}
+	} else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
+		/* Do not consider SPLL */
+		max = 2;
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+	for (i = 0; i < max; i++) {
 		pll = &dev_priv->shared_dplls[i];
 
 		/* Only want to check enabled timings first */
@@ -5189,11 +5194,31 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
 	case PORT_E:
 		return POWER_DOMAIN_PORT_DDI_E_2_LANES;
 	default:
-		WARN_ON_ONCE(1);
+		MISSING_CASE(port);
 		return POWER_DOMAIN_PORT_OTHER;
 	}
 }
 
+static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
+{
+	switch (port) {
+	case PORT_A:
+		return POWER_DOMAIN_AUX_A;
+	case PORT_B:
+		return POWER_DOMAIN_AUX_B;
+	case PORT_C:
+		return POWER_DOMAIN_AUX_C;
+	case PORT_D:
+		return POWER_DOMAIN_AUX_D;
+	case PORT_E:
+		/* FIXME: Check VBT for actual wiring of PORT E */
+		return POWER_DOMAIN_AUX_D;
+	default:
+		MISSING_CASE(port);
+		return POWER_DOMAIN_AUX_A;
+	}
+}
+
 #define for_each_power_domain(domain, mask) \
 	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
 		if ((1 << (domain)) & (mask))
@@ -5225,6 +5250,36 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
 	}
 }
 
+enum intel_display_power_domain
+intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
+{
+	struct drm_device *dev = intel_encoder->base.dev;
+	struct intel_digital_port *intel_dig_port;
+
+	switch (intel_encoder->type) {
+	case INTEL_OUTPUT_UNKNOWN:
+	case INTEL_OUTPUT_HDMI:
+		/*
+		 * Only DDI platforms should ever use these output types.
+		 * We can get here after the HDMI detect code has already set
+		 * the type of the shared encoder. Since we can't be sure
+		 * what's the status of the given connectors, play safe and
+		 * run the DP detection too.
+		 */
+		WARN_ON_ONCE(!HAS_DDI(dev));
+	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_EDP:
+		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+		return port_to_aux_power_domain(intel_dig_port->port);
+	case INTEL_OUTPUT_DP_MST:
+		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
+		return port_to_aux_power_domain(intel_dig_port->port);
+	default:
+		MISSING_CASE(intel_encoder->type);
+		return POWER_DOMAIN_AUX_A;
+	}
+}
+
 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -6254,9 +6309,11 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 	if (to_intel_plane_state(crtc->primary->state)->visible) {
 		intel_crtc_wait_for_pending_flips(crtc);
 		intel_pre_disable_primary(crtc);
+
+		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
+		to_intel_plane_state(crtc->primary->state)->visible = false;
 	}
 
-	intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
 	dev_priv->display.crtc_disable(crtc);
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);
@@ -9723,6 +9780,8 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
 	case PORT_CLK_SEL_WRPLL2:
 		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
 		break;
+	case PORT_CLK_SEL_SPLL:
+		pipe_config->shared_dpll = DPLL_ID_SPLL;
 	}
 }
 
@@ -12003,9 +12062,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
 			      pipe_config->dpll_hw_state.cfgcr1,
 			      pipe_config->dpll_hw_state.cfgcr2);
 	} else if (HAS_DDI(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n",
+		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
 			      pipe_config->ddi_pll_sel,
-			      pipe_config->dpll_hw_state.wrpll);
+			      pipe_config->dpll_hw_state.wrpll,
+			      pipe_config->dpll_hw_state.spll);
 	} else {
 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
 			      "fp0: 0x%x, fp1: 0x%x\n",
@@ -12452,7 +12512,6 @@ intel_pipe_config_compare(struct drm_device *dev,
 	if (INTEL_INFO(dev)->gen < 8) {
 		PIPE_CONF_CHECK_M_N(dp_m_n);
 
-		PIPE_CONF_CHECK_I(has_drrs);
 		if (current_config->has_drrs)
 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
 	} else
@@ -12528,6 +12587,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
+	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
@@ -13032,6 +13092,9 @@ static int intel_atomic_check(struct drm_device *dev,
 		struct intel_crtc_state *pipe_config =
 			to_intel_crtc_state(crtc_state);
 
+		memset(&to_intel_crtc(crtc)->atomic, 0,
+		       sizeof(struct intel_crtc_atomic_commit));
+
 		/* Catch I915_MODE_FLAG_INHERITED */
 		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
 			crtc_state->mode_changed = true;
@@ -13056,7 +13119,8 @@
 		if (ret)
 			return ret;
 
-		if (intel_pipe_config_compare(state->dev,
+		if (i915.fastboot &&
+		    intel_pipe_config_compare(state->dev,
 					to_intel_crtc_state(crtc->state),
 					pipe_config, true)) {
 			crtc_state->mode_changed = false;
@@ -14364,16 +14428,17 @@ static int intel_framebuffer_init(struct drm_device *dev,
 static struct drm_framebuffer *
 intel_user_framebuffer_create(struct drm_device *dev,
 			      struct drm_file *filp,
-			      struct drm_mode_fb_cmd2 *mode_cmd)
+			      struct drm_mode_fb_cmd2 *user_mode_cmd)
 {
 	struct drm_i915_gem_object *obj;
+	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
-						mode_cmd->handles[0]));
+						mode_cmd.handles[0]));
 	if (&obj->base == NULL)
 		return ERR_PTR(-ENOENT);
 
-	return intel_framebuffer_create(dev, mode_cmd, obj);
+	return intel_framebuffer_create(dev, &mode_cmd, obj);
 }
 
 #ifndef CONFIG_DRM_FBDEV_EMULATION
@@ -14705,6 +14770,9 @@ static struct intel_quirk intel_quirks[] = {
 	/* Apple Macbook 2,1 (Core 2 T7400) */
 	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
 
+	/* Apple Macbook 4,1 */
+	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
 	/* Toshiba CB35 Chromebook (Celeron 2955U) */
 	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09bdd94ca3ba..78b8ec84d576 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -277,7 +277,7 @@ static void pps_lock(struct intel_dp *intel_dp)
 	 * See vlv_power_sequencer_reset() why we need
 	 * a power domain reference here.
 	 */
-	power_domain = intel_display_port_power_domain(encoder);
+	power_domain = intel_display_port_aux_power_domain(encoder);
 	intel_display_power_get(dev_priv, power_domain);
 
 	mutex_lock(&dev_priv->pps_mutex);
@@ -293,7 +293,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
 
 	mutex_unlock(&dev_priv->pps_mutex);
 
-	power_domain = intel_display_port_power_domain(encoder);
+	power_domain = intel_display_port_aux_power_domain(encoder);
 	intel_display_power_put(dev_priv, power_domain);
 }
 
@@ -816,8 +816,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 
 	intel_dp_check_edp(intel_dp);
 
-	intel_aux_display_runtime_get(dev_priv);
-
 	/* Try to wait for any previous AUX channel activity */
 	for (try = 0; try < 3; try++) {
 		status = I915_READ_NOTRACE(ch_ctl);
@@ -926,7 +924,6 @@ done:
 	ret = recv_bytes;
out:
 	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-	intel_aux_display_runtime_put(dev_priv);
 
 	if (vdd)
 		edp_panel_vdd_off(intel_dp, false);
@@ -1784,7 +1781,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 	if (edp_have_panel_vdd(intel_dp))
 		return need_to_disable;
 
-	power_domain = intel_display_port_power_domain(intel_encoder);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_get(dev_priv, power_domain);
 
 	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
@@ -1874,7 +1871,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 	if ((pp & POWER_TARGET_ON) == 0)
 		intel_dp->last_power_cycle = jiffies;
 
-	power_domain = intel_display_port_power_domain(intel_encoder);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_put(dev_priv, power_domain);
 }
 
@@ -2025,7 +2022,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
 	wait_panel_off(intel_dp);
 
 	/* We got a reference when we enabled the VDD. */
-	power_domain = intel_display_port_power_domain(intel_encoder);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_put(dev_priv, power_domain);
 }
 
@@ -4765,26 +4762,6 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 	intel_dp->has_audio = false;
 }
 
-static enum intel_display_power_domain
-intel_dp_power_get(struct intel_dp *dp)
-{
-	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
-	enum intel_display_power_domain power_domain;
-
-	power_domain = intel_display_port_power_domain(encoder);
-	intel_display_power_get(to_i915(encoder->base.dev), power_domain);
-
-	return power_domain;
-}
-
-static void
-intel_dp_power_put(struct intel_dp *dp,
-		   enum intel_display_power_domain power_domain)
-{
-	struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
-	intel_display_power_put(to_i915(encoder->base.dev), power_domain);
-}
-
 static enum drm_connector_status
 intel_dp_detect(struct drm_connector *connector, bool force)
 {
@@ -4808,7 +4785,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 		return connector_status_disconnected;
 	}
 
-	power_domain = intel_dp_power_get(intel_dp);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
+	intel_display_power_get(to_i915(dev), power_domain);
 
 	/* Can't disconnect eDP, but you can close the lid... */
 	if (is_edp(intel_dp))
@@ -4853,7 +4831,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 	}
 
out:
-	intel_dp_power_put(intel_dp, power_domain);
+	intel_display_power_put(to_i915(dev), power_domain);
 	return status;
 }
 
@@ -4862,6 +4840,7 @@ intel_dp_force(struct drm_connector *connector)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
 	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
 	enum intel_display_power_domain power_domain;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -4871,11 +4850,12 @@ intel_dp_force(struct drm_connector *connector)
 	if (connector->status != connector_status_connected)
 		return;
 
-	power_domain = intel_dp_power_get(intel_dp);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
+	intel_display_power_get(dev_priv, power_domain);
 
 	intel_dp_set_edid(intel_dp);
 
-	intel_dp_power_put(intel_dp, power_domain);
+	intel_display_power_put(dev_priv, power_domain);
 
 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
 		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
@@ -5091,7 +5071,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 	 * indefinitely.
 	 */
 	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
-	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
+	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
 	intel_display_power_get(dev_priv, power_domain);
 
 	edp_panel_vdd_schedule_off(intel_dp);
@@ -5153,7 +5133,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 	enum intel_display_power_domain power_domain;
 	enum irqreturn ret = IRQ_NONE;
 
-	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
+	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
+	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
 		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
@@ -5172,7 +5153,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 		      port_name(intel_dig_port->port),
 		      long_hpd ? "long" : "short");
 
-	power_domain = intel_display_port_power_domain(intel_encoder);
+	power_domain = intel_display_port_aux_power_domain(intel_encoder);
 	intel_display_power_get(dev_priv, power_domain);
 
 	if (long_hpd) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0598932ce623..f2a1142bff34 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1169,6 +1169,8 @@ void hsw_enable_ips(struct intel_crtc *crtc);
 void hsw_disable_ips(struct intel_crtc *crtc);
 enum intel_display_power_domain
 intel_display_port_power_domain(struct intel_encoder *intel_encoder);
+enum intel_display_power_domain
+intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 				 struct intel_crtc_state *pipe_config);
 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
@@ -1377,8 +1379,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
 			     enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_i915_private *dev_priv,
 			     enum intel_display_power_domain domain);
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 9eafa191cee2..81cdd9ff3892 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1335,21 +1335,17 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
-	struct intel_encoder *intel_encoder =
-		&hdmi_to_dig_port(intel_hdmi)->base;
-	enum intel_display_power_domain power_domain;
 	struct edid *edid = NULL;
 	bool connected = false;
 
-	power_domain = intel_display_port_power_domain(intel_encoder);
-	intel_display_power_get(dev_priv, power_domain);
+	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
 	if (force)
 		edid = drm_get_edid(connector,
 				    intel_gmbus_get_adapter(dev_priv,
 				    intel_hdmi->ddc_bus));
 
-	intel_display_power_put(dev_priv, power_domain);
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
 
 	to_intel_connector(connector)->detect_edid = edid;
 	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -1383,6 +1379,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
 		      connector->base.id, connector->name);
 
+	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+
 	while (!live_status && --retry) {
 		live_status = intel_digital_port_connected(dev_priv,
 				hdmi_to_dig_port(intel_hdmi));
@@ -1402,6 +1400,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
 	} else
 		status = connector_status_disconnected;
 
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
 	return status;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1369fc41d039..8324654037b6 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -483,7 +483,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
 	int i = 0, inc, try = 0;
 	int ret = 0;
 
-	intel_aux_display_runtime_get(dev_priv);
+	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 	mutex_lock(&dev_priv->gmbus_mutex);
 
 	if (bus->force_bit) {
@@ -595,7 +595,9 @@ timeout:
 
out:
 	mutex_unlock(&dev_priv->gmbus_mutex);
-	intel_aux_display_runtime_put(dev_priv);
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d52a15df6917..f091ad12d694 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4449,7 +4449,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
 	POSTING_READ(GEN6_RPNSWREQ);
 
 	dev_priv->rps.cur_freq = val;
-	trace_intel_gpu_freq_change(val * 50);
+	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 }
 
 static void valleyview_set_rps(struct drm_device *dev, u8 val)
@@ -4782,8 +4782,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	/* 2b: Program RC6 thresholds.*/
 
 	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
-	if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
-				 (INTEL_REVID(dev) <= SKL_REVID_E0)))
+	if (IS_SKYLAKE(dev))
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
 	else
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4825,7 +4824,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
 	 */
 	if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
-	    ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+	    ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
 		I915_WRITE(GEN9_PG_ENABLE, 0);
 	else
 		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
@@ -7255,7 +7254,8 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
 	if (IS_GEN9(dev_priv->dev))
-		return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
+		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
+					 GEN9_FREQ_SCALER);
 	else if (IS_CHERRYVIEW(dev_priv->dev))
 		return chv_gpu_freq(dev_priv, val);
 	else if (IS_VALLEYVIEW(dev_priv->dev))
@@ -7267,13 +7267,14 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
 	if (IS_GEN9(dev_priv->dev))
-		return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
+		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
+					 GT_FREQUENCY_MULTIPLIER);
 	else if (IS_CHERRYVIEW(dev_priv->dev))
 		return chv_freq_opcode(dev_priv, val);
 	else if (IS_VALLEYVIEW(dev_priv->dev))
 		return byt_freq_opcode(dev_priv, val);
 	else
-		return val / GT_FREQUENCY_MULTIPLIER;
+		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
 }
 
 struct request_boost {
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index d89c1d0aa1b7..7e23d65c9b24 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -362,6 +362,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 	BIT(POWER_DOMAIN_AUX_C) |			\
 	BIT(POWER_DOMAIN_AUDIO) |			\
 	BIT(POWER_DOMAIN_VGA) |				\
+	BIT(POWER_DOMAIN_GMBUS) |			\
 	BIT(POWER_DOMAIN_INIT))
 #define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS (		\
 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
@@ -1483,6 +1484,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	BIT(POWER_DOMAIN_AUX_B) |			\
 	BIT(POWER_DOMAIN_AUX_C) |			\
 	BIT(POWER_DOMAIN_AUX_D) |			\
+	BIT(POWER_DOMAIN_GMBUS) |			\
 	BIT(POWER_DOMAIN_INIT))
 #define HSW_DISPLAY_POWER_DOMAINS (				\
 	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
@@ -1845,6 +1847,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
 	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
 						     i915.disable_power_well);
 
+	BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
+
 	mutex_init(&power_domains->lock);
 
 	/*
@@ -2064,36 +2068,6 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
 }
 
 /**
- * intel_aux_display_runtime_get - grab an auxiliary power domain reference
- * @dev_priv: i915 device instance
- *
- * This function grabs a power domain reference for the auxiliary power domain
- * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
- * parents are powered up. Therefore users should only grab a reference to the
- * innermost power domain they need.
- *
- * Any power domain reference obtained by this function must have a symmetric
- * call to intel_aux_display_runtime_put() to release the reference again.
- */
-void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
-{
-	intel_runtime_pm_get(dev_priv);
-}
-
-/**
- * intel_aux_display_runtime_put - release an auxiliary power domain reference
- * @dev_priv: i915 device instance
- *
- * This function drops the auxiliary power domain reference obtained by
- * intel_aux_display_runtime_get() and might power down the corresponding
- * hardware block right away if this is the last reference.
- */
-void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
-{
-	intel_runtime_pm_put(dev_priv);
-}
-
-/**
  * intel_runtime_pm_get - grab a runtime pm reference
  * @dev_priv: i915 device instance
 *
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 64f16ea779ef..7b990b4e96d2 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -63,8 +63,7 @@ static void imx_drm_driver_lastclose(struct drm_device *drm)
 #if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER)
 	struct imx_drm_device *imxdrm = drm->dev_private;
 
-	if (imxdrm->fbhelper)
-		drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
+	drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
 #endif
 }
 
@@ -340,7 +339,7 @@ err_kms:
  * imx_drm_add_crtc - add a new crtc
  */
 int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
-		struct imx_drm_crtc **new_crtc,
+		struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
 		const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs,
 		struct device_node *port)
 {
@@ -379,7 +378,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
 	drm_crtc_helper_add(crtc,
 			imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
 
-	drm_crtc_init(drm, crtc,
+	drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
 			imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
 
 	return 0;
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 28e776d8d9d2..83284b4d4be1 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -9,6 +9,7 @@ struct drm_display_mode;
 struct drm_encoder;
 struct drm_fbdev_cma;
 struct drm_framebuffer;
+struct drm_plane;
 struct imx_drm_crtc;
 struct platform_device;
 
@@ -24,7 +25,7 @@ struct imx_drm_crtc_helper_funcs {
 };
 
 int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
-		struct imx_drm_crtc **new_crtc,
+		struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane,
 		const struct imx_drm_crtc_helper_funcs *imx_helper_funcs,
 		struct device_node *port);
 int imx_drm_remove_crtc(struct imx_drm_crtc *);
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index e671ad369416..f9597146dc67 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -721,6 +721,7 @@ static const struct of_device_id imx_tve_dt_ids[] = {
 	{ .compatible = "fsl,imx53-tve", },
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, imx_tve_dt_ids);
 
 static struct platform_driver imx_tve_driver = {
 	.probe = imx_tve_probe,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7bc8301fafff..4ab841eebee1 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -212,7 +212,8 @@ static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
 
 	spin_lock_irqsave(&drm->event_lock, flags);
 	if (ipu_crtc->page_flip_event)
-		drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event);
+		drm_crtc_send_vblank_event(&ipu_crtc->base,
+					   ipu_crtc->page_flip_event);
 	ipu_crtc->page_flip_event = NULL;
 	imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
 	spin_unlock_irqrestore(&drm->event_lock, flags);
@@ -349,7 +350,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
 	int dp = -EINVAL;
 	int ret;
-	int id;
 
 	ret = ipu_get_resources(ipu_crtc, pdata);
 	if (ret) {
@@ -358,18 +358,23 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 		return ret;
 	}
 
+	if (pdata->dp >= 0)
+		dp = IPU_DP_FLOW_SYNC_BG;
+	ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
+					    DRM_PLANE_TYPE_PRIMARY);
+	if (IS_ERR(ipu_crtc->plane[0])) {
+		ret = PTR_ERR(ipu_crtc->plane[0]);
+		goto err_put_resources;
+	}
+
 	ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
-			&ipu_crtc_helper_funcs, ipu_crtc->dev->of_node);
+			&ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs,
+			ipu_crtc->dev->of_node);
 	if (ret) {
 		dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
 		goto err_put_resources;
 	}
 
-	if (pdata->dp >= 0)
-		dp = IPU_DP_FLOW_SYNC_BG;
-	id = imx_drm_crtc_id(ipu_crtc->imx_crtc);
-	ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu,
-					    pdata->dma[0], dp, BIT(id), true);
 	ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
 	if (ret) {
 		dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
@@ -379,10 +384,10 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 
 	/* If this crtc is using the DP, add an overlay plane */
 	if (pdata->dp >= 0 && pdata->dma[1] > 0) {
-		ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu,
-						    pdata->dma[1],
-						    IPU_DP_FLOW_SYNC_FG,
-						    BIT(id), false);
+		ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1],
+						    IPU_DP_FLOW_SYNC_FG,
+						    drm_crtc_mask(&ipu_crtc->base),
+						    DRM_PLANE_TYPE_OVERLAY);
 		if (IS_ERR(ipu_crtc->plane[1]))
 			ipu_crtc->plane[1] = NULL;
 	}
@@ -407,28 +412,6 @@ err_put_resources:
 	return ret;
 }
 
-static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent,
-						  int port_id)
-{
-	struct device_node *port;
-	int id, ret;
-
-	port = of_get_child_by_name(parent, "port");
-	while (port) {
-		ret = of_property_read_u32(port, "reg", &id);
-		if (!ret && id == port_id)
-			return port;
-
-		do {
-			port = of_get_next_child(parent, port);
-			if (!port)
-				return NULL;
-		} while (of_node_cmp(port->name, "port"));
-	}
-
-	return NULL;
-}
-
 static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
 {
 	struct ipu_client_platformdata *pdata = dev->platform_data;
@@ -470,23 +453,11 @@ static const struct component_ops ipu_crtc_ops = {
 static int ipu_drm_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct ipu_client_platformdata *pdata = dev->platform_data;
 	int ret;
 
 	if (!dev->platform_data)
 		return -EINVAL;
 
-	if (!dev->of_node) {
-		/* Associate crtc device with the corresponding DI port node */
-		dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node,
-						      pdata->di + 2);
-		if (!dev->of_node) {
-			dev_err(dev, "missing port@%d node in %s\n",
-				pdata->di + 2, dev->parent->of_node->full_name);
-			return -ENODEV;
-		}
-	}
-
 	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 575f4c84388f..e2ff410bab74 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -381,7 +381,7 @@ static struct drm_plane_funcs ipu_plane_funcs = {
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
 				 int dma, int dp, unsigned int possible_crtcs,
-				 bool priv)
+				 enum drm_plane_type type)
 {
 	struct ipu_plane *ipu_plane;
 	int ret;
@@ -399,10 +399,9 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
 	ipu_plane->dma = dma;
 	ipu_plane->dp_flow = dp;
 
-	ret = drm_plane_init(dev, &ipu_plane->base, possible_crtcs,
-			     &ipu_plane_funcs, ipu_plane_formats,
-			     ARRAY_SIZE(ipu_plane_formats),
-			     priv);
+	ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
+				       &ipu_plane_funcs, ipu_plane_formats,
+				       ARRAY_SIZE(ipu_plane_formats), type);
 	if (ret) {
 		DRM_ERROR("failed to initialize plane\n");
 		kfree(ipu_plane);
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 9b5eff18f5b8..3a443b413c60 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -34,7 +34,7 @@ struct ipu_plane {
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
 				 int dma, int dp, unsigned int possible_crtcs,
-				 bool priv);
+				 enum drm_plane_type type);
 
 /* Init IDMAC, DMFC, DP */
 int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index b4deb9cf9d71..2e9b9f1b5cd2 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -54,7 +54,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
 
 	if (imxpd->panel && imxpd->panel->funcs &&
 	    imxpd->panel->funcs->get_modes) {
+		struct drm_display_info *di = &connector->display_info;
+
 		num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
+		if (!imxpd->bus_format && di->num_bus_formats)
+			imxpd->bus_format = di->bus_formats[0];
 		if (num_modes > 0)
 			return num_modes;
 	}
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 4f2068fe5d88..a7bf6a90eae5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -70,6 +70,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
 	BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
 	BUG_ON(pixels_current == pixels_prev);
 
+	if (!handle || !file_priv) {
+		mga_hide_cursor(mdev);
+		return 0;
+	}
+
 	obj = drm_gem_object_lookup(dev, file_priv, handle);
 	if (!obj)
 		return -ENOENT;
@@ -88,12 +93,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
 		goto out_unreserve1;
 	}
 
-	if (!handle) {
-		mga_hide_cursor(mdev);
-		ret = 0;
-		goto out1;
-	}
-
 	/* Move cursor buffers into VRAM if they aren't already */
 	if (!pixels_1->pin_count) {
 		ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 8f760002e401..913192c94876 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -159,7 +159,6 @@ struct nvkm_device_func {
 struct nvkm_device_quirk {
 	u8 tv_pin_mask;
 	u8 tv_gpio;
-	bool War00C800_0;
 };
 
 struct nvkm_device_chip {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 28bc202f9753..40f845e31272 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -7,6 +7,7 @@ struct nvkm_instmem {
 	const struct nvkm_instmem_func *func;
 	struct nvkm_subdev subdev;
 
+	spinlock_t lock;
 	struct list_head list;
 	u32 reserved;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 8b8332e46f24..d5e6938cc6bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -367,6 +367,7 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
 		return -ENODEV;
 	}
 	obj = (union acpi_object *)buffer.pointer;
+	len = min(len, (int)obj->buffer.length);
 	memcpy(bios+offset, obj->buffer.pointer, len);
 	kfree(buffer.pointer);
 	return len;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index db6bc6760545..64c8d932d5f1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -829,7 +829,6 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 	struct drm_device *dev = drm->dev;
 	struct nouveau_page_flip_state *s;
 	unsigned long flags;
-	int crtcid = -1;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 
@@ -841,15 +840,19 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 
 	s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
 	if (s->event) {
-		/* Vblank timestamps/counts are only correct on >= NV-50 */
-		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
-			crtcid = s->crtc;
+		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+			drm_arm_vblank_event(dev, s->crtc, s->event);
+		} else {
+			drm_send_vblank_event(dev, s->crtc, s->event);
 
-		drm_send_vblank_event(dev, crtcid, s->event);
+			/* Give up ownership of vblank for page-flipped crtc */
+			drm_vblank_put(dev, s->crtc);
+		}
+	}
+	else {
+		/* Give up ownership of vblank for page-flipped crtc */
+		drm_vblank_put(dev, s->crtc);
 	}
-
-	/* Give up ownership of vblank for page-flipped crtc */
-	drm_vblank_put(dev, s->crtc);
 
 	list_del(&s->head);
 	if (ps)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 3050042e6c6d..a02813e994ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -39,6 +39,7 @@
 
 #include <nvif/client.h>
 #include <nvif/device.h>
+#include <nvif/ioctl.h>
 
 #include <drmP.h>
 
@@ -65,9 +66,10 @@ struct nouveau_drm_tile {
 };
 
 enum nouveau_drm_object_route {
-	NVDRM_OBJECT_NVIF = 0,
+	NVDRM_OBJECT_NVIF = NVIF_IOCTL_V0_OWNER_NVIF,
 	NVDRM_OBJECT_USIF,
 	NVDRM_OBJECT_ABI16,
+	NVDRM_OBJECT_ANY = NVIF_IOCTL_V0_OWNER_ANY,
 };
 
 enum nouveau_drm_notify_route {
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 89dc4ce63490..6ae1b3494bcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -313,7 +313,10 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
 	if (nvif_unpack(argv->v0, 0, 0, true)) {
 		/* block access to objects not created via this interface */
 		owner = argv->v0.owner;
-		argv->v0.owner = NVDRM_OBJECT_USIF;
+		if (argv->v0.object == 0ULL)
+			argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
+		else
+			argv->v0.owner = NVDRM_OBJECT_USIF;
 	} else
 		goto done;
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index e3c783d0e2ab..62ad0300cfa5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -259,12 +259,6 @@ nvkm_device_pci_10de_0df4[] = {
 };
 
 static const struct nvkm_device_pci_vendor
-nvkm_device_pci_10de_0fcd[] = {
-	{ 0x17aa, 0x3801, NULL, { .War00C800_0 = true } }, /* Lenovo Y510P */
-	{}
-};
-
-static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_0fd2[] = {
 	{ 0x1028, 0x0595, "GeForce GT 640M LE" },
 	{ 0x1028, 0x05b2, "GeForce GT 640M LE" },
@@ -684,7 +678,6 @@ nvkm_device_pci_10de_1189[] = {
 static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_1199[] = {
 	{ 0x1458, 0xd001, "GeForce GTX 760" },
-	{ 0x1462, 0x1106, "GeForce GTX 780M", { .War00C800_0 = true } }, /* Medion Erazer X7827 */
 	{}
 };
 
@@ -695,14 +688,6 @@ nvkm_device_pci_10de_11e3[] = {
 };
 
 static const struct nvkm_device_pci_vendor
-nvkm_device_pci_10de_11fc[] = {
-	{ 0x1179, 0x0001, NULL, { .War00C800_0 = true } }, /* Toshiba Tecra W50 */
-	{ 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
-	{ 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */
-	{}
-};
-
-static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_1247[] = {
 	{ 0x1043, 0x212a, "GeForce GT 635M" },
 	{ 0x1043, 0x212b, "GeForce GT 635M" },
@@ -1356,7 +1341,7 @@ nvkm_device_pci_10de[] = {
 	{ 0x0fc6, "GeForce GTX 650" },
 	{ 0x0fc8, "GeForce GT 740" },
 	{ 0x0fc9, "GeForce GT 730" },
-	{ 0x0fcd, "GeForce GT 755M", nvkm_device_pci_10de_0fcd },
+	{ 0x0fcd, "GeForce GT 755M" },
 	{ 0x0fce, "GeForce GT 640M LE" },
 	{ 0x0fd1, "GeForce GT 650M" },
 	{ 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 },
@@ -1490,7 +1475,7 @@ nvkm_device_pci_10de[] = {
 	{ 0x11e2, "GeForce GTX 765M" },
 	{ 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
 	{ 0x11fa, "Quadro K4000" },
-	{ 0x11fc, "Quadro K2100M", nvkm_device_pci_10de_11fc },
+	{ 0x11fc, "Quadro K2100M" },
 	{ 0x1200, "GeForce GTX 560 Ti" },
 	{ 0x1201, "GeForce GTX 560" },
 	{ 0x1203, "GeForce GTX 460 SE v2" },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index b5b875928aba..74de7a96c22a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -207,6 +207,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
 		const u32 b = beta * gr->ppc_tpc_nr[gpc][ppc];
 		const u32 t = timeslice_mode;
 		const u32 o = PPC_UNIT(gpc, ppc, 0);
+		if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+			continue;
 		mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
 		mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
 		bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
index 194afe910d21..7dacb3cc0668 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
@@ -52,10 +52,12 @@ mmio_list_base:
 #endif
 
 #ifdef INCLUDE_CODE
+#define gpc_addr(reg,addr) /*
+*/	imm32(reg,addr) /*
+*/	or reg NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE
 #define gpc_wr32(addr,reg) /*
+*/	gpc_addr($r14,addr) /*
 */	mov b32 $r15 reg /*
-*/	imm32($r14, addr) /*
-*/	or $r14 NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE /*
 */	call(nv_wr32)
 
 // reports an exception to the host
@@ -161,7 +163,7 @@ init:
 
 #if NV_PGRAPH_GPCX_UNK__SIZE > 0
 	// figure out which, and how many, UNKs are actually present
-	imm32($r14, 0x500c30)
+	gpc_addr($r14, 0x500c30)
 	clear b32 $r2
 	clear b32 $r3
 	clear b32 $r4
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index 64d07df4b8b1..bb820ff28621 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gf117_grgpc_code[] = {
 	0x03f01200,
 	0x0002d000,
 	0x17f104bd,
-	0x10fe0542,
+	0x10fe0545,
 	0x0007f100,
 	0x0003f007,
 	0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gf117_grgpc_code[] = {
 	0x02d00103,
 	0xf104bd00,
 	0xf00c30e7,
-	0x24bd50e3,
-	0x44bd34bd,
-/* 0x0430: init_unk_loop */
-	0xb06821f4,
-	0x0bf400f6,
-	0x01f7f00f,
-	0xfd04f2bb,
-	0x30b6054f,
-/* 0x0445: init_unk_next */
-	0x0120b601,
-	0xb004e0b6,
-	0x1bf40126,
-/* 0x0451: init_unk_done */
-	0x070380e2,
-	0xf1080480,
-	0xf0010027,
-	0x22cf0223,
-	0x9534bd00,
-	0x07f10825,
-	0x03f0c000,
-	0x0005d001,
-	0x07f104bd,
-	0x03f0c100,
-	0x0005d001,
-	0x0e9804bd,
-	0x010f9800,
-	0x015021f5,
-	0xbb002fbb,
-	0x0e98003f,
-	0x020f9801,
-	0x015021f5,
-	0xfd050e98,
-	0x2ebb00ef,
-	0x003ebb00,
-	0x98020e98,
-	0x21f5030f,
-	0x0e980150,
-	0x00effd07,
-	0xbb002ebb,
-	0x35b6003e,
-	0x0007f102,
-	0x0103f0d3,
-	0xbd0003d0,
-	0x0825b604,
-	0xb60635b6,
-	0x30b60120,
-	0x0824b601,
-	0xb90834b6,
-	0x21f5022f,
-	0x2fbb02d3,
-	0x003fbb00,
-	0x010007f1,
-	0xd00203f0,
-	0x04bd0003,
-	0x29f024bd,
-	0x0007f11f,
-	0x0203f008,
-	0xbd0002d0,
-/* 0x0505: main */
-	0x0031f404,
-	0xf00028f4,
-	0x21f424d7,
-	0xf401f439,
-	0xf404e4b0,
-	0x81fe1e18,
-	0x0627f001,
-	0x12fd20bd,
-	0x01e4b604,
-	0xfe051efd,
-	0x21f50018,
-	0x0ef405fa,
-/* 0x0535: main_not_ctx_xfer */
-	0x10ef94d3,
-	0xf501f5f0,
-	0xf4037e21,
-/* 0x0542: ih */
-	0x80f9c60e,
-	0xf90188fe,
-	0xf990f980,
-	0xf9b0f9a0,
-	0xf9e0f9d0,
-	0xf104bdf0,
-	0xf00200a7,
-	0xaacf00a3,
-	0x04abc400,
-	0xf02c0bf4,
-	0xe7f124d7,
-	0xe3f01a00,
-	0x00eecf00,
-	0x1900f7f1,
-	0xcf00f3f0,
-	0x21f400ff,
-	0x01e7f004,
-	0x1d0007f1,
-	0xd00003f0,
-	0x04bd000e,
-/* 0x0590: ih_no_fifo */
-	0x010007f1,
-	0xd00003f0,
-	0x04bd000a,
-	0xe0fcf0fc,
-	0xb0fcd0fc,
-	0x90fca0fc,
-	0x88fe80fc,
-	0xf480fc00,
-	0x01f80032,
-/* 0x05b4: hub_barrier_done */
-	0x9801f7f0,
-	0xfebb040e,
-	0x02ffb904,
-	0x9418e7f1,
-	0xf440e3f0,
-	0x00f89d21,
-/* 0x05cc: ctx_redswitch */
-	0xf120f7f0,
-	0xf0850007,
-	0x0fd00103,
-	0xf004bd00,
-/* 0x05de: ctx_redswitch_delay */
-	0xe2b608e7,
-	0xfd1bf401,
-	0x0800f5f1,
-	0x0200f5f1,
-	0x850007f1,
-	0xd00103f0,
-	0x04bd000f,
-/* 0x05fa: ctx_xfer */
-	0x07f100f8,
-	0x03f08100,
-	0x000fd002,
-	0x11f404bd,
-	0xcc21f507,
-/* 0x060d: ctx_xfer_not_load */
-	0x6a21f505,
-	0xf124bd02,
-	0xf047fc07,
-	0x02d00203,
-	0xf004bd00,
-	0x20b6012c,
-	0xfc07f103,
-	0x0203f04a,
-	0xbd0002d0,
-	0x01acf004,
-	0xf102a5f0,
-	0xf00000b7,
-	0x0c9850b3,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98000c,
-	0x00e7f001,
-	0x016f21f5,
-	0xf101acf0,
-	0xf04000b7,
-	0x0c9850b3,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98010c,
-	0x060f9802,
-	0x0800e7f1,
-	0x016f21f5,
-	0xf001acf0,
-	0xb7f104a5,
-	0xb3f03000,
-	0x040c9850,
-	0xbb0fc4b6,
-	0x0c9800bc,
-	0x030d9802,
-	0xf1080f98,
-	0xf50200e7,
-	0xf5016f21,
-	0xf4025e21,
-	0x12f40601,
-/* 0x06a9: ctx_xfer_post */
-	0x7f21f507,
-/* 0x06ad: ctx_xfer_done */
-	0xb421f502,
-	0x0000f805,
-	0x00000000,
+	0xe5f050e3,
+	0xbd24bd01,
+/* 0x0433: init_unk_loop */
+	0xf444bd34,
+	0xf6b06821,
+	0x0f0bf400,
+	0xbb01f7f0,
+	0x4ffd04f2,
+	0x0130b605,
+/* 0x0448: init_unk_next */
+	0xb60120b6,
+	0x26b004e0,
+	0xe21bf401,
+/* 0x0454: init_unk_done */
+	0x80070380,
+	0x27f10804,
+	0x23f00100,
+	0x0022cf02,
+	0x259534bd,
+	0x0007f108,
+	0x0103f0c0,
+	0xbd0005d0,
+	0x0007f104,
+	0x0103f0c1,
+	0xbd0005d0,
+	0x000e9804,
+	0xf5010f98,
+	0xbb015021,
+	0x3fbb002f,
+	0x010e9800,
+	0xf5020f98,
+	0x98015021,
+	0xeffd050e,
+	0x002ebb00,
+	0x98003ebb,
+	0x0f98020e,
+	0x5021f503,
+	0x070e9801,
+	0xbb00effd,
+	0x3ebb002e,
+	0x0235b600,
+	0xd30007f1,
+	0xd00103f0,
+	0x04bd0003,
+	0xb60825b6,
+	0x20b60635,
+	0x0130b601,
+	0xb60824b6,
+	0x2fb90834,
+	0xd321f502,
+	0x002fbb02,
+	0xf1003fbb,
+	0xf0010007,
+	0x03d00203,
+	0xbd04bd00,
+	0x1f29f024,
+	0x080007f1,
+	0xd00203f0,
+	0x04bd0002,
+/* 0x0508: main */
+	0xf40031f4,
+	0xd7f00028,
+	0x3921f424,
+	0xb0f401f4,
+	0x18f404e4,
+	0x0181fe1e,
+	0xbd0627f0,
+	0x0412fd20,
+	0xfd01e4b6,
+	0x18fe051e,
+	0xfd21f500,
+	0xd30ef405,
+/* 0x0538: main_not_ctx_xfer */
+	0xf010ef94,
+	0x21f501f5,
+	0x0ef4037e,
+/* 0x0545: ih */
+	0xfe80f9c6,
+	0x80f90188,
+	0xa0f990f9,
+	0xd0f9b0f9,
+	0xf0f9e0f9,
+	0xa7f104bd,
+	0xa3f00200,
+	0x00aacf00,
+	0xf404abc4,
+	0xd7f02c0b,
+	0x00e7f124,
+	0x00e3f01a,
+	0xf100eecf,
+	0xf01900f7,
+	0xffcf00f3,
+	0x0421f400,
+	0xf101e7f0,
+	0xf01d0007,
+	0x0ed00003,
+/* 0x0593: ih_no_fifo */
+	0xf104bd00,
+	0xf0010007,
+	0x0ad00003,
+	0xfc04bd00,
+	0xfce0fcf0,
+	0xfcb0fcd0,
+	0xfc90fca0,
+	0x0088fe80,
+	0x32f480fc,
+/* 0x05b7: hub_barrier_done */
+	0xf001f800,
+	0x0e9801f7,
+	0x04febb04,
+	0xf102ffb9,
+	0xf09418e7,
+	0x21f440e3,
+/* 0x05cf: ctx_redswitch */
+	0xf000f89d,
+	0x07f120f7,
+	0x03f08500,
+	0x000fd001,
+	0xe7f004bd,
+/* 0x05e1: ctx_redswitch_delay */
+	0x01e2b608,
+	0xf1fd1bf4,
+	0xf10800f5,
+	0xf10200f5,
+	0xf0850007,
+	0x0fd00103,
+	0xf804bd00,
+/* 0x05fd: ctx_xfer */
+	0x0007f100,
+	0x0203f081,
+	0xbd000fd0,
+	0x0711f404,
+	0x05cf21f5,
+/* 0x0610: ctx_xfer_not_load */
+	0x026a21f5,
+	0x07f124bd,
+	0x03f047fc,
+	0x0002d002,
+	0x2cf004bd,
+	0x0320b601,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd0002,
+	0xf001acf0,
+	0xb7f102a5,
+	0xb3f00000,
+	0x040c9850,
+	0xbb0fc4b6,
+	0x0c9800bc,
+	0x010d9800,
+	0xf500e7f0,
+	0xf0016f21,
+	0xb7f101ac,
+	0xb3f04000,
+	0x040c9850,
+	0xbb0fc4b6,
+	0x0c9800bc,
+	0x020d9801,
+	0xf1060f98,
+	0xf50800e7,
+	0xf0016f21,
+	0xa5f001ac,
+	0x00b7f104,
+	0x50b3f030,
+	0xb6040c98,
+	0xbcbb0fc4,
+	0x020c9800,
+	0x98030d98,
+	0xe7f1080f,
+	0x21f50200,
+	0x21f5016f,
+	0x01f4025e,
+	0x0712f406,
+/* 0x06ac: ctx_xfer_post */
+	0x027f21f5,
+/* 0x06b0: ctx_xfer_done */
+	0x05b721f5,
+	0x000000f8,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 2f596433c222..911976d20940 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gk104_grgpc_code[] = {
 	0x03f01200,
 	0x0002d000,
 	0x17f104bd,
-	0x10fe0542,
+	0x10fe0545,
 	0x0007f100,
 	0x0003f007,
 	0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gk104_grgpc_code[] = {
 	0x02d00103,
 	0xf104bd00,
 	0xf00c30e7,
-	0x24bd50e3,
-	0x44bd34bd,
-/* 0x0430: init_unk_loop */
-	0xb06821f4,
-	0x0bf400f6,
-	0x01f7f00f,
-	0xfd04f2bb,
-	0x30b6054f,
-/* 0x0445: init_unk_next */
-	0x0120b601,
-	0xb004e0b6,
-	0x1bf40126,
-/* 0x0451: init_unk_done */
-	0x070380e2,
-	0xf1080480,
-	0xf0010027,
-	0x22cf0223,
-	0x9534bd00,
-	0x07f10825,
-	0x03f0c000,
-	0x0005d001,
-	0x07f104bd,
-	0x03f0c100,
-	0x0005d001,
-	0x0e9804bd,
-	0x010f9800,
-	0x015021f5,
-	0xbb002fbb,
-	0x0e98003f,
-	0x020f9801,
-	0x015021f5,
-	0xfd050e98,
-	0x2ebb00ef,
-	0x003ebb00,
-	0x98020e98,
-	0x21f5030f,
-	0x0e980150,
-	0x00effd07,
-	0xbb002ebb,
-	0x35b6003e,
-	0x0007f102,
-	0x0103f0d3,
-	0xbd0003d0,
-	0x0825b604,
-	0xb60635b6,
-	0x30b60120,
-	0x0824b601,
-	0xb90834b6,
-	0x21f5022f,
-	0x2fbb02d3,
-	0x003fbb00,
-	0x010007f1,
-	0xd00203f0,
-	0x04bd0003,
-	0x29f024bd,
-	0x0007f11f,
-	0x0203f008,
-	0xbd0002d0,
-/* 0x0505: main */
-	0x0031f404,
-	0xf00028f4,
-	0x21f424d7,
-	0xf401f439,
-	0xf404e4b0,
-	0x81fe1e18,
-	0x0627f001,
-	0x12fd20bd,
-	0x01e4b604,
-	0xfe051efd,
-	0x21f50018,
-	0x0ef405fa,
-/* 0x0535: main_not_ctx_xfer */
-	0x10ef94d3,
-	0xf501f5f0,
-	0xf4037e21,
-/* 0x0542: ih */
-	0x80f9c60e,
-	0xf90188fe,
-	0xf990f980,
-	0xf9b0f9a0,
-	0xf9e0f9d0,
-	0xf104bdf0,
-	0xf00200a7,
-	0xaacf00a3,
-	0x04abc400,
-	0xf02c0bf4,
-	0xe7f124d7,
-	0xe3f01a00,
-	0x00eecf00,
-	0x1900f7f1,
-	0xcf00f3f0,
-	0x21f400ff,
-	0x01e7f004,
-	0x1d0007f1,
-	0xd00003f0,
-	0x04bd000e,
-/* 0x0590: ih_no_fifo */
-	0x010007f1,
-	0xd00003f0,
-	0x04bd000a,
-	0xe0fcf0fc,
-	0xb0fcd0fc,
-	0x90fca0fc,
-	0x88fe80fc,
-	0xf480fc00,
-	0x01f80032,
-/* 0x05b4: hub_barrier_done */
-	0x9801f7f0,
-	0xfebb040e,
-	0x02ffb904,
-	0x9418e7f1,
-	0xf440e3f0,
-	0x00f89d21,
-/* 0x05cc: ctx_redswitch */
-	0xf120f7f0,
-	0xf0850007,
-	0x0fd00103,
-	0xf004bd00,
-/* 0x05de: ctx_redswitch_delay */
-	0xe2b608e7,
-	0xfd1bf401,
-	0x0800f5f1,
-	0x0200f5f1,
-	0x850007f1,
-	0xd00103f0,
-	0x04bd000f,
-/* 0x05fa: ctx_xfer */
-	0x07f100f8,
-	0x03f08100,
-	0x000fd002,
-	0x11f404bd,
-	0xcc21f507,
-/* 0x060d: ctx_xfer_not_load */
-	0x6a21f505,
-	0xf124bd02,
-	0xf047fc07,
-	0x02d00203,
-	0xf004bd00,
-	0x20b6012c,
-	0xfc07f103,
-	0x0203f04a,
-	0xbd0002d0,
-	0x01acf004,
-	0xf102a5f0,
-	0xf00000b7,
-	0x0c9850b3,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98000c,
-	0x00e7f001,
-	0x016f21f5,
-	0xf101acf0,
-	0xf04000b7,
-	0x0c9850b3,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98010c,
-	0x060f9802,
-	0x0800e7f1,
-	0x016f21f5,
-	0xf001acf0,
-	0xb7f104a5,
-	0xb3f03000,
-	0x040c9850,
-	0xbb0fc4b6,
-	0x0c9800bc,
-	0x030d9802,
-	0xf1080f98,
-	0xf50200e7,
-	0xf5016f21,
-	0xf4025e21,
-	0x12f40601,
-/* 0x06a9: ctx_xfer_post */
-	0x7f21f507,
-/* 0x06ad: ctx_xfer_done */
-	0xb421f502,
-	0x0000f805,
-	0x00000000,
+	0xe5f050e3,
+	0xbd24bd01,
+/* 0x0433: init_unk_loop */
+	0xf444bd34,
+	0xf6b06821,
+	0x0f0bf400,
+	0xbb01f7f0,
+	0x4ffd04f2,
+	0x0130b605,
+/* 0x0448: init_unk_next */
+	0xb60120b6,
+	0x26b004e0,
+	0xe21bf401,
+/* 0x0454: init_unk_done */
+	0x80070380,
+	0x27f10804,
+	0x23f00100,
+	0x0022cf02,
+	0x259534bd,
+	0x0007f108,
+	0x0103f0c0,
+	0xbd0005d0,
+	0x0007f104,
+	0x0103f0c1,
+	0xbd0005d0,
+	0x000e9804,
+	0xf5010f98,
+	0xbb015021,
+	0x3fbb002f,
+	0x010e9800,
+	0xf5020f98,
+	0x98015021,
+	0xeffd050e,
+	0x002ebb00,
+	0x98003ebb,
+	0x0f98020e,
+	0x5021f503,
+	0x070e9801,
+	0xbb00effd,
+	0x3ebb002e,
+	0x0235b600,
+	0xd30007f1,
+	0xd00103f0,
+	0x04bd0003,
+	0xb60825b6,
+	0x20b60635,
+	0x0130b601,
+	0xb60824b6,
+	0x2fb90834,
+	0xd321f502,
+	0x002fbb02,
+	0xf1003fbb,
+	0xf0010007,
+	0x03d00203,
+	0xbd04bd00,
+	0x1f29f024,
+	0x080007f1,
+	0xd00203f0,
+	0x04bd0002,
+/* 0x0508: main */
+	0xf40031f4,
+	0xd7f00028,
+	0x3921f424,
+	0xb0f401f4,
+	0x18f404e4,
+	0x0181fe1e,
+	0xbd0627f0,
+	0x0412fd20,
+	0xfd01e4b6,
+	0x18fe051e,
+	0xfd21f500,
+	0xd30ef405,
+/* 0x0538: main_not_ctx_xfer */
+	0xf010ef94,
+	0x21f501f5,
+	0x0ef4037e,
+/* 0x0545: ih */
+	0xfe80f9c6,
+	0x80f90188,
+	0xa0f990f9,
+	0xd0f9b0f9,
+	0xf0f9e0f9,
+	0xa7f104bd,
+	0xa3f00200,
+	0x00aacf00,
+	0xf404abc4,
+	0xd7f02c0b,
+	0x00e7f124,
+	0x00e3f01a,
+	0xf100eecf,
+	0xf01900f7,
+	0xffcf00f3,
+	0x0421f400,
+	0xf101e7f0,
+	0xf01d0007,
+	0x0ed00003,
+/* 0x0593: ih_no_fifo */
+	0xf104bd00,
+	0xf0010007,
+	0x0ad00003,
+	0xfc04bd00,
+	0xfce0fcf0,
+	0xfcb0fcd0,
+	0xfc90fca0,
+	0x0088fe80,
+	0x32f480fc,
+/* 0x05b7: hub_barrier_done */
+	0xf001f800,
+	0x0e9801f7,
+	0x04febb04,
+	0xf102ffb9,
+	0xf09418e7,
+	0x21f440e3,
+/* 0x05cf: ctx_redswitch */
+	0xf000f89d,
+	0x07f120f7,
+	0x03f08500,
+	0x000fd001,
+	0xe7f004bd,
+/* 0x05e1: ctx_redswitch_delay */
+	0x01e2b608,
+	0xf1fd1bf4,
+	0xf10800f5,
+	0xf10200f5,
+	0xf0850007,
+	0x0fd00103,
+	0xf804bd00,
+/* 0x05fd: ctx_xfer */
+	0x0007f100,
+	0x0203f081,
+	0xbd000fd0,
+	0x0711f404,
+	0x05cf21f5,
+/* 0x0610: ctx_xfer_not_load */
+	0x026a21f5,
+	0x07f124bd,
+	0x03f047fc,
+	0x0002d002,
+	0x2cf004bd,
+	0x0320b601,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd0002,
+	0xf001acf0,
+	0xb7f102a5,
+	0xb3f00000,
+	0x040c9850,
+	0xbb0fc4b6,
+	0x0c9800bc,
+	0x010d9800,
+	0xf500e7f0,
+	0xf0016f21,
+	0xb7f101ac,
+	0xb3f04000,
+	0x040c9850,
+	0xbb0fc4b6,
+	0x0c9800bc,
+	0x020d9801,
+	0xf1060f98,
+	0xf50800e7,
+	0xf0016f21,
+	0xa5f001ac,
+	0x00b7f104,
+	0x50b3f030,
+	0xb6040c98,
+	0xbcbb0fc4,
+	0x020c9800,
+	0x98030d98,
+	0xe7f1080f,
+	0x21f50200,
+	0x21f5016f,
+	0x01f4025e,
+	0x0712f406,
+/* 0x06ac: ctx_xfer_post */
+	0x027f21f5,
+/* 0x06b0: ctx_xfer_done */
+	0x05b721f5,
+	0x000000f8,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index ee8e54db8fc9..1c6e11b05df2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -314,7 +314,7 @@ uint32_t gk110_grgpc_code[] = {
 	0x03f01200,
 	0x0002d000,
 	0x17f104bd,
-	0x10fe0542,
+	0x10fe0545,
 	0x0007f100,
 	0x0003f007,
 	0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gk110_grgpc_code[] = {
 	0x02d00103,
 	0xf104bd00,
 	0xf00c30e7,
-	0x24bd50e3,
-	0x44bd34bd,
-/* 0x0430: init_unk_loop */
-	0xb06821f4,
-	0x0bf400f6,
-	0x01f7f00f,
-	0xfd04f2bb,
-	0x30b6054f,
-/* 0x0445: init_unk_next */
-	0x0120b601,
-	0xb004e0b6,
-	0x1bf40226,
-/* 0x0451: init_unk_done */
-	0x070380e2,
-	0xf1080480,
-	0xf0010027,
-	0x22cf0223,
-	0x9534bd00,
-	0x07f10825,
-	0x03f0c000,
-	0x0005d001,
-	0x07f104bd,
-	0x03f0c100,
-	0x0005d001,
-	0x0e9804bd,
-	0x010f9800,
-	0x015021f5,
-	0xbb002fbb,
-	0x0e98003f,
-	0x020f9801,
-	0x015021f5,
-	0xfd050e98,
-	0x2ebb00ef,
-	0x003ebb00,
-	0x98020e98,
-	0x21f5030f,
-	0x0e980150,
-	0x00effd07,
-	0xbb002ebb,
-	0x35b6003e,
-	0x0007f102,
-	0x0103f0d3,
-	0xbd0003d0,
-	0x0825b604,
-	0xb60635b6,
-	0x30b60120,
-	0x0824b601,
-	0xb90834b6,
-	0x21f5022f,
-	0x2fbb02d3,
-	0x003fbb00,
-	0x010007f1,
-	0xd00203f0,
-	0x04bd0003,
-	0x29f024bd,
-	0x0007f11f,
-	0x0203f030,
-	0xbd0002d0,
-/* 0x0505: main */
-	0x0031f404,
-	0xf00028f4,
-	0x21f424d7,
-	0xf401f439,
-	0xf404e4b0,
-	0x81fe1e18,
-	0x0627f001,
-	0x12fd20bd,
-	0x01e4b604,
-	0xfe051efd,
-	0x21f50018,
-	0x0ef405fa,
-/* 0x0535: main_not_ctx_xfer */
-	0x10ef94d3,
-	0xf501f5f0,
-	0xf4037e21,
-/* 0x0542: ih */
-	0x80f9c60e,
-	0xf90188fe,
-	0xf990f980,
-	0xf9b0f9a0,
-	0xf9e0f9d0,
-	0xf104bdf0,
-	0xf00200a7,
-	0xaacf00a3,
-	0x04abc400,
-	0xf02c0bf4,
-	0xe7f124d7,
-	0xe3f01a00,
-	0x00eecf00,
-	0x1900f7f1,
-	0xcf00f3f0,
-	0x21f400ff,
-	0x01e7f004,
-	0x1d0007f1,
-	0xd00003f0,
-	0x04bd000e,
-/* 0x0590: ih_no_fifo */
-	0x010007f1,
-	0xd00003f0,
-	0x04bd000a,
-	0xe0fcf0fc,
-	0xb0fcd0fc,
-	0x90fca0fc,
-	0x88fe80fc,
-	0xf480fc00,
-	0x01f80032,
-/* 0x05b4: hub_barrier_done */
-	0x9801f7f0,
-	0xfebb040e,
-	0x02ffb904,
-	0x9418e7f1,
-	0xf440e3f0,
-	0x00f89d21,
-/* 0x05cc: ctx_redswitch */
-	0xf120f7f0,
-	0xf0850007,
-	0x0fd00103,
-	0xf004bd00,
-/* 0x05de: ctx_redswitch_delay */
-	0xe2b608e7,
-	0xfd1bf401,
-	0x0800f5f1,
-	0x0200f5f1,
-	0x850007f1,
-	0xd00103f0,
-	0x04bd000f,
-/* 0x05fa: ctx_xfer */
-	0x07f100f8,
-	0x03f08100,
-	0x000fd002,
-	0x11f404bd,
-	0xcc21f507,
-/* 0x060d: ctx_xfer_not_load */
-	0x6a21f505,
-	0xf124bd02,
-	0xf047fc07,
-	0x02d00203,
-	0xf004bd00,
-	0x20b6012c,
-	0xfc07f103,
-	0x0203f04a,
-	0xbd0002d0,
-	0x01acf004,
-	0xf102a5f0,
-	0xf00000b7,
-	0x0c9850b3,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98000c,
-	0x00e7f001,
-	0x016f21f5,
-	0xf101acf0,
-	0xf04000b7,
-	0x0c9850b3,
-	0x0fc4b604,
-	0x9800bcbb,
-	0x0d98010c,
-	0x060f9802,
-	0x0800e7f1,
-	0x016f21f5,
-	0xf001acf0,
-	0xb7f104a5,
-	0xb3f03000,
-	0x040c9850,
-	0xbb0fc4b6,
-	0x0c9800bc,
-	0x030d9802,
-	0xf1080f98,
-	0xf50200e7,
-	0xf5016f21,
-	0xf4025e21,
-	0x12f40601,
-/* 0x06a9: ctx_xfer_post */
-	0x7f21f507,
-/* 0x06ad: ctx_xfer_done */
-	0xb421f502,
-	0x0000f805,
-	0x00000000,
+	0xe5f050e3,
+	0xbd24bd01,
+/* 0x0433: init_unk_loop */
+	0xf444bd34,
+	0xf6b06821,
+	0x0f0bf400,
+	0xbb01f7f0,
+	0x4ffd04f2,
+	0x0130b605,
+/* 0x0448: init_unk_next */
+	0xb60120b6,
+	0x26b004e0,
+	0xe21bf402,
+/* 0x0454: init_unk_done */
+	0x80070380,
+	0x27f10804,
+	0x23f00100,
+	0x0022cf02,
+	0x259534bd,
+	0x0007f108,
+	0x0103f0c0,
+	0xbd0005d0,
+	0x0007f104,
+	0x0103f0c1,
+	0xbd0005d0,
+	0x000e9804,
+	0xf5010f98,
+	0xbb015021,
+	0x3fbb002f,
+	0x010e9800,
+	0xf5020f98,
+	0x98015021,
+	0xeffd050e,
+	0x002ebb00,
+	0x98003ebb,
+	0x0f98020e,
+	0x5021f503,
+	0x070e9801,
+	0xbb00effd,
+	0x3ebb002e,
+	0x0235b600,
+	0xd30007f1,
+	0xd00103f0,
+	0x04bd0003,
+	0xb60825b6,
+	0x20b60635,
+	0x0130b601,
+	0xb60824b6,
+	0x2fb90834,
+	0xd321f502,
+	0x002fbb02,
+	0xf1003fbb,
+	0xf0010007,
+	0x03d00203,
+	0xbd04bd00,
+	0x1f29f024,
+	0x300007f1,
+	0xd00203f0,
+	0x04bd0002,
+/* 0x0508: main */
+	0xf40031f4,
+	0xd7f00028,
+	0x3921f424,
+	0xb0f401f4,
+	0x18f404e4,
+	0x0181fe1e,
+	0xbd0627f0,
+	0x0412fd20,
+	0xfd01e4b6,
+	0x18fe051e,
+	0xfd21f500,
+	0xd30ef405,
+/* 0x0538: main_not_ctx_xfer */
+	0xf010ef94,
+	0x21f501f5,
+	0x0ef4037e,
+/* 0x0545: ih */
+	0xfe80f9c6,
+	0x80f90188,
+	0xa0f990f9,
+	0xd0f9b0f9,
+	0xf0f9e0f9,
+	0xa7f104bd,
+	0xa3f00200,
+	0x00aacf00,
+	0xf404abc4,
+	0xd7f02c0b,
+	0x00e7f124,
+	0x00e3f01a,
+	0xf100eecf,
+	0xf01900f7,
+	0xffcf00f3,
+	0x0421f400,
+	0xf101e7f0,
+	0xf01d0007,
+	0x0ed00003,
+/* 0x0593: ih_no_fifo */
+	0xf104bd00,
+	0xf0010007,
+	0x0ad00003,
+	0xfc04bd00,
+	0xfce0fcf0,
+	0xfcb0fcd0,
+	0xfc90fca0,
+	0x0088fe80,
+	0x32f480fc,
+/* 0x05b7: hub_barrier_done */
+	0xf001f800,
+	0x0e9801f7,
+	0x04febb04,
+	0xf102ffb9,
+	0xf09418e7,
+	0x21f440e3,
+/* 0x05cf: ctx_redswitch */
+	0xf000f89d,
+	0x07f120f7,
+	0x03f08500,
+	0x000fd001,
+	0xe7f004bd,
+/* 0x05e1: ctx_redswitch_delay */
+	0x01e2b608,
+	0xf1fd1bf4,
+	0xf10800f5,
+	0xf10200f5,
+	0xf0850007,
+	0x0fd00103,
+	0xf804bd00,
+/* 0x05fd: ctx_xfer */
+	0x0007f100,
+	0x0203f081,
+	0xbd000fd0,
+	0x0711f404,
+	0x05cf21f5,
+/* 0x0610: ctx_xfer_not_load */
+	0x026a21f5,
+	0x07f124bd,
+	0x03f047fc,
+	0x0002d002,
+	0x2cf004bd,
+	0x0320b601,
+	0x4afc07f1,
+	0xd00203f0,
+	0x04bd0002,
+	0xf001acf0,
+	0xb7f102a5,
+	0xb3f00000,
+	0x040c9850,
+	0xbb0fc4b6,
+	0x0c9800bc,
+	0x010d9800,
+	0xf500e7f0,
+	0xf0016f21,
+	0xb7f101ac,
+	0xb3f04000,
+	0x040c9850,
+	0xbb0fc4b6,
+	0x0c9800bc,
+	0x020d9801,
+	0xf1060f98,
+	0xf50800e7,
+	0xf0016f21,
+	0xa5f001ac,
+	0x00b7f104,
+	0x50b3f030,
+	0xb6040c98,
+	0xbcbb0fc4,
+	0x020c9800,
+	0x98030d98,
+	0xe7f1080f,
+	0x21f50200,
+	0x21f5016f,
+	0x01f4025e,
+	0x0712f406,
+/* 0x06ac: ctx_xfer_post */
+	0x027f21f5,
+/* 0x06b0: ctx_xfer_done */
+	0x05b721f5,
+	0x000000f8,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index fbcc342f896f..84af7ec6a78e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -276,7 +276,7 @@ uint32_t gk208_grgpc_code[] = {
 	0x02020014,
 	0xf6120040,
 	0x04bd0002,
-	0xfe048141,
+	0xfe048441,
 	0x00400010,
 	0x0000f607,
 	0x040204bd,
@@ -295,165 +295,165 @@ uint32_t gk208_grgpc_code[] = {
 	0x01c90080,
 	0xbd0002f6,
 	0x0c308e04,
-	0xbd24bd50,
-/* 0x0383: init_unk_loop */
-	0x7e44bd34,
-	0xb0000065,
-	0x0bf400f6,
-	0xbb010f0e,
-	0x4ffd04f2,
-	0x0130b605,
-/* 0x0398: init_unk_next */
-	0xb60120b6,
-	0x26b004e0,
-	0xe21bf401,
-/* 0x03a4: init_unk_done */
-	0xb50703b5,
-	0x00820804,
-	0x22cf0201,
-	0x9534bd00,
-	0x00800825,
-	0x05f601c0,
-	0x8004bd00,
-	0xf601c100,
-	0x04bd0005,
-	0x98000e98,
-	0x207e010f,
-	0x2fbb0001,
-	0x003fbb00,
-	0x98010e98,
-	0x207e020f,
-	0x0e980001,
-	0x00effd05,
-	0xbb002ebb,
-	0x0e98003e,
-	0x030f9802,
-	0x0001207e,
-	0xfd070e98,
-	0x2ebb00ef,
-	0x003ebb00,
-	0x800235b6,
-	0xf601d300,
-	0x04bd0003,
-	0xb60825b6,
-	0x20b60635,
-	0x0130b601,
-	0xb60824b6,
-	0x2fb20834,
-	0x0002687e,
-	0xbb002fbb,
-	0x0080003f,
-	0x03f60201,
-	0xbd04bd00,
-	0x1f29f024,
-	0x02300080,
-	0xbd0002f6,
-/* 0x0445: main */
-	0x0031f404,
-	0x0d0028f4,
-	0x00377e24,
-	0xf401f400,
-	0xf404e4b0,
-	0x81fe1d18,
-	0xbd060201,
-	0x0412fd20,
-	0xfd01e4b6,
-	0x18fe051e,
-	0x05187e00,
-	0xd40ef400,
-/* 0x0474: main_not_ctx_xfer */
-	0xf010ef94,
-	0xf87e01f5,
-	0x0ef40002,
-/* 0x0481: ih */
-	0xfe80f9c7,
-	0x80f90188,
-	0xa0f990f9,
-	0xd0f9b0f9,
-	0xf0f9e0f9,
-	0x004a04bd,
-	0x00aacf02,
-	0xf404abc4,
-	0x240d1f0b,
-	0xcf1a004e,
-	0x004f00ee,
-	0x00ffcf19,
-	0x0000047e,
-	0x0040010e,
-	0x000ef61d,
-/* 0x04be: ih_no_fifo */
-	0x004004bd,
-	0x000af601,
-	0xf0fc04bd,
-	0xd0fce0fc,
-	0xa0fcb0fc,
-	0x80fc90fc,
-	0xfc0088fe,
-	0x0032f480,
-/* 0x04de: hub_barrier_done */
-	0x010f01f8,
-	0xbb040e98,
-	0xffb204fe,
-	0x4094188e,
-	0x00008f7e,
-/* 0x04f2: ctx_redswitch */
-	0x200f00f8,
-	0x01850080,
-	0xbd000ff6,
-/* 0x04ff: ctx_redswitch_delay */
-	0xb6080e04,
-	0x1bf401e2,
-	0x00f5f1fd,
-	0x00f5f108,
-	0x85008002,
-	0x000ff601,
-	0x00f804bd,
-/* 0x0518: ctx_xfer */
-	0x02810080,
-	0xbd000ff6,
-	0x0711f404,
-	0x0004f27e,
-/* 0x0528: ctx_xfer_not_load */
-	0x0002167e,
-	0xfc8024bd,
-	0x02f60247,
-	0xf004bd00,
-	0x20b6012c,
-	0x4afc8003,
-	0x0002f602,
-	0xacf004bd,
-	0x02a5f001,
-	0x5000008b,
-	0xb6040c98,
-	0xbcbb0fc4,
-	0x000c9800,
-	0x0e010d98,
-	0x013d7e00,
-	0x01acf000,
-	0x5040008b,
-	0xb6040c98,
-	0xbcbb0fc4,
-	0x010c9800,
-	0x98020d98,
-	0x004e060f,
-	0x013d7e08,
-	0x01acf000,
-	0x8b04a5f0,
-	0x98503000,
-	0xc4b6040c,
-	0x00bcbb0f,
-	0x98020c98,
-	0x0f98030d,
-	0x02004e08,
-	0x00013d7e,
-	0x00020a7e,
-	0xf40601f4,
-/* 0x05b2: ctx_xfer_post */
-	0x277e0712,
-/* 0x05b6: ctx_xfer_done */
-	0xde7e0002,
-	0x00f80004,
-	0x00000000,
+	0x01e5f050,
+	0x34bd24bd,
+/* 0x0386: init_unk_loop */
+	0x657e44bd,
+	0xf6b00000,
+	0x0e0bf400,
+	0xf2bb010f,
+	0x054ffd04,
+/* 0x039b: init_unk_next */
+	0xb60130b6,
+	0xe0b60120,
+	0x0126b004,
+/* 0x03a7: init_unk_done */
+	0xb5e21bf4,
+	0x04b50703,
+	0x01008208,
+	0x0022cf02,
+	0x259534bd,
+	0xc0008008,
+	0x0005f601,
+	0x008004bd,
+	0x05f601c1,
+	0x9804bd00,
+	0x0f98000e,
+	0x01207e01,
+	0x002fbb00,
+	0x98003fbb,
+	0x0f98010e,
+	0x01207e02,
+	0x050e9800,
+	0xbb00effd,
+	0x3ebb002e,
+	0x020e9800,
+	0x7e030f98,
+	0x98000120,
+	0xeffd070e,
+	0x002ebb00,
+	0xb6003ebb,
+	0x00800235,
+	0x03f601d3,
+	0xb604bd00,
+	0x35b60825,
+	0x0120b606,
+	0xb60130b6,
+	0x34b60824,
+	0x7e2fb208,
+	0xbb000268,
+	0x3fbb002f,
+	0x01008000,
+	0x0003f602,
+	0x24bd04bd,
+	0x801f29f0,
+	0xf6023000,
+	0x04bd0002,
+/* 0x0448: main */
+	0xf40031f4,
+	0x240d0028,
+	0x0000377e,
+	0xb0f401f4,
+	0x18f404e4,
+	0x0181fe1d,
+	0x20bd0602,
+	0xb60412fd,
+	0x1efd01e4,
+	0x0018fe05,
+	0x00051b7e,
+/* 0x0477: main_not_ctx_xfer */
+	0x94d40ef4,
+	0xf5f010ef,
+	0x02f87e01,
+	0xc70ef400,
+/* 0x0484: ih */
+	0x88fe80f9,
+	0xf980f901,
+	0xf9a0f990,
+	0xf9d0f9b0,
+	0xbdf0f9e0,
+	0x02004a04,
+	0xc400aacf,
+	0x0bf404ab,
+	0x4e240d1f,
+	0xeecf1a00,
+	0x19004f00,
+	0x7e00ffcf,
+	0x0e000004,
+	0x1d004001,
+	0xbd000ef6,
+/* 0x04c1: ih_no_fifo */
+	0x01004004,
+	0xbd000af6,
+	0xfcf0fc04,
+	0xfcd0fce0,
+	0xfca0fcb0,
+	0xfe80fc90,
+	0x80fc0088,
+	0xf80032f4,
+/* 0x04e1: hub_barrier_done */
+	0x98010f01,
+	0xfebb040e,
+	0x8effb204,
+	0x7e409418,
+	0xf800008f,
+/* 0x04f5: ctx_redswitch */
+	0x80200f00,
+	0xf6018500,
+	0x04bd000f,
+/* 0x0502: ctx_redswitch_delay */
+	0xe2b6080e,
+	0xfd1bf401,
+	0x0800f5f1,
+	0x0200f5f1,
+	0x01850080,
+	0xbd000ff6,
+/* 0x051b: ctx_xfer */
+	0x8000f804,
+	0xf6028100,
+	0x04bd000f,
+	0x7e0711f4,
+/* 0x052b: ctx_xfer_not_load */
+	0x7e0004f5,
+	0xbd000216,
+	0x47fc8024,
+	0x0002f602,
+	0x2cf004bd,
+	0x0320b601,
+	0x024afc80,
+	0xbd0002f6,
+	0x01acf004,
+	0x8b02a5f0,
+	0x98500000,
+	0xc4b6040c,
+	0x00bcbb0f,
+	0x98000c98,
+	0x000e010d,
+	0x00013d7e,
+	0x8b01acf0,
+	0x98504000,
+	0xc4b6040c,
+	0x00bcbb0f,
+	0x98010c98,
+	0x0f98020d,
+	0x08004e06,
+	0x00013d7e,
+	0xf001acf0,
+	0x008b04a5,
+	0x0c985030,
+	0x0fc4b604,
+	0x9800bcbb,
+	0x0d98020c,
+	0x080f9803,
+	0x7e02004e,
+	0x7e00013d,
+	0xf400020a,
+	0x12f40601,
+/* 0x05b5: ctx_xfer_post */
+	0x02277e07,
+/* 0x05b9: ctx_xfer_done */
+	0x04e17e00,
+	0x0000f800,
 	0x00000000,
 	0x00000000,
 	0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index 51f5c3c6e966..11bf363a6ae9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -289,7 +289,7 @@ uint32_t gm107_grgpc_code[] = {
289 0x020014fe, 289 0x020014fe,
290 0x12004002, 290 0x12004002,
291 0xbd0002f6, 291 0xbd0002f6,
292 0x05b04104, 292 0x05b34104,
293 0x400010fe, 293 0x400010fe,
294 0x00f60700, 294 0x00f60700,
295 0x0204bd00, 295 0x0204bd00,
@@ -308,259 +308,259 @@ uint32_t gm107_grgpc_code[] = {
308 0xc900800f, 308 0xc900800f,
309 0x0002f601, 309 0x0002f601,
310 0x308e04bd, 310 0x308e04bd,
311 0x24bd500c, 311 0xe5f0500c,
312 0x44bd34bd, 312 0xbd24bd01,
313/* 0x03b0: init_unk_loop */ 313/* 0x03b3: init_unk_loop */
314 0x0000657e, 314 0x7e44bd34,
315 0xf400f6b0, 315 0xb0000065,
316 0x010f0e0b, 316 0x0bf400f6,
317 0xfd04f2bb, 317 0xbb010f0e,
318 0x30b6054f, 318 0x4ffd04f2,
319/* 0x03c5: init_unk_next */ 319 0x0130b605,
320 0x0120b601, 320/* 0x03c8: init_unk_next */
321 0xb004e0b6, 321 0xb60120b6,
322 0x1bf40226, 322 0x26b004e0,
323/* 0x03d1: init_unk_done */ 323 0xe21bf402,
324 0x0703b5e2, 324/* 0x03d4: init_unk_done */
325 0x820804b5, 325 0xb50703b5,
326 0xcf020100, 326 0x00820804,
327 0x34bd0022, 327 0x22cf0201,
328 0x80082595, 328 0x9534bd00,
329 0xf601c000, 329 0x00800825,
330 0x05f601c0,
331 0x8004bd00,
332 0xf601c100,
330 0x04bd0005, 333 0x04bd0005,
331 0x01c10080, 334 0x98000e98,
332 0xbd0005f6, 335 0x207e010f,
333 0x000e9804, 336 0x2fbb0001,
334 0x7e010f98, 337 0x003fbb00,
335 0xbb000120, 338 0x98010e98,
336 0x3fbb002f, 339 0x207e020f,
337 0x010e9800, 340 0x0e980001,
338 0x7e020f98, 341 0x00effd05,
339 0x98000120, 342 0xbb002ebb,
340 0xeffd050e, 343 0x0e98003e,
341 0x002ebb00, 344 0x030f9802,
342 0x98003ebb, 345 0x0001207e,
343 0x0f98020e, 346 0xfd070e98,
344 0x01207e03, 347 0x2ebb00ef,
345 0x070e9800, 348 0x003ebb00,
346 0xbb00effd, 349 0x800235b6,
347 0x3ebb002e, 350 0xf601d300,
348 0x0235b600, 351 0x04bd0003,
349 0x01d30080, 352 0xb60825b6,
350 0xbd0003f6, 353 0x20b60635,
351 0x0825b604, 354 0x0130b601,
352 0xb60635b6, 355 0xb60824b6,
353 0x30b60120, 356 0x2fb20834,
354 0x0824b601, 357 0x0002687e,
355 0xb20834b6, 358 0xbb002fbb,
356 0x02687e2f, 359 0x3f0f003f,
357 0x002fbb00, 360 0x501d608e,
358 0x0f003fbb, 361 0xb201e5f0,
359 0x8effb23f, 362 0x008f7eff,
360 0xf0501d60, 363 0x8e0c0f00,
361 0x8f7e01e5,
362 0x0c0f0000,
363 0xa88effb2,
364 0xe5f0501d,
365 0x008f7e01,
366 0x03147e00,
367 0xb23f0f00,
368 0x1d608eff,
369 0x01e5f050,
370 0x00008f7e,
371 0xffb2000f,
372 0x501d9c8e,
373 0x7e01e5f0,
374 0x0f00008f,
375 0x03147e01,
376 0x8effb200,
377 0xf0501da8, 364 0xf0501da8,
378 0x8f7e01e5, 365 0xffb201e5,
379 0xff0f0000, 366 0x00008f7e,
380 0x988effb2, 367 0x0003147e,
368 0x608e3f0f,
381 0xe5f0501d, 369 0xe5f0501d,
382 0x008f7e01, 370 0x7effb201,
383 0xb2020f00, 371 0x0f00008f,
384 0x1da88eff, 372 0x1d9c8e00,
385 0x01e5f050, 373 0x01e5f050,
386 0x00008f7e, 374 0x8f7effb2,
375 0x010f0000,
387 0x0003147e, 376 0x0003147e,
388 0x85050498, 377 0x501da88e,
389 0x98504000, 378 0xb201e5f0,
390 0x64b60406, 379 0x008f7eff,
391 0x0056bb0f, 380 0x8eff0f00,
392/* 0x04e0: tpc_strand_init_tpc_loop */ 381 0xf0501d98,
393 0x05705eb8, 382 0xffb201e5,
394 0x00657e00,
395 0xbdf6b200,
396/* 0x04ed: tpc_strand_init_idx_loop */
397 0x605eb874,
398 0x7fb20005,
399 0x00008f7e,
400 0x05885eb8,
401 0x082f9500,
402 0x00008f7e,
403 0x058c5eb8,
404 0x082f9500,
405 0x00008f7e, 383 0x00008f7e,
406 0x05905eb8, 384 0xa88e020f,
407 0x00657e00,
408 0x06f5b600,
409 0xb601f0b6,
410 0x2fbb08f4,
411 0x003fbb00,
412 0xb60170b6,
413 0x1bf40162,
414 0x0050b7bf,
415 0x0142b608,
416 0x0fa81bf4,
417 0x8effb23f,
418 0xf0501d60,
419 0x8f7e01e5,
420 0x0d0f0000,
421 0xa88effb2,
422 0xe5f0501d, 385 0xe5f0501d,
423 0x008f7e01, 386 0x7effb201,
424 0x03147e00, 387 0x7e00008f,
425 0x01008000, 388 0x98000314,
426 0x0003f602, 389 0x00850504,
427 0x24bd04bd, 390 0x06985040,
428 0x801f29f0, 391 0x0f64b604,
429 0xf6023000, 392/* 0x04e3: tpc_strand_init_tpc_loop */
430 0x04bd0002, 393 0xb80056bb,
431/* 0x0574: main */ 394 0x0005705e,
432 0xf40031f4, 395 0x0000657e,
433 0x240d0028, 396 0x74bdf6b2,
434 0x0000377e, 397/* 0x04f0: tpc_strand_init_idx_loop */
435 0xb0f401f4, 398 0x05605eb8,
436 0x18f404e4, 399 0x7e7fb200,
437 0x0181fe1d, 400 0xb800008f,
438 0x20bd0602, 401 0x0005885e,
439 0xb60412fd, 402 0x7e082f95,
440 0x1efd01e4, 403 0xb800008f,
441 0x0018fe05, 404 0x00058c5e,
442 0x0006477e, 405 0x7e082f95,
443/* 0x05a3: main_not_ctx_xfer */ 406 0xb800008f,
444 0x94d40ef4, 407 0x0005905e,
445 0xf5f010ef, 408 0x0000657e,
446 0x02f87e01, 409 0xb606f5b6,
447 0xc70ef400, 410 0xf4b601f0,
448/* 0x05b0: ih */ 411 0x002fbb08,
449 0x88fe80f9, 412 0xb6003fbb,
450 0xf980f901, 413 0x62b60170,
451 0xf9a0f990, 414 0xbf1bf401,
452 0xf9d0f9b0, 415 0x080050b7,
453 0xbdf0f9e0, 416 0xf40142b6,
454 0x02004a04, 417 0x3f0fa81b,
455 0xc400aacf, 418 0x501d608e,
456 0x0bf404ab, 419 0xb201e5f0,
457 0x4e240d1f, 420 0x008f7eff,
458 0xeecf1a00, 421 0x8e0d0f00,
459 0x19004f00, 422 0xf0501da8,
460 0x7e00ffcf, 423 0xffb201e5,
461 0x0e000004, 424 0x00008f7e,
462 0x1d004001, 425 0x0003147e,
463 0xbd000ef6, 426 0x02010080,
464/* 0x05ed: ih_no_fifo */ 427 0xbd0003f6,
465 0x01004004, 428 0xf024bd04,
466 0xbd000af6, 429 0x00801f29,
467 0xfcf0fc04, 430 0x02f60230,
468 0xfcd0fce0, 431/* 0x0577: main */
469 0xfca0fcb0, 432 0xf404bd00,
470 0xfe80fc90, 433 0x28f40031,
471 0x80fc0088, 434 0x7e240d00,
472 0xf80032f4, 435 0xf4000037,
473/* 0x060d: hub_barrier_done */ 436 0xe4b0f401,
474 0x98010f01, 437 0x1d18f404,
475 0xfebb040e, 438 0x020181fe,
476 0x8effb204, 439 0xfd20bd06,
477 0x7e409418, 440 0xe4b60412,
478 0xf800008f, 441 0x051efd01,
479/* 0x0621: ctx_redswitch */ 442 0x7e0018fe,
480 0x80200f00, 443 0xf400064a,
444/* 0x05a6: main_not_ctx_xfer */
445 0xef94d40e,
446 0x01f5f010,
447 0x0002f87e,
448/* 0x05b3: ih */
449 0xf9c70ef4,
450 0x0188fe80,
451 0x90f980f9,
452 0xb0f9a0f9,
453 0xe0f9d0f9,
454 0x04bdf0f9,
455 0xcf02004a,
456 0xabc400aa,
457 0x1f0bf404,
458 0x004e240d,
459 0x00eecf1a,
460 0xcf19004f,
461 0x047e00ff,
462 0x010e0000,
463 0xf61d0040,
464 0x04bd000e,
465/* 0x05f0: ih_no_fifo */
466 0xf6010040,
467 0x04bd000a,
468 0xe0fcf0fc,
469 0xb0fcd0fc,
470 0x90fca0fc,
471 0x88fe80fc,
472 0xf480fc00,
473 0x01f80032,
474/* 0x0610: hub_barrier_done */
475 0x0e98010f,
476 0x04febb04,
477 0x188effb2,
478 0x8f7e4094,
479 0x00f80000,
480/* 0x0624: ctx_redswitch */
481 0x0080200f,
482 0x0ff60185,
483 0x0e04bd00,
484/* 0x0631: ctx_redswitch_delay */
485 0x01e2b608,
486 0xf1fd1bf4,
487 0xf10800f5,
488 0x800200f5,
481 0xf6018500, 489 0xf6018500,
482 0x04bd000f, 490 0x04bd000f,
483/* 0x062e: ctx_redswitch_delay */ 491/* 0x064a: ctx_xfer */
484 0xe2b6080e, 492 0x008000f8,
485 0xfd1bf401, 493 0x0ff60281,
486 0x0800f5f1, 494 0x8e04bd00,
487 0x0200f5f1, 495 0xf0501dc4,
488 0x01850080, 496 0xffb201e5,
489 0xbd000ff6, 497 0x00008f7e,
490/* 0x0647: ctx_xfer */ 498 0x7e0711f4,
491 0x8000f804, 499/* 0x0667: ctx_xfer_not_load */
492 0xf6028100, 500 0x7e000624,
493 0x04bd000f, 501 0xbd000216,
494 0xc48effb2, 502 0x47fc8024,
495 0xe5f0501d,
496 0x008f7e01,
497 0x0711f400,
498 0x0006217e,
499/* 0x0664: ctx_xfer_not_load */
500 0x0002167e,
501 0xfc8024bd,
502 0x02f60247,
503 0xf004bd00,
504 0x20b6012c,
505 0x4afc8003,
506 0x0002f602, 503 0x0002f602,
507 0x0c0f04bd, 504 0x2cf004bd,
508 0xa88effb2, 505 0x0320b601,
509 0xe5f0501d, 506 0x024afc80,
510 0x008f7e01, 507 0xbd0002f6,
511 0x03147e00, 508 0x8e0c0f04,
512 0xb23f0f00, 509 0xf0501da8,
513 0x1d608eff, 510 0xffb201e5,
514 0x01e5f050,
515 0x00008f7e, 511 0x00008f7e,
516 0xffb2000f, 512 0x0003147e,
517 0x501d9c8e, 513 0x608e3f0f,
518 0x7e01e5f0, 514 0xe5f0501d,
515 0x7effb201,
519 0x0f00008f, 516 0x0f00008f,
520 0x03147e01, 517 0x1d9c8e00,
521 0x01fcf000,
522 0xb203f0b6,
523 0x1da88eff,
524 0x01e5f050, 518 0x01e5f050,
525 0x00008f7e, 519 0x8f7effb2,
526 0xf001acf0, 520 0x010f0000,
527 0x008b02a5, 521 0x0003147e,
528 0x0c985000, 522 0xb601fcf0,
529 0x0fc4b604, 523 0xa88e03f0,
530 0x9800bcbb, 524 0xe5f0501d,
531 0x0d98000c, 525 0x7effb201,
532 0x7e000e01, 526 0xf000008f,
533 0xf000013d,
534 0x008b01ac,
535 0x0c985040,
536 0x0fc4b604,
537 0x9800bcbb,
538 0x0d98010c,
539 0x060f9802,
540 0x7e08004e,
541 0xf000013d,
542 0xa5f001ac, 527 0xa5f001ac,
543 0x30008b04, 528 0x00008b02,
544 0x040c9850, 529 0x040c9850,
545 0xbb0fc4b6, 530 0xbb0fc4b6,
546 0x0c9800bc, 531 0x0c9800bc,
547 0x030d9802, 532 0x010d9800,
548 0x4e080f98, 533 0x3d7e000e,
549 0x3d7e0200, 534 0xacf00001,
550 0x0a7e0001, 535 0x40008b01,
551 0x147e0002, 536 0x040c9850,
552 0x01f40003, 537 0xbb0fc4b6,
553 0x1a12f406, 538 0x0c9800bc,
554/* 0x073c: ctx_xfer_post */ 539 0x020d9801,
555 0x0002277e, 540 0x4e060f98,
556 0xffb20d0f, 541 0x3d7e0800,
557 0x501da88e, 542 0xacf00001,
558 0x7e01e5f0, 543 0x04a5f001,
559 0x7e00008f, 544 0x5030008b,
560/* 0x0753: ctx_xfer_done */ 545 0xb6040c98,
561 0x7e000314, 546 0xbcbb0fc4,
562 0xf800060d, 547 0x020c9800,
563 0x00000000, 548 0x98030d98,
549 0x004e080f,
550 0x013d7e02,
551 0x020a7e00,
552 0x03147e00,
553 0x0601f400,
554/* 0x073f: ctx_xfer_post */
555 0x7e1a12f4,
556 0x0f000227,
557 0x1da88e0d,
558 0x01e5f050,
559 0x8f7effb2,
560 0x147e0000,
561/* 0x0756: ctx_xfer_done */
562 0x107e0003,
563 0x00f80006,
564 0x00000000, 564 0x00000000,
565 0x00000000, 565 0x00000000,
566 0x00000000, 566 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index dda7a7d224c9..9f5dfc85147a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -143,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
143static int 143static int
144gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) 144gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
145{ 145{
146 struct gf100_gr *gr = (void *)object->engine; 146 struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
147 union { 147 union {
148 struct fermi_a_zbc_color_v0 v0; 148 struct fermi_a_zbc_color_v0 v0;
149 } *args = data; 149 } *args = data;
@@ -189,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
189static int 189static int
190gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size) 190gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
191{ 191{
192 struct gf100_gr *gr = (void *)object->engine; 192 struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
193 union { 193 union {
194 struct fermi_a_zbc_depth_v0 v0; 194 struct fermi_a_zbc_depth_v0 v0;
195 } *args = data; 195 } *args = data;
@@ -1530,6 +1530,8 @@ gf100_gr_oneinit(struct nvkm_gr *base)
1530 gr->ppc_nr[i] = gr->func->ppc_nr; 1530 gr->ppc_nr[i] = gr->func->ppc_nr;
1531 for (j = 0; j < gr->ppc_nr[i]; j++) { 1531 for (j = 0; j < gr->ppc_nr[i]; j++) {
1532 u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); 1532 u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
1533 if (mask)
1534 gr->ppc_mask[i] |= (1 << j);
1533 gr->ppc_tpc_nr[i][j] = hweight8(mask); 1535 gr->ppc_tpc_nr[i][j] = hweight8(mask);
1534 } 1536 }
1535 } 1537 }
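
A minimal userspace sketch of the ppc_mask accounting added above, with invented register values (the tpc_mask array stands in for the per-PPC GPC_UNIT reads): one bit is set per PPC whose TPC mask read back non-zero.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Invented per-PPC TPC mask reads; PPC 1 reads back empty. */
		uint8_t tpc_mask[] = { 0x3, 0x0, 0xc };
		uint8_t ppc_mask = 0;
		int j;

		for (j = 0; j < 3; j++)
			if (tpc_mask[j])
				ppc_mask |= 1 << j;
		printf("ppc_mask = 0x%x\n", ppc_mask); /* 0x5: PPCs 0 and 2 present */
		return 0;
	}
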
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 4611961b1187..02e78b8d93f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -97,6 +97,7 @@ struct gf100_gr {
97 u8 tpc_nr[GPC_MAX]; 97 u8 tpc_nr[GPC_MAX];
98 u8 tpc_total; 98 u8 tpc_total;
99 u8 ppc_nr[GPC_MAX]; 99 u8 ppc_nr[GPC_MAX];
100 u8 ppc_mask[GPC_MAX];
100 u8 ppc_tpc_nr[GPC_MAX][4]; 101 u8 ppc_tpc_nr[GPC_MAX][4];
101 102
102 struct nvkm_memory *unk4188b4; 103 struct nvkm_memory *unk4188b4;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 895ba74057d4..1d7dd38292b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -97,7 +97,9 @@ static void *
97nvkm_instobj_dtor(struct nvkm_memory *memory) 97nvkm_instobj_dtor(struct nvkm_memory *memory)
98{ 98{
99 struct nvkm_instobj *iobj = nvkm_instobj(memory); 99 struct nvkm_instobj *iobj = nvkm_instobj(memory);
100 spin_lock(&iobj->imem->lock);
100 list_del(&iobj->head); 101 list_del(&iobj->head);
102 spin_unlock(&iobj->imem->lock);
101 nvkm_memory_del(&iobj->parent); 103 nvkm_memory_del(&iobj->parent);
102 return iobj; 104 return iobj;
103} 105}
@@ -190,7 +192,9 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
190 nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory); 192 nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
191 iobj->parent = memory; 193 iobj->parent = memory;
192 iobj->imem = imem; 194 iobj->imem = imem;
195 spin_lock(&iobj->imem->lock);
193 list_add_tail(&iobj->head, &imem->list); 196 list_add_tail(&iobj->head, &imem->list);
197 spin_unlock(&iobj->imem->lock);
194 memory = &iobj->memory; 198 memory = &iobj->memory;
195 } 199 }
196 200
@@ -309,5 +313,6 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
309{ 313{
310 nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev); 314 nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
311 imem->func = func; 315 imem->func = func;
316 spin_lock_init(&imem->lock);
312 INIT_LIST_HEAD(&imem->list); 317 INIT_LIST_HEAD(&imem->list);
313} 318}
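
The pattern introduced here is a plain spinlock guarding every mutation of imem->list, initialized alongside the list itself. A kernel-style sketch of the same idiom, with hypothetical names (my_pool, my_item):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/* Hypothetical container mirroring the imem->lock/list pairing above. */
	struct my_pool {
		spinlock_t lock;
		struct list_head items;
	};

	struct my_item {
		struct list_head head;
	};

	static void my_pool_init(struct my_pool *pool)
	{
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->items);
	}

	static void my_pool_add(struct my_pool *pool, struct my_item *item)
	{
		spin_lock(&pool->lock);
		list_add_tail(&item->head, &pool->items);
		spin_unlock(&pool->lock);
	}

	static void my_pool_del(struct my_pool *pool, struct my_item *item)
	{
		spin_lock(&pool->lock);
		list_del(&item->head);
		spin_unlock(&pool->lock);
	}
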
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
index d942fa7b9f18..86f9f3b13f71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c
@@ -81,9 +81,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
81 nvkm_mask(device, 0x000200, 0x00001000, 0x00001000); 81 nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
82 nvkm_rd32(device, 0x000200); 82 nvkm_rd32(device, 0x000200);
83 83
84 if ( nvkm_boolopt(device->cfgopt, "War00C800_0", 84 if (nvkm_boolopt(device->cfgopt, "War00C800_0", true)) {
85 device->quirk ? device->quirk->War00C800_0 : false)) {
86 nvkm_info(&pmu->subdev, "hw bug workaround enabled\n");
87 switch (device->chipset) { 85 switch (device->chipset) {
88 case 0xe4: 86 case 0xe4:
89 magic(device, 0x04000000); 87 magic(device, 0x04000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index b61509e26ec9..b735173a18ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -59,7 +59,7 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv)
59 duty = (uv - bios->base) * div / bios->pwm_range; 59 duty = (uv - bios->base) * div / bios->pwm_range;
60 60
61 nvkm_wr32(device, 0x20340, div); 61 nvkm_wr32(device, 0x20340, div);
62 nvkm_wr32(device, 0x20344, 0x8000000 | duty); 62 nvkm_wr32(device, 0x20344, 0x80000000 | duty);
63 63
64 return 0; 64 return 0;
65} 65}
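
The one-character fix above moves the OR'd constant from bit 27 to bit 31 (presumably the PWM enable bit on this hardware); a trivial userspace illustration of the difference, with an arbitrary duty value:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t duty = 0x1234; /* arbitrary duty cycle for illustration */

		printf("typo:  0x%08x\n", 0x8000000u | duty);  /* 0x08001234, bit 27 */
		printf("fixed: 0x%08x\n", 0x80000000u | duty); /* 0x80001234, bit 31 */
		return 0;
	}
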
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b8e4cdec28c3..24f92bea39c7 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -112,11 +112,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
112 dma_addr_t paddr; 112 dma_addr_t paddr;
113 int ret; 113 int ret;
114 114
115 /* only doing ARGB32 since this is what is needed to alpha-blend
116 * with video overlays:
117 */
118 sizes->surface_bpp = 32; 115 sizes->surface_bpp = 32;
119 sizes->surface_depth = 32; 116 sizes->surface_depth = 24;
120 117
121 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, 118 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
122 sizes->surface_height, sizes->surface_bpp, 119 sizes->surface_height, sizes->surface_bpp,
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 248953d2fdb7..f81fb2641097 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4173,11 +4173,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
4173 control |= ib->length_dw | (vm_id << 24); 4173 control |= ib->length_dw | (vm_id << 24);
4174 4174
4175 radeon_ring_write(ring, header); 4175 radeon_ring_write(ring, header);
4176 radeon_ring_write(ring, 4176 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFFC));
4177#ifdef __BIG_ENDIAN
4178 (2 << 0) |
4179#endif
4180 (ib->gpu_addr & 0xFFFFFFFC));
4181 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); 4177 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
4182 radeon_ring_write(ring, control); 4178 radeon_ring_write(ring, control);
4183} 4179}
@@ -8472,7 +8468,7 @@ restart_ih:
8472 if (queue_dp) 8468 if (queue_dp)
8473 schedule_work(&rdev->dp_work); 8469 schedule_work(&rdev->dp_work);
8474 if (queue_hotplug) 8470 if (queue_hotplug)
8475 schedule_work(&rdev->hotplug_work); 8471 schedule_delayed_work(&rdev->hotplug_work, 0);
8476 if (queue_reset) { 8472 if (queue_reset) {
8477 rdev->needs_reset = true; 8473 rdev->needs_reset = true;
8478 wake_up_all(&rdev->fence_queue); 8474 wake_up_all(&rdev->fence_queue);
@@ -9630,6 +9626,9 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
9630 (rdev->disp_priority == 2)) { 9626 (rdev->disp_priority == 2)) {
9631 DRM_DEBUG_KMS("force priority to high\n"); 9627 DRM_DEBUG_KMS("force priority to high\n");
9632 } 9628 }
9629
9630 /* Save number of lines the linebuffer leads before the scanout */
9631 radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
9633 } 9632 }
9634 9633
9635 /* select wm A */ 9634 /* select wm A */
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7f33767d7ed6..2ad462896896 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2372,6 +2372,9 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
2372 c.full = dfixed_div(c, a); 2372 c.full = dfixed_div(c, a);
2373 priority_b_mark = dfixed_trunc(c); 2373 priority_b_mark = dfixed_trunc(c);
2374 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; 2374 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
2375
2376 /* Save number of lines the linebuffer leads before the scanout */
2377 radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
2375 } 2378 }
2376 2379
2377 /* select wm A */ 2380 /* select wm A */
@@ -5344,7 +5347,7 @@ restart_ih:
5344 if (queue_dp) 5347 if (queue_dp)
5345 schedule_work(&rdev->dp_work); 5348 schedule_work(&rdev->dp_work);
5346 if (queue_hotplug) 5349 if (queue_hotplug)
5347 schedule_work(&rdev->hotplug_work); 5350 schedule_delayed_work(&rdev->hotplug_work, 0);
5348 if (queue_hdmi) 5351 if (queue_hdmi)
5349 schedule_work(&rdev->audio_work); 5352 schedule_work(&rdev->audio_work);
5350 if (queue_thermal && rdev->pm.dpm_enabled) 5353 if (queue_thermal && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 238b13f045c1..9e7e2bf03b81 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -806,7 +806,7 @@ int r100_irq_process(struct radeon_device *rdev)
806 status = r100_irq_ack(rdev); 806 status = r100_irq_ack(rdev);
807 } 807 }
808 if (queue_hotplug) 808 if (queue_hotplug)
809 schedule_work(&rdev->hotplug_work); 809 schedule_delayed_work(&rdev->hotplug_work, 0);
810 if (rdev->msi_enabled) { 810 if (rdev->msi_enabled) {
811 switch (rdev->family) { 811 switch (rdev->family) {
812 case CHIP_RS400: 812 case CHIP_RS400:
@@ -3217,6 +3217,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
3217 uint32_t pixel_bytes1 = 0; 3217 uint32_t pixel_bytes1 = 0;
3218 uint32_t pixel_bytes2 = 0; 3218 uint32_t pixel_bytes2 = 0;
3219 3219
3220 /* Guess line buffer size to be 8192 pixels */
3221 u32 lb_size = 8192;
3222
3220 if (!rdev->mode_info.mode_config_initialized) 3223 if (!rdev->mode_info.mode_config_initialized)
3221 return; 3224 return;
3222 3225
@@ -3631,6 +3634,13 @@ void r100_bandwidth_update(struct radeon_device *rdev)
3631 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", 3634 DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
3632 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); 3635 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
3633 } 3636 }
3637
3638 /* Save number of lines the linebuffer leads before the scanout */
3639 if (mode1)
3640 rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
3641
3642 if (mode2)
3643 rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
3634} 3644}
3635 3645
3636int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) 3646int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
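
A worked instance of the lead-line computation saved above, assuming the guessed 8192-pixel line buffer and an illustrative 1920-pixel-wide mode:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int lb_size = 8192;       /* guessed line buffer, in pixels */
		unsigned int crtc_hdisplay = 1920; /* illustrative mode width */

		/* ceil(8192 / 1920) = 5: the line buffer leads scanout by 5 lines. */
		printf("lb_vblank_lead_lines = %u\n",
		       DIV_ROUND_UP(lb_size, crtc_hdisplay));
		return 0;
	}
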
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 4ea5b10ff5f4..cc2fdf0be37a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4276,7 +4276,7 @@ restart_ih:
4276 WREG32(IH_RB_RPTR, rptr); 4276 WREG32(IH_RB_RPTR, rptr);
4277 } 4277 }
4278 if (queue_hotplug) 4278 if (queue_hotplug)
4279 schedule_work(&rdev->hotplug_work); 4279 schedule_delayed_work(&rdev->hotplug_work, 0);
4280 if (queue_hdmi) 4280 if (queue_hdmi)
4281 schedule_work(&rdev->audio_work); 4281 schedule_work(&rdev->audio_work);
4282 if (queue_thermal && rdev->pm.dpm_enabled) 4282 if (queue_thermal && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b6cbd816537e..87db64983ea8 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2414,7 +2414,7 @@ struct radeon_device {
2414 struct r600_ih ih; /* r6/700 interrupt ring */ 2414 struct r600_ih ih; /* r6/700 interrupt ring */
2415 struct radeon_rlc rlc; 2415 struct radeon_rlc rlc;
2416 struct radeon_mec mec; 2416 struct radeon_mec mec;
2417 struct work_struct hotplug_work; 2417 struct delayed_work hotplug_work;
2418 struct work_struct dp_work; 2418 struct work_struct dp_work;
2419 struct work_struct audio_work; 2419 struct work_struct audio_work;
2420 int num_crtc; /* number of crtcs */ 2420 int num_crtc; /* number of crtcs */
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index fe994aac3b04..c77d349c561c 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -54,6 +54,9 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
54 /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */ 54 /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
55 { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50, 55 { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
56 PCI_VENDOR_ID_IBM, 0x0550, 1}, 56 PCI_VENDOR_ID_IBM, 0x0550, 1},
57 /* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */
58 { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
59 PCI_VENDOR_ID_IBM, 0x054d, 1},
57 /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */ 60 /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
58 { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57, 61 { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
59 PCI_VENDOR_ID_IBM, 0x0530, 1}, 62 PCI_VENDOR_ID_IBM, 0x0530, 1},
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5a2cafb4f1bc..340f3f549f29 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1234,13 +1234,32 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
1234 if (r < 0) 1234 if (r < 0)
1235 return connector_status_disconnected; 1235 return connector_status_disconnected;
1236 1236
1237 if (radeon_connector->detected_hpd_without_ddc) {
1238 force = true;
1239 radeon_connector->detected_hpd_without_ddc = false;
1240 }
1241
1237 if (!force && radeon_check_hpd_status_unchanged(connector)) { 1242 if (!force && radeon_check_hpd_status_unchanged(connector)) {
1238 ret = connector->status; 1243 ret = connector->status;
1239 goto exit; 1244 goto exit;
1240 } 1245 }
1241 1246
1242 if (radeon_connector->ddc_bus) 1247 if (radeon_connector->ddc_bus) {
1243 dret = radeon_ddc_probe(radeon_connector, false); 1248 dret = radeon_ddc_probe(radeon_connector, false);
1249
1250 /* Sometimes the pins required for the DDC probe on DVI
1251 * connectors don't make contact at the same time that the ones
1252 * for HPD do. If the DDC probe fails even though we had an HPD
1253 * signal, try again later */
1254 if (!dret && !force &&
1255 connector->status != connector_status_connected) {
1256 DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
1257 radeon_connector->detected_hpd_without_ddc = true;
1258 schedule_delayed_work(&rdev->hotplug_work,
1259 msecs_to_jiffies(1000));
1260 goto exit;
1261 }
1262 }
1244 if (dret) { 1263 if (dret) {
1245 radeon_connector->detected_by_load = false; 1264 radeon_connector->detected_by_load = false;
1246 radeon_connector_free_edid(connector); 1265 radeon_connector_free_edid(connector);
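
The work_struct to delayed_work conversion running through the rest of this series exists to make the deferred re-probe above possible. A minimal kernel-style sketch of the pattern, with hypothetical names (my_dev, my_hotplug_func, my_init):

	#include <linux/workqueue.h>

	struct my_dev {
		struct delayed_work hotplug_work;
	};

	static void my_hotplug_func(struct work_struct *work)
	{
		/* Note the extra .work step compared to a plain work_struct. */
		struct my_dev *dev = container_of(work, struct my_dev,
						  hotplug_work.work);
		/* ... re-detect connectors on dev ... */
		(void)dev;
	}

	static void my_init(struct my_dev *dev)
	{
		INIT_DELAYED_WORK(&dev->hotplug_work, my_hotplug_func);
		/* Delay 0 behaves like schedule_work(); a non-zero delay gives
		 * the 1-second retry used above. */
		schedule_delayed_work(&dev->hotplug_work, 0);
	}
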
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a8d9927ed9eb..1eca0acac016 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -322,7 +322,9 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
322 * to complete in this vblank? 322 * to complete in this vblank?
323 */ 323 */
324 if (update_pending && 324 if (update_pending &&
325 (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0, 325 (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev,
326 crtc_id,
327 USE_REAL_VBLANKSTART,
326 &vpos, &hpos, NULL, NULL, 328 &vpos, &hpos, NULL, NULL,
327 &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) && 329 &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
328 ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || 330 ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
@@ -401,6 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
401 struct drm_crtc *crtc = &radeon_crtc->base; 403 struct drm_crtc *crtc = &radeon_crtc->base;
402 unsigned long flags; 404 unsigned long flags;
403 int r; 405 int r;
406 int vpos, hpos, stat, min_udelay;
407 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
404 408
405 down_read(&rdev->exclusive_lock); 409 down_read(&rdev->exclusive_lock);
406 if (work->fence) { 410 if (work->fence) {
@@ -437,6 +441,41 @@ static void radeon_flip_work_func(struct work_struct *__work)
437 /* set the proper interrupt */ 441 /* set the proper interrupt */
438 radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); 442 radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
439 443
444 /* If this happens to execute within the "virtually extended" vblank
445 * interval before the start of the real vblank interval then it needs
446 * to delay programming the mmio flip until the real vblank is entered.
447 * This prevents completing a flip too early due to the way we fudge
448 * our vblank counter and vblank timestamps in order to work around the
449 * problem that the hw fires vblank interrupts before actual start of
450 * vblank (when line buffer refilling is done for a frame). It
451 * complements the fudging logic in radeon_get_crtc_scanoutpos() for
452 * timestamping and radeon_get_vblank_counter_kms() for vblank counts.
453 *
454 * In practice this won't execute very often unless on very fast
455 * machines because the time window for this to happen is very small.
456 */
457 for (;;) {
458 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
459 * start in hpos, and to the "fudged earlier" vblank start in
460 * vpos.
461 */
462 stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id,
463 GET_DISTANCE_TO_VBLANKSTART,
464 &vpos, &hpos, NULL, NULL,
465 &crtc->hwmode);
466
467 if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
468 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
469 !(vpos >= 0 && hpos <= 0))
470 break;
471
472 /* Sleep at least until estimated real start of hw vblank */
473 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
474 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
475 usleep_range(min_udelay, 2 * min_udelay);
476 spin_lock_irqsave(&crtc->dev->event_lock, flags);
 477 }
478
440 /* do the flip (mmio) */ 479 /* do the flip (mmio) */
441 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); 480 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
442 481
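
A worked instance of the sleep bound computed in the loop above, with assumed numbers: 3 scanlines to real vblank start (hpos == -3) and an illustrative ~15 us scanline duration.

	#include <stdio.h>

	#define max(a, b) ((a) > (b) ? (a) : (b))

	int main(void)
	{
		int hpos = -3;          /* 3 lines before real vblank start */
		int linedur_ns = 14800; /* illustrative scanline duration */
		int min_udelay = (-hpos + 1) * max(linedur_ns / 1000, 5);

		/* (3 + 1) * 14 us = 56 us */
		printf("usleep_range(%d, %d)\n", min_udelay, 2 * min_udelay);
		return 0;
	}
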
@@ -1768,6 +1807,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1768 * \param dev Device to query. 1807 * \param dev Device to query.
1769 * \param crtc Crtc to query. 1808 * \param crtc Crtc to query.
1770 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). 1809 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
1810 * For driver internal use only also supports these flags:
1811 *
1812 * USE_REAL_VBLANKSTART to use the real start of vblank instead
1813 * of a fudged earlier start of vblank.
1814 *
1815 * GET_DISTANCE_TO_VBLANKSTART to return distance to the
1816 * fudged earlier start of vblank in *vpos and the distance
1817 * to true start of vblank in *hpos.
1818 *
1771 * \param *vpos Location where vertical scanout position should be stored. 1819 * \param *vpos Location where vertical scanout position should be stored.
1772 * \param *hpos Location where horizontal scanout position should go. 1820 * \param *hpos Location where horizontal scanout position should go.
1773 * \param *stime Target location for timestamp taken immediately before 1821 * \param *stime Target location for timestamp taken immediately before
@@ -1911,10 +1959,40 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
1911 vbl_end = 0; 1959 vbl_end = 0;
1912 } 1960 }
1913 1961
1962 /* Called from driver internal vblank counter query code? */
1963 if (flags & GET_DISTANCE_TO_VBLANKSTART) {
1964 /* Caller wants distance from real vbl_start in *hpos */
1965 *hpos = *vpos - vbl_start;
1966 }
1967
1968 /* Fudge vblank to start a few scanlines earlier to handle the
1969 * problem that vblank irqs fire a few scanlines before start
1970 * of vblank. Some driver internal callers need the true vblank
1971 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
1972 *
1973 * The cause of the "early" vblank irq is that the irq is triggered
1974 * by the line buffer logic when the line buffer read position enters
1975 * the vblank, whereas our crtc scanout position naturally lags the
1976 * line buffer read position.
1977 */
1978 if (!(flags & USE_REAL_VBLANKSTART))
1979 vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
1980
1914 /* Test scanout position against vblank region. */ 1981 /* Test scanout position against vblank region. */
1915 if ((*vpos < vbl_start) && (*vpos >= vbl_end)) 1982 if ((*vpos < vbl_start) && (*vpos >= vbl_end))
1916 in_vbl = false; 1983 in_vbl = false;
1917 1984
1985 /* In vblank? */
1986 if (in_vbl)
1987 ret |= DRM_SCANOUTPOS_IN_VBLANK;
1988
1989 /* Called from driver internal vblank counter query code? */
1990 if (flags & GET_DISTANCE_TO_VBLANKSTART) {
1991 /* Caller wants distance from fudged earlier vbl_start */
1992 *vpos -= vbl_start;
1993 return ret;
1994 }
1995
1918 /* Check if inside vblank area and apply corrective offsets: 1996 /* Check if inside vblank area and apply corrective offsets:
1919 * vpos will then be >=0 in video scanout area, but negative 1997 * vpos will then be >=0 in video scanout area, but negative
1920 * within vblank area, counting down the number of lines until 1998 * within vblank area, counting down the number of lines until
@@ -1930,31 +2008,5 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
1930 /* Correct for shifted end of vbl at vbl_end. */ 2008 /* Correct for shifted end of vbl at vbl_end. */
1931 *vpos = *vpos - vbl_end; 2009 *vpos = *vpos - vbl_end;
1932 2010
1933 /* In vblank? */
1934 if (in_vbl)
1935 ret |= DRM_SCANOUTPOS_IN_VBLANK;
1936
1937 /* Is vpos outside nominal vblank area, but less than
1938 * 1/100 of a frame height away from start of vblank?
1939 * If so, assume this isn't a massively delayed vblank
1940 * interrupt, but a vblank interrupt that fired a few
1941 * microseconds before true start of vblank. Compensate
1942 * by adding a full frame duration to the final timestamp.
1943 * Happens, e.g., on ATI R500, R600.
1944 *
1945 * We only do this if DRM_CALLED_FROM_VBLIRQ.
1946 */
1947 if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
1948 vbl_start = mode->crtc_vdisplay;
1949 vtotal = mode->crtc_vtotal;
1950
1951 if (vbl_start - *vpos < vtotal / 100) {
1952 *vpos -= vtotal;
1953
1954 /* Signal this correction as "applied". */
1955 ret |= 0x8;
1956 }
1957 }
1958
1959 return ret; 2011 return ret;
1960} 2012}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 171d3e43c30c..979f3bf65f2c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -74,7 +74,7 @@ irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
74static void radeon_hotplug_work_func(struct work_struct *work) 74static void radeon_hotplug_work_func(struct work_struct *work)
75{ 75{
76 struct radeon_device *rdev = container_of(work, struct radeon_device, 76 struct radeon_device *rdev = container_of(work, struct radeon_device,
77 hotplug_work); 77 hotplug_work.work);
78 struct drm_device *dev = rdev->ddev; 78 struct drm_device *dev = rdev->ddev;
79 struct drm_mode_config *mode_config = &dev->mode_config; 79 struct drm_mode_config *mode_config = &dev->mode_config;
80 struct drm_connector *connector; 80 struct drm_connector *connector;
@@ -302,7 +302,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
302 } 302 }
303 } 303 }
304 304
305 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); 305 INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
306 INIT_WORK(&rdev->dp_work, radeon_dp_work_func); 306 INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
307 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); 307 INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
308 308
@@ -310,7 +310,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
310 r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); 310 r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
311 if (r) { 311 if (r) {
312 rdev->irq.installed = false; 312 rdev->irq.installed = false;
313 flush_work(&rdev->hotplug_work); 313 flush_delayed_work(&rdev->hotplug_work);
314 return r; 314 return r;
315 } 315 }
316 316
@@ -333,7 +333,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
333 rdev->irq.installed = false; 333 rdev->irq.installed = false;
334 if (rdev->msi_enabled) 334 if (rdev->msi_enabled)
335 pci_disable_msi(rdev->pdev); 335 pci_disable_msi(rdev->pdev);
336 flush_work(&rdev->hotplug_work); 336 flush_delayed_work(&rdev->hotplug_work);
337 } 337 }
338} 338}
339 339
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0ec6fcca16d3..d290a8a09036 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -755,6 +755,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
755 */ 755 */
756u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) 756u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
757{ 757{
758 int vpos, hpos, stat;
759 u32 count;
758 struct radeon_device *rdev = dev->dev_private; 760 struct radeon_device *rdev = dev->dev_private;
759 761
760 if (crtc < 0 || crtc >= rdev->num_crtc) { 762 if (crtc < 0 || crtc >= rdev->num_crtc) {
@@ -762,7 +764,53 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
762 return -EINVAL; 764 return -EINVAL;
763 } 765 }
764 766
765 return radeon_get_vblank_counter(rdev, crtc); 767 /* The hw increments its frame counter at start of vsync, not at start
768 * of vblank, as is required by DRM core vblank counter handling.
769 * Cook the hw count here to make it appear to the caller as if it
770 * incremented at start of vblank. We measure distance to start of
771 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
772 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
773 * result by 1 to give the proper appearance to caller.
774 */
775 if (rdev->mode_info.crtcs[crtc]) {
776 /* Repeat readout if needed to provide stable result if
777 * we cross start of vsync during the queries.
778 */
779 do {
780 count = radeon_get_vblank_counter(rdev, crtc);
781 /* Ask radeon_get_crtc_scanoutpos to return vpos as
782 * distance to start of vblank, instead of regular
783 * vertical scanout pos.
784 */
785 stat = radeon_get_crtc_scanoutpos(
786 dev, crtc, GET_DISTANCE_TO_VBLANKSTART,
787 &vpos, &hpos, NULL, NULL,
788 &rdev->mode_info.crtcs[crtc]->base.hwmode);
789 } while (count != radeon_get_vblank_counter(rdev, crtc));
790
791 if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
792 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
793 DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
794 }
795 else {
796 DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
797 crtc, vpos);
798
799 /* Bump counter if we are at >= leading edge of vblank,
800 * but before vsync where vpos would turn negative and
801 * the hw counter really increments.
802 */
803 if (vpos >= 0)
804 count++;
805 }
806 }
807 else {
808 /* Fallback to use value as is. */
809 count = radeon_get_vblank_counter(rdev, crtc);
810 DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
811 }
812
813 return count;
766} 814}
767 815
768/** 816/**
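
The do/while above is a stable-readout loop: the frame counter is read again after the scanout-position query, and the whole sequence repeats if the two reads disagree, so the query is known not to straddle a counter increment. A self-contained simulation of the idiom, with an invented counter that moves between two reads:

	#include <stdio.h>

	static unsigned int frame = 41, reads;

	static unsigned int read_counter(void)
	{
		if (++reads == 2) /* simulate an increment between two reads */
			frame++;
		return frame;
	}

	int main(void)
	{
		unsigned int count;

		do {
			count = read_counter();
			/* ... scanout-position query would go here ... */
		} while (count != read_counter());

		printf("stable count = %u\n", count); /* 42 */
		return 0;
	}
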
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 830e171c3a9e..bba112628b47 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -367,6 +367,7 @@ struct radeon_crtc {
367 u32 line_time; 367 u32 line_time;
368 u32 wm_low; 368 u32 wm_low;
369 u32 wm_high; 369 u32 wm_high;
370 u32 lb_vblank_lead_lines;
370 struct drm_display_mode hw_mode; 371 struct drm_display_mode hw_mode;
371 enum radeon_output_csc output_csc; 372 enum radeon_output_csc output_csc;
372}; 373};
@@ -553,6 +554,7 @@ struct radeon_connector {
553 void *con_priv; 554 void *con_priv;
554 bool dac_load_detect; 555 bool dac_load_detect;
555 bool detected_by_load; /* if the connection status was determined by load */ 556 bool detected_by_load; /* if the connection status was determined by load */
557 bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */
556 uint16_t connector_object_id; 558 uint16_t connector_object_id;
557 struct radeon_hpd hpd; 559 struct radeon_hpd hpd;
558 struct radeon_router router; 560 struct radeon_router router;
@@ -686,6 +688,9 @@ struct atom_voltage_table
686 struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; 688 struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
687}; 689};
688 690
691/* Driver internal use only flags of radeon_get_crtc_scanoutpos() */
692#define USE_REAL_VBLANKSTART (1 << 30)
693#define GET_DISTANCE_TO_VBLANKSTART (1 << 31)
689 694
690extern void 695extern void
691radeon_add_atom_connector(struct drm_device *dev, 696radeon_add_atom_connector(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d3024883b844..84d45633d28c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -221,11 +221,17 @@ int radeon_bo_create(struct radeon_device *rdev,
221 if (!(rdev->flags & RADEON_IS_PCIE)) 221 if (!(rdev->flags & RADEON_IS_PCIE))
222 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); 222 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
223 223
224 /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
225 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
226 */
227 if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
228 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
229
224#ifdef CONFIG_X86_32 230#ifdef CONFIG_X86_32
225 /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit 231 /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
226 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 232 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
227 */ 233 */
228 bo->flags &= ~RADEON_GEM_GTT_WC; 234 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
229#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) 235#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
230 /* Don't try to enable write-combining when it can't work, or things 236 /* Don't try to enable write-combining when it can't work, or things
231 * may be slow 237 * may be slow
@@ -235,9 +241,10 @@ int radeon_bo_create(struct radeon_device *rdev,
235#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ 241#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
236 thanks to write-combining 242 thanks to write-combining
237 243
238 DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " 244 if (bo->flags & RADEON_GEM_GTT_WC)
239 "better performance thanks to write-combining\n"); 245 DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
240 bo->flags &= ~RADEON_GEM_GTT_WC; 246 "better performance thanks to write-combining\n");
247 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
241#endif 248#endif
242 249
243 radeon_ttm_placement_from_domain(bo, domain); 250 radeon_ttm_placement_from_domain(bo, domain);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6d80dde23400..59abebd6b5dc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1542,8 +1542,7 @@ int radeon_pm_late_init(struct radeon_device *rdev)
1542 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1542 ret = device_create_file(rdev->dev, &dev_attr_power_method);
1543 if (ret) 1543 if (ret)
1544 DRM_ERROR("failed to create device file for power method\n"); 1544 DRM_ERROR("failed to create device file for power method\n");
1545 if (!ret) 1545 rdev->pm.sysfs_initialized = true;
1546 rdev->pm.sysfs_initialized = true;
1547 } 1546 }
1548 1547
1549 mutex_lock(&rdev->pm.mutex); 1548 mutex_lock(&rdev->pm.mutex);
@@ -1757,7 +1756,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
1757 */ 1756 */
1758 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { 1757 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
1759 if (rdev->pm.active_crtcs & (1 << crtc)) { 1758 if (rdev->pm.active_crtcs & (1 << crtc)) {
1760 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, 1759 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
1760 crtc,
1761 USE_REAL_VBLANKSTART,
1761 &vpos, &hpos, NULL, NULL, 1762 &vpos, &hpos, NULL, NULL,
1762 &rdev->mode_info.crtcs[crtc]->base.hwmode); 1763 &rdev->mode_info.crtcs[crtc]->base.hwmode);
1763 if ((vbl_status & DRM_SCANOUTPOS_VALID) && 1764 if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 574f62bbd215..7eb1ae758906 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -361,31 +361,31 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
361 361
 362 /* stitch together a VCE create msg */ 362 /* stitch together a VCE create msg */
363 ib.length_dw = 0; 363 ib.length_dw = 0;
364 ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ 364 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
365 ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ 365 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
366 ib.ptr[ib.length_dw++] = handle; 366 ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
367 367
368 ib.ptr[ib.length_dw++] = 0x00000030; /* len */ 368 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000030); /* len */
369 ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */ 369 ib.ptr[ib.length_dw++] = cpu_to_le32(0x01000001); /* create cmd */
370 ib.ptr[ib.length_dw++] = 0x00000000; 370 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
371 ib.ptr[ib.length_dw++] = 0x00000042; 371 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000042);
372 ib.ptr[ib.length_dw++] = 0x0000000a; 372 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000a);
373 ib.ptr[ib.length_dw++] = 0x00000001; 373 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
374 ib.ptr[ib.length_dw++] = 0x00000080; 374 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000080);
375 ib.ptr[ib.length_dw++] = 0x00000060; 375 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000060);
376 ib.ptr[ib.length_dw++] = 0x00000100; 376 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
377 ib.ptr[ib.length_dw++] = 0x00000100; 377 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100);
378 ib.ptr[ib.length_dw++] = 0x0000000c; 378 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c);
379 ib.ptr[ib.length_dw++] = 0x00000000; 379 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000);
380 380
381 ib.ptr[ib.length_dw++] = 0x00000014; /* len */ 381 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
382 ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ 382 ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
383 ib.ptr[ib.length_dw++] = upper_32_bits(dummy); 383 ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
384 ib.ptr[ib.length_dw++] = dummy; 384 ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
385 ib.ptr[ib.length_dw++] = 0x00000001; 385 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
386 386
387 for (i = ib.length_dw; i < ib_size_dw; ++i) 387 for (i = ib.length_dw; i < ib_size_dw; ++i)
388 ib.ptr[i] = 0x0; 388 ib.ptr[i] = cpu_to_le32(0x0);
389 389
390 r = radeon_ib_schedule(rdev, &ib, NULL, false); 390 r = radeon_ib_schedule(rdev, &ib, NULL, false);
391 if (r) { 391 if (r) {
@@ -428,21 +428,21 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
428 428
 429 /* stitch together a VCE destroy msg */ 429 /* stitch together a VCE destroy msg */
430 ib.length_dw = 0; 430 ib.length_dw = 0;
431 ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ 431 ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */
432 ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ 432 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */
433 ib.ptr[ib.length_dw++] = handle; 433 ib.ptr[ib.length_dw++] = cpu_to_le32(handle);
434 434
435 ib.ptr[ib.length_dw++] = 0x00000014; /* len */ 435 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */
436 ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ 436 ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */
437 ib.ptr[ib.length_dw++] = upper_32_bits(dummy); 437 ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy));
438 ib.ptr[ib.length_dw++] = dummy; 438 ib.ptr[ib.length_dw++] = cpu_to_le32(dummy);
439 ib.ptr[ib.length_dw++] = 0x00000001; 439 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001);
440 440
441 ib.ptr[ib.length_dw++] = 0x00000008; /* len */ 441 ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008); /* len */
442 ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */ 442 ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001); /* destroy cmd */
443 443
444 for (i = ib.length_dw; i < ib_size_dw; ++i) 444 for (i = ib.length_dw; i < ib_size_dw; ++i)
445 ib.ptr[i] = 0x0; 445 ib.ptr[i] = cpu_to_le32(0x0);
446 446
447 r = radeon_ib_schedule(rdev, &ib, NULL, false); 447 r = radeon_ib_schedule(rdev, &ib, NULL, false);
448 if (r) { 448 if (r) {
@@ -699,12 +699,12 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
699{ 699{
700 uint64_t addr = semaphore->gpu_addr; 700 uint64_t addr = semaphore->gpu_addr;
701 701
702 radeon_ring_write(ring, VCE_CMD_SEMAPHORE); 702 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE));
703 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); 703 radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF));
704 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); 704 radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF));
705 radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0)); 705 radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0)));
706 if (!emit_wait) 706 if (!emit_wait)
707 radeon_ring_write(ring, VCE_CMD_END); 707 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
708 708
709 return true; 709 return true;
710} 710}
@@ -719,10 +719,10 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
719void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 719void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
720{ 720{
721 struct radeon_ring *ring = &rdev->ring[ib->ring]; 721 struct radeon_ring *ring = &rdev->ring[ib->ring];
722 radeon_ring_write(ring, VCE_CMD_IB); 722 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB));
723 radeon_ring_write(ring, ib->gpu_addr); 723 radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr));
724 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr)); 724 radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr)));
725 radeon_ring_write(ring, ib->length_dw); 725 radeon_ring_write(ring, cpu_to_le32(ib->length_dw));
726} 726}
727 727
728/** 728/**
@@ -738,12 +738,12 @@ void radeon_vce_fence_emit(struct radeon_device *rdev,
738 struct radeon_ring *ring = &rdev->ring[fence->ring]; 738 struct radeon_ring *ring = &rdev->ring[fence->ring];
739 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; 739 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
740 740
741 radeon_ring_write(ring, VCE_CMD_FENCE); 741 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE));
742 radeon_ring_write(ring, addr); 742 radeon_ring_write(ring, cpu_to_le32(addr));
743 radeon_ring_write(ring, upper_32_bits(addr)); 743 radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr)));
744 radeon_ring_write(ring, fence->seq); 744 radeon_ring_write(ring, cpu_to_le32(fence->seq));
745 radeon_ring_write(ring, VCE_CMD_TRAP); 745 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP));
746 radeon_ring_write(ring, VCE_CMD_END); 746 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
747} 747}
748 748
749/** 749/**
@@ -765,7 +765,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
765 ring->idx, r); 765 ring->idx, r);
766 return r; 766 return r;
767 } 767 }
768 radeon_ring_write(ring, VCE_CMD_END); 768 radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END));
769 radeon_ring_unlock_commit(rdev, ring, false); 769 radeon_ring_unlock_commit(rdev, ring, false);
770 770
771 for (i = 0; i < rdev->usec_timeout; i++) { 771 for (i = 0; i < rdev->usec_timeout; i++) {
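
The blanket cpu_to_le32() wrapping above reflects that VCE consumes little-endian command words regardless of host endianness. A userspace illustration of what the helper does; my_cpu_to_le32 is a stand-in for the kernel macro and relies on the GCC/Clang __builtin_bswap32:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t my_cpu_to_le32(uint32_t v)
	{
		const union { uint16_t u16; uint8_t u8; } probe = { .u16 = 1 };

		/* Identity on little-endian hosts, byte swap on big-endian ones,
		 * so the device always sees the same byte order in memory. */
		return probe.u8 ? v : __builtin_bswap32(v);
	}

	int main(void)
	{
		uint32_t cmd = 0x01000001; /* the "create cmd" word from above */

		printf("0x%08x\n", my_cpu_to_le32(cmd));
		return 0;
	}
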
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 97a904835759..6244f4e44e9a 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -813,7 +813,7 @@ int rs600_irq_process(struct radeon_device *rdev)
813 status = rs600_irq_ack(rdev); 813 status = rs600_irq_ack(rdev);
814 } 814 }
815 if (queue_hotplug) 815 if (queue_hotplug)
816 schedule_work(&rdev->hotplug_work); 816 schedule_delayed_work(&rdev->hotplug_work, 0);
817 if (queue_hdmi) 817 if (queue_hdmi)
818 schedule_work(&rdev->audio_work); 818 schedule_work(&rdev->audio_work);
819 if (rdev->msi_enabled) { 819 if (rdev->msi_enabled) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 516ca27cfa12..6bc44c24e837 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -207,6 +207,9 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
207{ 207{
208 u32 tmp; 208 u32 tmp;
209 209
210 /* Guess line buffer size to be 8192 pixels */
211 u32 lb_size = 8192;
212
210 /* 213 /*
211 * Line Buffer Setup 214 * Line Buffer Setup
212 * There is a single line buffer shared by both display controllers. 215 * There is a single line buffer shared by both display controllers.
@@ -243,6 +246,13 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
243 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; 246 tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
244 } 247 }
245 WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); 248 WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
249
250 /* Save number of lines the linebuffer leads before the scanout */
251 if (mode1)
252 rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
253
254 if (mode2)
255 rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
246} 256}
247 257
248struct rs690_watermark { 258struct rs690_watermark {
diff --git a/drivers/gpu/drm/radeon/rv730_dpm.c b/drivers/gpu/drm/radeon/rv730_dpm.c
index 3f5e1cf138ba..d37ba2cb886e 100644
--- a/drivers/gpu/drm/radeon/rv730_dpm.c
+++ b/drivers/gpu/drm/radeon/rv730_dpm.c
@@ -464,7 +464,7 @@ void rv730_stop_dpm(struct radeon_device *rdev)
464 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); 464 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);
465 465
466 if (result != PPSMC_Result_OK) 466 if (result != PPSMC_Result_OK)
467 DRM_ERROR("Could not force DPM to low\n"); 467 DRM_DEBUG("Could not force DPM to low\n");
468 468
469 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); 469 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
470 470
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index b9c770745a7a..e830c8935db0 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -193,7 +193,7 @@ void rv770_stop_dpm(struct radeon_device *rdev)
193 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); 193 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);
194 194
195 if (result != PPSMC_Result_OK) 195 if (result != PPSMC_Result_OK)
196 DRM_ERROR("Could not force DPM to low.\n"); 196 DRM_DEBUG("Could not force DPM to low.\n");
197 197
198 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); 198 WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
199 199
@@ -1418,7 +1418,7 @@ int rv770_resume_smc(struct radeon_device *rdev)
1418int rv770_set_sw_state(struct radeon_device *rdev) 1418int rv770_set_sw_state(struct radeon_device *rdev)
1419{ 1419{
1420 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) 1420 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
1421 return -EINVAL; 1421 DRM_DEBUG("rv770_set_sw_state failed\n");
1422 return 0; 1422 return 0;
1423} 1423}
1424 1424
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 07037e32dea3..f878d6962da5 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2376,6 +2376,9 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
2376 c.full = dfixed_div(c, a); 2376 c.full = dfixed_div(c, a);
2377 priority_b_mark = dfixed_trunc(c); 2377 priority_b_mark = dfixed_trunc(c);
2378 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; 2378 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
2379
2380 /* Save number of lines the linebuffer leads before the scanout */
2381 radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
2379 } 2382 }
2380 2383
2381 /* select wm A */ 2384 /* select wm A */
@@ -6848,7 +6851,7 @@ restart_ih:
6848 if (queue_dp) 6851 if (queue_dp)
6849 schedule_work(&rdev->dp_work); 6852 schedule_work(&rdev->dp_work);
6850 if (queue_hotplug) 6853 if (queue_hotplug)
6851 schedule_work(&rdev->hotplug_work); 6854 schedule_delayed_work(&rdev->hotplug_work, 0);
6852 if (queue_thermal && rdev->pm.dpm_enabled) 6855 if (queue_thermal && rdev->pm.dpm_enabled)
6853 schedule_work(&rdev->pm.dpm.thermal.work); 6856 schedule_work(&rdev->pm.dpm.thermal.work);
6854 rdev->ih.rptr = rptr; 6857 rdev->ih.rptr = rptr;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index e72bf46042e0..a82b891ae1fe 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2927,7 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, 2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, 2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
2929 { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, 2929 { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
2930 { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 }, 2930 { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
2931 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, 2931 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
2932 { 0, 0, 0, 0 }, 2932 { 0, 0, 0, 0 },
2933}; 2933};
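/*
 * A standalone sketch of how a zero-terminated quirk table like
 * si_dpm_quirk_list is typically consulted: walk the entries and
 * match on chip and subsystem IDs, applying the clock caps of the
 * first hit. The struct mirrors the fields above; the lookup helper
 * is illustrative, not code from the patch.
 */
#include <stdio.h>
#include <stdint.h>

struct si_dpm_quirk_sketch {
	uint32_t chip_vendor, chip_device;
	uint32_t subsys_vendor, subsys_device;
	uint32_t max_sclk, max_mclk;	/* caps applied when the entry matches */
};

static const struct si_dpm_quirk_sketch *
find_quirk(const struct si_dpm_quirk_sketch *table,
	   uint32_t vendor, uint32_t device,
	   uint32_t sub_vendor, uint32_t sub_device)
{
	for (const struct si_dpm_quirk_sketch *p = table; p->chip_vendor; p++)
		if (p->chip_vendor == vendor && p->chip_device == device &&
		    p->subsys_vendor == sub_vendor &&
		    p->subsys_device == sub_device)
			return p;
	return NULL;	/* no quirk: leave clocks alone */
}

int main(void)
{
	static const struct si_dpm_quirk_sketch table[] = {
		{ 0x1002, 0x6811, 0x1462, 0x2015, 0, 120000 },	/* example entry */
		{ 0, 0, 0, 0, 0, 0 },
	};
	const struct si_dpm_quirk_sketch *q =
	    find_quirk(table, 0x1002, 0x6811, 0x1462, 0x2015);

	printf("max_mclk cap: %u\n", q ? q->max_mclk : 0);
	return 0;
}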
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 8caea0a33dd8..d908321b94ce 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -67,6 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
67 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). 67 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
68 */ 68 */
69 vma->vm_flags &= ~VM_PFNMAP; 69 vma->vm_flags &= ~VM_PFNMAP;
70 vma->vm_pgoff = 0;
70 71
71 ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, 72 ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
72 obj->size, &rk_obj->dma_attrs); 73 obj->size, &rk_obj->dma_attrs);
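/*
 * A small sketch of why the vm_pgoff reset above is needed: DRM's
 * mmap path encodes a per-object "fake offset" in vm_pgoff so
 * userspace can address a specific GEM object, but dma_mmap_attrs()
 * treats vm_pgoff as a page offset *into* the buffer, so a stale fake
 * offset would map the wrong pages. The struct and the mapping helper
 * here are stand-ins, not kernel API.
 */
#include <stdio.h>

struct vma_sketch {
	unsigned long vm_pgoff;	/* page offset the mapper will honour */
};

/* stand-in mapper: rejects an offset beyond the buffer, much as the
 * real DMA mmap would fault for an out-of-range window */
static int map_buffer(struct vma_sketch *vma, unsigned long buf_pages)
{
	if (vma->vm_pgoff >= buf_pages)
		return -1;
	return 0;
}

int main(void)
{
	struct vma_sketch vma = { .vm_pgoff = 0x10000 };	/* DRM fake offset */

	printf("stale offset: %d\n", map_buffer(&vma, 16));	/* fails */
	vma.vm_pgoff = 0;					/* the fix */
	printf("reset offset: %d\n", map_buffer(&vma, 16));	/* succeeds */
	return 0;
}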
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 5d8ae5e49c44..03c47eeadc81 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -374,6 +374,7 @@ static const struct of_device_id vop_driver_dt_match[] = {
374 .data = &rk3288_vop }, 374 .data = &rk3288_vop },
375 {}, 375 {},
376}; 376};
377MODULE_DEVICE_TABLE(of, vop_driver_dt_match);
377 378
378static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) 379static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
379{ 380{
@@ -959,8 +960,8 @@ static int vop_update_plane_event(struct drm_plane *plane,
959 val = (dest.y2 - dest.y1 - 1) << 16; 960 val = (dest.y2 - dest.y1 - 1) << 16;
960 val |= (dest.x2 - dest.x1 - 1) & 0xffff; 961 val |= (dest.x2 - dest.x1 - 1) & 0xffff;
961 VOP_WIN_SET(vop, win, dsp_info, val); 962 VOP_WIN_SET(vop, win, dsp_info, val);
962 val = (dsp_sty - 1) << 16; 963 val = dsp_sty << 16;
963 val |= (dsp_stx - 1) & 0xffff; 964 val |= dsp_stx & 0xffff;
964 VOP_WIN_SET(vop, win, dsp_st, val); 965 VOP_WIN_SET(vop, win, dsp_st, val);
965 VOP_WIN_SET(vop, win, rb_swap, rb_swap); 966 VOP_WIN_SET(vop, win, rb_swap, rb_swap);
966 967
@@ -1289,7 +1290,7 @@ static void vop_win_state_complete(struct vop_win *vop_win,
1289 1290
1290 if (state->event) { 1291 if (state->event) {
1291 spin_lock_irqsave(&drm->event_lock, flags); 1292 spin_lock_irqsave(&drm->event_lock, flags);
1292 drm_send_vblank_event(drm, -1, state->event); 1293 drm_crtc_send_vblank_event(crtc, state->event);
1293 spin_unlock_irqrestore(&drm->event_lock, flags); 1294 spin_unlock_irqrestore(&drm->event_lock, flags);
1294 } 1295 }
1295 1296
@@ -1575,32 +1576,25 @@ static int vop_initial(struct vop *vop)
1575 return PTR_ERR(vop->dclk); 1576 return PTR_ERR(vop->dclk);
1576 } 1577 }
1577 1578
1578 ret = clk_prepare(vop->hclk);
1579 if (ret < 0) {
1580 dev_err(vop->dev, "failed to prepare hclk\n");
1581 return ret;
1582 }
1583
1584 ret = clk_prepare(vop->dclk); 1579 ret = clk_prepare(vop->dclk);
1585 if (ret < 0) { 1580 if (ret < 0) {
1586 dev_err(vop->dev, "failed to prepare dclk\n"); 1581 dev_err(vop->dev, "failed to prepare dclk\n");
1587 goto err_unprepare_hclk; 1582 return ret;
1588 } 1583 }
1589 1584
1590 ret = clk_prepare(vop->aclk); 1585 /* Enable both the hclk and aclk to set up the vop */
1586 ret = clk_prepare_enable(vop->hclk);
1591 if (ret < 0) { 1587 if (ret < 0) {
1592 dev_err(vop->dev, "failed to prepare aclk\n"); 1588 dev_err(vop->dev, "failed to prepare/enable hclk\n");
1593 goto err_unprepare_dclk; 1589 goto err_unprepare_dclk;
1594 } 1590 }
1595 1591
1596 /* 1592 ret = clk_prepare_enable(vop->aclk);
1597 * enable hclk, so that we can config vop register.
1598 */
1599 ret = clk_enable(vop->hclk);
1600 if (ret < 0) { 1593 if (ret < 0) {
1601 dev_err(vop->dev, "failed to prepare aclk\n"); 1594 dev_err(vop->dev, "failed to prepare/enable aclk\n");
1602 goto err_unprepare_aclk; 1595 goto err_disable_hclk;
1603 } 1596 }
1597
1604 /* 1598 /*
1605 * do hclk_reset, reset all vop registers. 1599 * do hclk_reset, reset all vop registers.
1606 */ 1600 */
@@ -1608,7 +1602,7 @@ static int vop_initial(struct vop *vop)
1608 if (IS_ERR(ahb_rst)) { 1602 if (IS_ERR(ahb_rst)) {
1609 dev_err(vop->dev, "failed to get ahb reset\n"); 1603 dev_err(vop->dev, "failed to get ahb reset\n");
1610 ret = PTR_ERR(ahb_rst); 1604 ret = PTR_ERR(ahb_rst);
1611 goto err_disable_hclk; 1605 goto err_disable_aclk;
1612 } 1606 }
1613 reset_control_assert(ahb_rst); 1607 reset_control_assert(ahb_rst);
1614 usleep_range(10, 20); 1608 usleep_range(10, 20);
@@ -1634,26 +1628,25 @@ static int vop_initial(struct vop *vop)
1634 if (IS_ERR(vop->dclk_rst)) { 1628 if (IS_ERR(vop->dclk_rst)) {
1635 dev_err(vop->dev, "failed to get dclk reset\n"); 1629 dev_err(vop->dev, "failed to get dclk reset\n");
1636 ret = PTR_ERR(vop->dclk_rst); 1630 ret = PTR_ERR(vop->dclk_rst);
1637 goto err_unprepare_aclk; 1631 goto err_disable_aclk;
1638 } 1632 }
1639 reset_control_assert(vop->dclk_rst); 1633 reset_control_assert(vop->dclk_rst);
1640 usleep_range(10, 20); 1634 usleep_range(10, 20);
1641 reset_control_deassert(vop->dclk_rst); 1635 reset_control_deassert(vop->dclk_rst);
1642 1636
1643 clk_disable(vop->hclk); 1637 clk_disable(vop->hclk);
1638 clk_disable(vop->aclk);
1644 1639
1645 vop->is_enabled = false; 1640 vop->is_enabled = false;
1646 1641
1647 return 0; 1642 return 0;
1648 1643
1644err_disable_aclk:
1645 clk_disable_unprepare(vop->aclk);
1649err_disable_hclk: 1646err_disable_hclk:
1650 clk_disable(vop->hclk); 1647 clk_disable_unprepare(vop->hclk);
1651err_unprepare_aclk:
1652 clk_unprepare(vop->aclk);
1653err_unprepare_dclk: 1648err_unprepare_dclk:
1654 clk_unprepare(vop->dclk); 1649 clk_unprepare(vop->dclk);
1655err_unprepare_hclk:
1656 clk_unprepare(vop->hclk);
1657 return ret; 1650 return ret;
1658} 1651}
1659 1652
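/*
 * A compilable userspace sketch of the error-unwinding pattern the
 * vop_initial() rework above settles on: acquire resources in order,
 * and on failure release only what was already acquired, in reverse
 * order, via a goto ladder. The clk_* functions here are stubs
 * standing in for the kernel clock API.
 */
#include <stdio.h>

static int fail_aclk = 1;	/* flip to 0 to see the success path */

static int clk_prepare_enable(const char *name)
{
	if (fail_aclk && name[0] == 'a')
		return -1;
	printf("on  %s\n", name);
	return 0;
}

static void clk_disable_unprepare(const char *name)
{
	printf("off %s\n", name);
}

static int vop_initial_sketch(void)
{
	int ret;

	ret = clk_prepare_enable("hclk");
	if (ret)
		return ret;

	ret = clk_prepare_enable("aclk");
	if (ret)
		goto err_disable_hclk;

	/* ... register setup would go here ... */
	return 0;

err_disable_hclk:
	clk_disable_unprepare("hclk");	/* undo only what succeeded */
	return ret;
}

int main(void)
{
	return vop_initial_sketch() ? 1 : 0;
}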
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index 6a954544727f..f154fb1929bd 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -180,7 +180,7 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
180 spin_unlock(&lock->lock); 180 spin_unlock(&lock->lock);
181 } 181 }
182 } else 182 } else
183 wait_event(lock->queue, __ttm_read_lock(lock)); 183 wait_event(lock->queue, __ttm_write_lock(lock));
184 184
185 return ret; 185 return ret;
186} 186}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 7a9f4768591e..265064c62d49 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -168,7 +168,7 @@ static int vc4_get_clock_select(struct drm_crtc *crtc)
168 struct drm_connector *connector; 168 struct drm_connector *connector;
169 169
170 drm_for_each_connector(connector, crtc->dev) { 170 drm_for_each_connector(connector, crtc->dev) {
171 if (connector && connector->state->crtc == crtc) { 171 if (connector->state->crtc == crtc) {
172 struct drm_encoder *encoder = connector->encoder; 172 struct drm_encoder *encoder = connector->encoder;
173 struct vc4_encoder *vc4_encoder = 173 struct vc4_encoder *vc4_encoder =
174 to_vc4_encoder(encoder); 174 to_vc4_encoder(encoder);
@@ -401,7 +401,8 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
401 dlist_next++; 401 dlist_next++;
402 402
403 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 403 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
404 (u32 *)vc4_crtc->dlist - (u32 *)vc4->hvs->dlist); 404 (u32 __iomem *)vc4_crtc->dlist -
405 (u32 __iomem *)vc4->hvs->dlist);
405 406
406 /* Make the next display list start after ours. */ 407 /* Make the next display list start after ours. */
407 vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist); 408 vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist);
@@ -591,14 +592,14 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
591 * that will take too much. 592 * that will take too much.
592 */ 593 */
593 primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY); 594 primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
594 if (!primary_plane) { 595 if (IS_ERR(primary_plane)) {
595 dev_err(dev, "failed to construct primary plane\n"); 596 dev_err(dev, "failed to construct primary plane\n");
596 ret = PTR_ERR(primary_plane); 597 ret = PTR_ERR(primary_plane);
597 goto err; 598 goto err;
598 } 599 }
599 600
600 cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR); 601 cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
601 if (!cursor_plane) { 602 if (IS_ERR(cursor_plane)) {
602 dev_err(dev, "failed to construct cursor plane\n"); 603 dev_err(dev, "failed to construct cursor plane\n");
603 ret = PTR_ERR(cursor_plane); 604 ret = PTR_ERR(cursor_plane);
604 goto err_primary; 605 goto err_primary;
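/*
 * Sketch of the ERR_PTR convention behind the vc4_plane_init checks
 * fixed above: a function returning pointer-or-errno encodes the
 * errno in the pointer value, so callers must test with IS_ERR()
 * rather than for NULL. The macros below are minimal userspace
 * re-implementations for illustration.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static int dummy_plane;

static void *plane_init(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : &dummy_plane;
}

int main(void)
{
	void *p = plane_init(1);

	if (IS_ERR(p)) {	/* a NULL check would sail right past this */
		printf("init failed: %ld\n", PTR_ERR(p));
		return 1;
	}
	return 0;
}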
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 6e730605edcc..d5db9e0f3b73 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -259,7 +259,6 @@ static struct platform_driver vc4_platform_driver = {
259 .remove = vc4_platform_drm_remove, 259 .remove = vc4_platform_drm_remove,
260 .driver = { 260 .driver = {
261 .name = "vc4-drm", 261 .name = "vc4-drm",
262 .owner = THIS_MODULE,
263 .of_match_table = vc4_of_match, 262 .of_match_table = vc4_of_match,
264 }, 263 },
265}; 264};
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index ab1673f672a4..8098c5b21ba4 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -75,10 +75,10 @@ void vc4_hvs_dump_state(struct drm_device *dev)
75 for (i = 0; i < 64; i += 4) { 75 for (i = 0; i < 64; i += 4) {
76 DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n", 76 DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
77 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D", 77 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
78 ((uint32_t *)vc4->hvs->dlist)[i + 0], 78 readl((u32 __iomem *)vc4->hvs->dlist + i + 0),
79 ((uint32_t *)vc4->hvs->dlist)[i + 1], 79 readl((u32 __iomem *)vc4->hvs->dlist + i + 1),
80 ((uint32_t *)vc4->hvs->dlist)[i + 2], 80 readl((u32 __iomem *)vc4->hvs->dlist + i + 2),
81 ((uint32_t *)vc4->hvs->dlist)[i + 3]); 81 readl((u32 __iomem *)vc4->hvs->dlist + i + 3));
82 } 82 }
83} 83}
84 84
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index cdd8b10c0147..887f3caad0be 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -70,7 +70,7 @@ static bool plane_enabled(struct drm_plane_state *state)
70 return state->fb && state->crtc; 70 return state->fb && state->crtc;
71} 71}
72 72
73struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane) 73static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
74{ 74{
75 struct vc4_plane_state *vc4_state; 75 struct vc4_plane_state *vc4_state;
76 76
@@ -97,8 +97,8 @@ struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
97 return &vc4_state->base; 97 return &vc4_state->base;
98} 98}
99 99
100void vc4_plane_destroy_state(struct drm_plane *plane, 100static void vc4_plane_destroy_state(struct drm_plane *plane,
101 struct drm_plane_state *state) 101 struct drm_plane_state *state)
102{ 102{
103 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); 103 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
104 104
@@ -108,7 +108,7 @@ void vc4_plane_destroy_state(struct drm_plane *plane,
108} 108}
109 109
110/* Called during init to allocate the plane's atomic state. */ 110/* Called during init to allocate the plane's atomic state. */
111void vc4_plane_reset(struct drm_plane *plane) 111static void vc4_plane_reset(struct drm_plane *plane)
112{ 112{
113 struct vc4_plane_state *vc4_state; 113 struct vc4_plane_state *vc4_state;
114 114
@@ -157,6 +157,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
157 int crtc_w = state->crtc_w; 157 int crtc_w = state->crtc_w;
158 int crtc_h = state->crtc_h; 158 int crtc_h = state->crtc_h;
159 159
160 if (state->crtc_w << 16 != state->src_w ||
161 state->crtc_h << 16 != state->src_h) {
162 /* We don't support scaling yet, which involves
163 * allocating the LBM memory for scaling temporary
164 * storage, and putting filter kernels in the HVS
165 * context.
166 */
167 return -EINVAL;
168 }
169
160 if (crtc_x < 0) { 170 if (crtc_x < 0) {
161 offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x; 171 offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x;
162 crtc_w += crtc_x; 172 crtc_w += crtc_x;
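/*
 * Sketch of the no-scaling test added above: DRM plane source
 * coordinates are 16.16 fixed point, so an unscaled plane satisfies
 * crtc_w << 16 == src_w (and likewise for the height). The sample
 * resolutions are made up for the demonstration.
 */
#include <stdio.h>
#include <stdint.h>

static int is_scaled(uint32_t crtc_w, uint32_t crtc_h,
		     uint32_t src_w, uint32_t src_h)
{
	return (crtc_w << 16) != src_w || (crtc_h << 16) != src_h;
}

int main(void)
{
	printf("%d\n", is_scaled(1920, 1080, 1920 << 16, 1080 << 16)); /* 0 */
	printf("%d\n", is_scaled(1920, 1080,  960 << 16,  540 << 16)); /* 1 */
	return 0;
}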
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index f545913a56c7..578fe0a9324c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -412,7 +412,7 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
412 .save = virtio_gpu_conn_save, 412 .save = virtio_gpu_conn_save,
413 .restore = virtio_gpu_conn_restore, 413 .restore = virtio_gpu_conn_restore,
414 .detect = virtio_gpu_conn_detect, 414 .detect = virtio_gpu_conn_detect,
415 .fill_modes = drm_helper_probe_single_connector_modes, 415 .fill_modes = drm_helper_probe_single_connector_modes_nomerge,
416 .destroy = virtio_gpu_conn_destroy, 416 .destroy = virtio_gpu_conn_destroy,
417 .reset = drm_atomic_helper_connector_reset, 417 .reset = drm_atomic_helper_connector_reset,
418 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 418 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a09cf8529b9f..c49812b80dd0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1233,6 +1233,7 @@ static void vmw_master_drop(struct drm_device *dev,
1233 1233
1234 vmw_fp->locked_master = drm_master_get(file_priv->master); 1234 vmw_fp->locked_master = drm_master_get(file_priv->master);
1235 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); 1235 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
1236 vmw_kms_legacy_hotspot_clear(dev_priv);
1236 if (unlikely((ret != 0))) { 1237 if (unlikely((ret != 0))) {
1237 DRM_ERROR("Unable to lock TTM at VT switch.\n"); 1238 DRM_ERROR("Unable to lock TTM at VT switch.\n");
1238 drm_master_put(&vmw_fp->locked_master); 1239 drm_master_put(&vmw_fp->locked_master);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index a8ae9dfb83b7..469cdd520615 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -925,6 +925,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
925 uint32_t num_clips); 925 uint32_t num_clips);
926int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, 926int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
927 struct drm_file *file_priv); 927 struct drm_file *file_priv);
928void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
928 929
929int vmw_dumb_create(struct drm_file *file_priv, 930int vmw_dumb_create(struct drm_file *file_priv,
930 struct drm_device *dev, 931 struct drm_device *dev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index a8baf5f5e765..b6a0806b06bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -390,7 +390,7 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
390 else if (ctx_id == SVGA3D_INVALID_ID) 390 else if (ctx_id == SVGA3D_INVALID_ID)
391 ret = vmw_local_fifo_reserve(dev_priv, bytes); 391 ret = vmw_local_fifo_reserve(dev_priv, bytes);
392 else { 392 else {
393 WARN_ON("Command buffer has not been allocated.\n"); 393 WARN(1, "Command buffer has not been allocated.\n");
394 ret = NULL; 394 ret = NULL;
395 } 395 }
396 if (IS_ERR_OR_NULL(ret)) { 396 if (IS_ERR_OR_NULL(ret)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 9fcd7f82995c..9b4bb9e74d73 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -133,13 +133,19 @@ void vmw_cursor_update_position(struct vmw_private *dev_priv,
133 vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); 133 vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
134} 134}
135 135
136int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, 136
137 uint32_t handle, uint32_t width, uint32_t height) 137/*
138 * vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback.
139 */
140int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
141 uint32_t handle, uint32_t width, uint32_t height,
142 int32_t hot_x, int32_t hot_y)
138{ 143{
139 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 144 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
140 struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 145 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
141 struct vmw_surface *surface = NULL; 146 struct vmw_surface *surface = NULL;
142 struct vmw_dma_buffer *dmabuf = NULL; 147 struct vmw_dma_buffer *dmabuf = NULL;
148 s32 hotspot_x, hotspot_y;
143 int ret; 149 int ret;
144 150
145 /* 151 /*
@@ -151,6 +157,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
151 */ 157 */
152 drm_modeset_unlock_crtc(crtc); 158 drm_modeset_unlock_crtc(crtc);
153 drm_modeset_lock_all(dev_priv->dev); 159 drm_modeset_lock_all(dev_priv->dev);
160 hotspot_x = hot_x + du->hotspot_x;
161 hotspot_y = hot_y + du->hotspot_y;
154 162
155 /* A lot of the code assumes this */ 163 /* A lot of the code assumes this */
156 if (handle && (width != 64 || height != 64)) { 164 if (handle && (width != 64 || height != 64)) {
@@ -187,31 +195,34 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
187 vmw_dmabuf_unreference(&du->cursor_dmabuf); 195 vmw_dmabuf_unreference(&du->cursor_dmabuf);
188 196
189 /* setup new image */ 197 /* setup new image */
198 ret = 0;
190 if (surface) { 199 if (surface) {
191 /* vmw_user_surface_lookup takes one reference */ 200 /* vmw_user_surface_lookup takes one reference */
192 du->cursor_surface = surface; 201 du->cursor_surface = surface;
193 202
194 du->cursor_surface->snooper.crtc = crtc; 203 du->cursor_surface->snooper.crtc = crtc;
195 du->cursor_age = du->cursor_surface->snooper.age; 204 du->cursor_age = du->cursor_surface->snooper.age;
196 vmw_cursor_update_image(dev_priv, surface->snooper.image, 205 ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
197 64, 64, du->hotspot_x, du->hotspot_y); 206 64, 64, hotspot_x, hotspot_y);
198 } else if (dmabuf) { 207 } else if (dmabuf) {
199 /* vmw_user_surface_lookup takes one reference */ 208 /* vmw_user_surface_lookup takes one reference */
200 du->cursor_dmabuf = dmabuf; 209 du->cursor_dmabuf = dmabuf;
201 210
202 ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height, 211 ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
203 du->hotspot_x, du->hotspot_y); 212 hotspot_x, hotspot_y);
204 } else { 213 } else {
205 vmw_cursor_update_position(dev_priv, false, 0, 0); 214 vmw_cursor_update_position(dev_priv, false, 0, 0);
206 ret = 0;
207 goto out; 215 goto out;
208 } 216 }
209 217
210 vmw_cursor_update_position(dev_priv, true, 218 if (!ret) {
211 du->cursor_x + du->hotspot_x, 219 vmw_cursor_update_position(dev_priv, true,
212 du->cursor_y + du->hotspot_y); 220 du->cursor_x + hotspot_x,
221 du->cursor_y + hotspot_y);
222 du->core_hotspot_x = hot_x;
223 du->core_hotspot_y = hot_y;
224 }
213 225
214 ret = 0;
215out: 226out:
216 drm_modeset_unlock_all(dev_priv->dev); 227 drm_modeset_unlock_all(dev_priv->dev);
217 drm_modeset_lock_crtc(crtc, crtc->cursor); 228 drm_modeset_lock_crtc(crtc, crtc->cursor);
@@ -239,8 +250,10 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
239 drm_modeset_lock_all(dev_priv->dev); 250 drm_modeset_lock_all(dev_priv->dev);
240 251
241 vmw_cursor_update_position(dev_priv, shown, 252 vmw_cursor_update_position(dev_priv, shown,
242 du->cursor_x + du->hotspot_x, 253 du->cursor_x + du->hotspot_x +
243 du->cursor_y + du->hotspot_y); 254 du->core_hotspot_x,
255 du->cursor_y + du->hotspot_y +
256 du->core_hotspot_y);
244 257
245 drm_modeset_unlock_all(dev_priv->dev); 258 drm_modeset_unlock_all(dev_priv->dev);
246 drm_modeset_lock_crtc(crtc, crtc->cursor); 259 drm_modeset_lock_crtc(crtc, crtc->cursor);
@@ -334,6 +347,29 @@ err_unreserve:
334 ttm_bo_unreserve(bo); 347 ttm_bo_unreserve(bo);
335} 348}
336 349
350/**
351 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
352 *
353 * @dev_priv: Pointer to the device private struct.
354 *
355 * Clears all legacy hotspots.
356 */
357void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
358{
359 struct drm_device *dev = dev_priv->dev;
360 struct vmw_display_unit *du;
361 struct drm_crtc *crtc;
362
363 drm_modeset_lock_all(dev);
364 drm_for_each_crtc(crtc, dev) {
365 du = vmw_crtc_to_du(crtc);
366
367 du->hotspot_x = 0;
368 du->hotspot_y = 0;
369 }
370 drm_modeset_unlock_all(dev);
371}
372
337void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) 373void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
338{ 374{
339 struct drm_device *dev = dev_priv->dev; 375 struct drm_device *dev = dev_priv->dev;
@@ -351,7 +387,9 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
351 du->cursor_age = du->cursor_surface->snooper.age; 387 du->cursor_age = du->cursor_surface->snooper.age;
352 vmw_cursor_update_image(dev_priv, 388 vmw_cursor_update_image(dev_priv,
353 du->cursor_surface->snooper.image, 389 du->cursor_surface->snooper.image,
354 64, 64, du->hotspot_x, du->hotspot_y); 390 64, 64,
391 du->hotspot_x + du->core_hotspot_x,
392 du->hotspot_y + du->core_hotspot_y);
355 } 393 }
356 394
357 mutex_unlock(&dev->mode_config.mutex); 395 mutex_unlock(&dev->mode_config.mutex);
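/*
 * Sketch of the hotspot bookkeeping introduced above: the display
 * unit keeps its own hotspot offset, the DRM core supplies another
 * via the cursor_set2 callback, and the final cursor position adds
 * both. Plain userspace arithmetic; field names follow the diff, the
 * sample values do not.
 */
#include <stdio.h>
#include <stdint.h>

struct du_sketch {
	int32_t hotspot_x, hotspot_y;		/* driver-private offset */
	int32_t core_hotspot_x, core_hotspot_y;	/* from the DRM core */
	int32_t cursor_x, cursor_y;
};

static void position(const struct du_sketch *du, int32_t *x, int32_t *y)
{
	*x = du->cursor_x + du->hotspot_x + du->core_hotspot_x;
	*y = du->cursor_y + du->hotspot_y + du->core_hotspot_y;
}

int main(void)
{
	struct du_sketch du = { 2, 2, 5, 7, 100, 100 };
	int32_t x, y;

	position(&du, &x, &y);
	printf("%d %d\n", x, y);	/* 107 109 */
	return 0;
}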
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 782df7ca9794..edd81503516d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -159,6 +159,8 @@ struct vmw_display_unit {
159 159
160 int hotspot_x; 160 int hotspot_x;
161 int hotspot_y; 161 int hotspot_y;
162 s32 core_hotspot_x;
163 s32 core_hotspot_y;
162 164
163 unsigned unit; 165 unsigned unit;
164 166
@@ -193,8 +195,9 @@ void vmw_du_crtc_restore(struct drm_crtc *crtc);
193void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 195void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
194 u16 *r, u16 *g, u16 *b, 196 u16 *r, u16 *g, u16 *b,
195 uint32_t start, uint32_t size); 197 uint32_t start, uint32_t size);
196int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, 198int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
197 uint32_t handle, uint32_t width, uint32_t height); 199 uint32_t handle, uint32_t width, uint32_t height,
200 int32_t hot_x, int32_t hot_y);
198int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); 201int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
199int vmw_du_connector_dpms(struct drm_connector *connector, int mode); 202int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
200void vmw_du_connector_save(struct drm_connector *connector); 203void vmw_du_connector_save(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index bb63e4d795fa..52caecb4502e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -297,7 +297,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
297static struct drm_crtc_funcs vmw_legacy_crtc_funcs = { 297static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
298 .save = vmw_du_crtc_save, 298 .save = vmw_du_crtc_save,
299 .restore = vmw_du_crtc_restore, 299 .restore = vmw_du_crtc_restore,
300 .cursor_set = vmw_du_crtc_cursor_set, 300 .cursor_set2 = vmw_du_crtc_cursor_set2,
301 .cursor_move = vmw_du_crtc_cursor_move, 301 .cursor_move = vmw_du_crtc_cursor_move,
302 .gamma_set = vmw_du_crtc_gamma_set, 302 .gamma_set = vmw_du_crtc_gamma_set,
303 .destroy = vmw_ldu_crtc_destroy, 303 .destroy = vmw_ldu_crtc_destroy,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b96d1ab610c5..13926ff192e3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -533,7 +533,7 @@ out_no_fence:
533static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { 533static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
534 .save = vmw_du_crtc_save, 534 .save = vmw_du_crtc_save,
535 .restore = vmw_du_crtc_restore, 535 .restore = vmw_du_crtc_restore,
536 .cursor_set = vmw_du_crtc_cursor_set, 536 .cursor_set2 = vmw_du_crtc_cursor_set2,
537 .cursor_move = vmw_du_crtc_cursor_move, 537 .cursor_move = vmw_du_crtc_cursor_move,
538 .gamma_set = vmw_du_crtc_gamma_set, 538 .gamma_set = vmw_du_crtc_gamma_set,
539 .destroy = vmw_sou_crtc_destroy, 539 .destroy = vmw_sou_crtc_destroy,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index b1fc1c02792d..f823fc3efed7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1043,7 +1043,7 @@ out_finish:
1043static struct drm_crtc_funcs vmw_stdu_crtc_funcs = { 1043static struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
1044 .save = vmw_du_crtc_save, 1044 .save = vmw_du_crtc_save,
1045 .restore = vmw_du_crtc_restore, 1045 .restore = vmw_du_crtc_restore,
1046 .cursor_set = vmw_du_crtc_cursor_set, 1046 .cursor_set2 = vmw_du_crtc_cursor_set2,
1047 .cursor_move = vmw_du_crtc_cursor_move, 1047 .cursor_move = vmw_du_crtc_cursor_move,
1048 .gamma_set = vmw_du_crtc_gamma_set, 1048 .gamma_set = vmw_du_crtc_gamma_set,
1049 .destroy = vmw_stdu_crtc_destroy, 1049 .destroy = vmw_stdu_crtc_destroy,
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index ba47b30d28fa..f2e13eb8339f 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -28,6 +28,7 @@
28#include <linux/irqchip/chained_irq.h> 28#include <linux/irqchip/chained_irq.h>
29#include <linux/irqdomain.h> 29#include <linux/irqdomain.h>
30#include <linux/of_device.h> 30#include <linux/of_device.h>
31#include <linux/of_graph.h>
31 32
32#include <drm/drm_fourcc.h> 33#include <drm/drm_fourcc.h>
33 34
@@ -993,12 +994,26 @@ static void platform_device_unregister_children(struct platform_device *pdev)
993struct ipu_platform_reg { 994struct ipu_platform_reg {
994 struct ipu_client_platformdata pdata; 995 struct ipu_client_platformdata pdata;
995 const char *name; 996 const char *name;
996 int reg_offset;
997}; 997};
998 998
999/* These must be in the order of the corresponding device tree port nodes */
999static const struct ipu_platform_reg client_reg[] = { 1000static const struct ipu_platform_reg client_reg[] = {
1000 { 1001 {
1001 .pdata = { 1002 .pdata = {
1003 .csi = 0,
1004 .dma[0] = IPUV3_CHANNEL_CSI0,
1005 .dma[1] = -EINVAL,
1006 },
1007 .name = "imx-ipuv3-camera",
1008 }, {
1009 .pdata = {
1010 .csi = 1,
1011 .dma[0] = IPUV3_CHANNEL_CSI1,
1012 .dma[1] = -EINVAL,
1013 },
1014 .name = "imx-ipuv3-camera",
1015 }, {
1016 .pdata = {
1002 .di = 0, 1017 .di = 0,
1003 .dc = 5, 1018 .dc = 5,
1004 .dp = IPU_DP_FLOW_SYNC_BG, 1019 .dp = IPU_DP_FLOW_SYNC_BG,
@@ -1015,22 +1030,6 @@ static const struct ipu_platform_reg client_reg[] = {
1015 .dma[1] = -EINVAL, 1030 .dma[1] = -EINVAL,
1016 }, 1031 },
1017 .name = "imx-ipuv3-crtc", 1032 .name = "imx-ipuv3-crtc",
1018 }, {
1019 .pdata = {
1020 .csi = 0,
1021 .dma[0] = IPUV3_CHANNEL_CSI0,
1022 .dma[1] = -EINVAL,
1023 },
1024 .reg_offset = IPU_CM_CSI0_REG_OFS,
1025 .name = "imx-ipuv3-camera",
1026 }, {
1027 .pdata = {
1028 .csi = 1,
1029 .dma[0] = IPUV3_CHANNEL_CSI1,
1030 .dma[1] = -EINVAL,
1031 },
1032 .reg_offset = IPU_CM_CSI1_REG_OFS,
1033 .name = "imx-ipuv3-camera",
1034 }, 1033 },
1035}; 1034};
1036 1035
@@ -1051,22 +1050,30 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
1051 for (i = 0; i < ARRAY_SIZE(client_reg); i++) { 1050 for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
1052 const struct ipu_platform_reg *reg = &client_reg[i]; 1051 const struct ipu_platform_reg *reg = &client_reg[i];
1053 struct platform_device *pdev; 1052 struct platform_device *pdev;
1054 struct resource res; 1053
1055 1054 pdev = platform_device_alloc(reg->name, id++);
1056 if (reg->reg_offset) { 1055 if (!pdev) {
1057 memset(&res, 0, sizeof(res)); 1056 ret = -ENOMEM;
1058 res.flags = IORESOURCE_MEM; 1057 goto err_register;
1059 res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset; 1058 }
1060 res.end = res.start + PAGE_SIZE - 1; 1059
1061 pdev = platform_device_register_resndata(dev, reg->name, 1060 pdev->dev.parent = dev;
1062 id++, &res, 1, &reg->pdata, sizeof(reg->pdata)); 1061
1063 } else { 1062 /* Associate subdevice with the corresponding port node */
1064 pdev = platform_device_register_data(dev, reg->name, 1063 pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
1065 id++, &reg->pdata, sizeof(reg->pdata)); 1064 if (!pdev->dev.of_node) {
1065 dev_err(dev, "missing port@%d node in %s\n", i,
1066 dev->of_node->full_name);
1067 ret = -ENODEV;
1068 goto err_register;
1066 } 1069 }
1067 1070
1068 if (IS_ERR(pdev)) { 1071 ret = platform_device_add_data(pdev, &reg->pdata,
1069 ret = PTR_ERR(pdev); 1072 sizeof(reg->pdata));
1073 if (!ret)
1074 ret = platform_device_add(pdev);
1075 if (ret) {
1076 platform_device_put(pdev);
1070 goto err_register; 1077 goto err_register;
1071 } 1078 }
1072 } 1079 }
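/*
 * Sketch of the registration sequence the IPU code switches to above:
 * allocate the device, attach platform data, then add it, and on any
 * failure after the alloc drop the reference with put() rather than
 * freeing directly. The functions here are local stubs with
 * simplified signatures, not the kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

struct pdev { int added; };

static struct pdev *platform_device_alloc(void)
{
	return calloc(1, sizeof(struct pdev));
}

static int platform_device_add_data(struct pdev *p) { (void)p; return 0; }
static int platform_device_add(struct pdev *p) { p->added = 1; return 0; }
static void platform_device_put(struct pdev *p) { free(p); } /* drops the ref */

static int register_client(void)
{
	struct pdev *pdev = platform_device_alloc();
	int ret;

	if (!pdev)
		return -1;	/* -ENOMEM in the kernel */

	ret = platform_device_add_data(pdev);
	if (!ret)
		ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);	/* not a plain free once allocated */
		return ret;
	}
	return 0;
}

int main(void) { return register_client(); }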
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 3166e4bc4eb6..9abcaa53bd25 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -395,8 +395,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
395 set_current_state(interruptible ? 395 set_current_state(interruptible ?
396 TASK_INTERRUPTIBLE : 396 TASK_INTERRUPTIBLE :
397 TASK_UNINTERRUPTIBLE); 397 TASK_UNINTERRUPTIBLE);
398 if (signal_pending(current)) { 398 if (interruptible && signal_pending(current)) {
399 rc = -EINTR; 399 __set_current_state(TASK_RUNNING);
400 remove_wait_queue(&vga_wait_queue, &wait);
401 rc = -ERESTARTSYS;
400 break; 402 break;
401 } 403 }
402 schedule(); 404 schedule();
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index ac1feea51be3..8b78a7f1f779 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -316,11 +316,6 @@
316#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 316#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
317 317
318#define USB_VENDOR_ID_ELAN 0x04f3 318#define USB_VENDOR_ID_ELAN 0x04f3
319#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
320#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
321#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103
322#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c 0x010c
323#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
324 319
325#define USB_VENDOR_ID_ELECOM 0x056e 320#define USB_VENDOR_ID_ELECOM 0x056e
326#define USB_DEVICE_ID_ELECOM_BM084 0x0061 321#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -609,6 +604,7 @@
609#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 604#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
610#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f 605#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
611#define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306 606#define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306
607#define USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS 0xc24d
612#define USB_DEVICE_ID_LOGITECH_MOUSE_C01A 0xc01a 608#define USB_DEVICE_ID_LOGITECH_MOUSE_C01A 0xc01a
613#define USB_DEVICE_ID_LOGITECH_MOUSE_C05A 0xc05a 609#define USB_DEVICE_ID_LOGITECH_MOUSE_C05A 0xc05a
614#define USB_DEVICE_ID_LOGITECH_MOUSE_C06A 0xc06a 610#define USB_DEVICE_ID_LOGITECH_MOUSE_C06A 0xc06a
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index c20ac76c0a8c..c690fae02cf8 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -665,8 +665,9 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
665 struct lg_drv_data *drv_data; 665 struct lg_drv_data *drv_data;
666 int ret; 666 int ret;
667 667
668 /* Only work with the 1st interface (G29 presents multiple) */ 668 /* The G29 only works with the 1st interface */
669 if (iface_num != 0) { 669 if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
670 (iface_num != 0)) {
670 dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num); 671 dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num);
671 return -ENODEV; 672 return -ENODEV;
672 } 673 }
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 94bb137abe32..7dd0953cd70f 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -72,11 +72,7 @@ static const struct hid_blacklist {
72 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, 72 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
73 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 73 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
74 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, 74 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
75 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, 75 { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
76 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
77 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL },
78 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c, HID_QUIRK_ALWAYS_POLL },
79 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
80 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 76 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
81 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 77 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
82 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 78 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
@@ -84,6 +80,7 @@ static const struct hid_blacklist {
84 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, 80 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
85 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, 81 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
86 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, 82 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
83 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
87 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL }, 84 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
88 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, 85 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
89 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, 86 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
@@ -339,7 +336,8 @@ static const struct hid_blacklist *usbhid_exists_squirk(const u16 idVendor,
339 336
340 for (; hid_blacklist[n].idVendor; n++) 337 for (; hid_blacklist[n].idVendor; n++)
341 if (hid_blacklist[n].idVendor == idVendor && 338 if (hid_blacklist[n].idVendor == idVendor &&
342 hid_blacklist[n].idProduct == idProduct) 339 (hid_blacklist[n].idProduct == (__u16) HID_ANY_ID ||
340 hid_blacklist[n].idProduct == idProduct))
343 bl_entry = &hid_blacklist[n]; 341 bl_entry = &hid_blacklist[n];
344 342
345 if (bl_entry != NULL) 343 if (bl_entry != NULL)
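/*
 * Sketch of the wildcard match introduced above: a blacklist entry
 * whose product id is HID_ANY_ID now applies to every product of
 * that vendor. In the kernel HID_ANY_ID is (~0) and the comparison
 * truncates it with a (__u16) cast; here the truncated value 0xffff
 * is used directly, and the flag value is a placeholder.
 */
#include <stdio.h>
#include <stdint.h>

#define HID_ANY_ID 0xffff

struct quirk_sketch { uint16_t vendor, product; uint32_t flags; };

static const struct quirk_sketch quirks[] = {
	{ 0x04f3, HID_ANY_ID, 0x1 },	/* all products of this vendor */
	{ 0, 0, 0 },
};

static uint32_t lookup(uint16_t vendor, uint16_t product)
{
	for (const struct quirk_sketch *q = quirks; q->vendor; q++)
		if (q->vendor == vendor &&
		    (q->product == HID_ANY_ID || q->product == product))
			return q->flags;
	return 0;
}

int main(void)
{
	printf("0x%x\n", lookup(0x04f3, 0x0089));	/* hits the wildcard */
	return 0;
}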
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 8b29949507d1..01a4f05c1642 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2481,7 +2481,7 @@ void wacom_setup_device_quirks(struct wacom *wacom)
2481 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { 2481 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
2482 if (features->touch_max) 2482 if (features->touch_max)
2483 features->device_type |= WACOM_DEVICETYPE_TOUCH; 2483 features->device_type |= WACOM_DEVICETYPE_TOUCH;
2484 if (features->type >= INTUOSHT || features->type <= BAMBOO_PT) 2484 if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
2485 features->device_type |= WACOM_DEVICETYPE_PAD; 2485 features->device_type |= WACOM_DEVICETYPE_PAD;
2486 2486
2487 features->x_max = 4096; 2487 features->x_max = 4096;
@@ -3213,7 +3213,8 @@ static const struct wacom_features wacom_features_0x32F =
3213 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; 3213 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
3214static const struct wacom_features wacom_features_0x336 = 3214static const struct wacom_features wacom_features_0x336 =
3215 { "Wacom DTU1141", 23472, 13203, 1023, 0, 3215 { "Wacom DTU1141", 23472, 13203, 1023, 0,
3216 DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 }; 3216 DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
3217 WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
3217static const struct wacom_features wacom_features_0x57 = 3218static const struct wacom_features wacom_features_0x57 =
3218 { "Wacom DTK2241", 95640, 54060, 2047, 63, 3219 { "Wacom DTK2241", 95640, 54060, 2047, 63,
3219 DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6, 3220 DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
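/*
 * Sketch of the operator fix in wacom_setup_device_quirks() above:
 * with `type >= INTUOSHT || type <= BAMBOO_PT` every type value
 * satisfies at least one side, so the PAD flag was set for all
 * devices; `&&` restricts it to the intended [INTUOSHT, BAMBOO_PT]
 * range. The enum values are invented for the demonstration.
 */
#include <stdio.h>

enum { PENPARTNER = 1, INTUOSHT = 10, BAMBOO_PT = 12, WACOM_24HD = 20 };

static int is_pad_or(int t)  { return t >= INTUOSHT || t <= BAMBOO_PT; }
static int is_pad_and(int t) { return t >= INTUOSHT && t <= BAMBOO_PT; }

int main(void)
{
	int types[] = { PENPARTNER, INTUOSHT, BAMBOO_PT, WACOM_24HD };

	for (int i = 0; i < 4; i++)
		printf("type=%2d  ||:%d  &&:%d\n",
		       types[i], is_pad_or(types[i]), is_pad_and(types[i]));
	return 0;
}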
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 842b0043ad94..80a73bfc1a65 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -324,6 +324,7 @@ config SENSORS_APPLESMC
324config SENSORS_ARM_SCPI 324config SENSORS_ARM_SCPI
325 tristate "ARM SCPI Sensors" 325 tristate "ARM SCPI Sensors"
326 depends on ARM_SCPI_PROTOCOL 326 depends on ARM_SCPI_PROTOCOL
327 depends on THERMAL || !THERMAL_OF
327 help 328 help
328 This driver provides support for temperature, voltage, current 329 This driver provides support for temperature, voltage, current
329 and power sensors available on ARM Ltd's SCP based platforms. The 330 and power sensors available on ARM Ltd's SCP based platforms. The
@@ -1216,6 +1217,7 @@ config SENSORS_PWM_FAN
1216config SENSORS_SHT15 1217config SENSORS_SHT15
1217 tristate "Sensiron humidity and temperature sensors. SHT15 and compat." 1218 tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
1218 depends on GPIOLIB || COMPILE_TEST 1219 depends on GPIOLIB || COMPILE_TEST
1220 select BITREVERSE
1219 help 1221 help
1220 If you say yes here you get support for the Sensirion SHT10, SHT11, 1222 If you say yes here you get support for the Sensirion SHT10, SHT11,
1221 SHT15, SHT71, SHT75 humidity and temperature sensors. 1223 SHT15, SHT71, SHT75 humidity and temperature sensors.
@@ -1471,6 +1473,7 @@ config SENSORS_INA209
1471config SENSORS_INA2XX 1473config SENSORS_INA2XX
1472 tristate "Texas Instruments INA219 and compatibles" 1474 tristate "Texas Instruments INA219 and compatibles"
1473 depends on I2C 1475 depends on I2C
1476 select REGMAP_I2C
1474 help 1477 help
1475 If you say yes here you get support for INA219, INA220, INA226, 1478 If you say yes here you get support for INA219, INA220, INA226,
1476 INA230, and INA231 power monitor chips. 1479 INA230, and INA231 power monitor chips.
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 1f5e956941b1..0af7fd311979 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -537,7 +537,7 @@ static int applesmc_init_index(struct applesmc_registers *s)
537static int applesmc_init_smcreg_try(void) 537static int applesmc_init_smcreg_try(void)
538{ 538{
539 struct applesmc_registers *s = &smcreg; 539 struct applesmc_registers *s = &smcreg;
540 bool left_light_sensor, right_light_sensor; 540 bool left_light_sensor = 0, right_light_sensor = 0;
541 unsigned int count; 541 unsigned int count;
542 u8 tmp[1]; 542 u8 tmp[1];
543 int ret; 543 int ret;
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 2c1241bbf9af..7e20567bc369 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -117,7 +117,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
117 struct scpi_ops *scpi_ops; 117 struct scpi_ops *scpi_ops;
118 struct device *hwdev, *dev = &pdev->dev; 118 struct device *hwdev, *dev = &pdev->dev;
119 struct scpi_sensors *scpi_sensors; 119 struct scpi_sensors *scpi_sensors;
120 int ret; 120 int ret, idx;
121 121
122 scpi_ops = get_scpi_ops(); 122 scpi_ops = get_scpi_ops();
123 if (!scpi_ops) 123 if (!scpi_ops)
@@ -146,8 +146,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
146 146
147 scpi_sensors->scpi_ops = scpi_ops; 147 scpi_sensors->scpi_ops = scpi_ops;
148 148
149 for (i = 0; i < nr_sensors; i++) { 149 for (i = 0, idx = 0; i < nr_sensors; i++) {
150 struct sensor_data *sensor = &scpi_sensors->data[i]; 150 struct sensor_data *sensor = &scpi_sensors->data[idx];
151 151
152 ret = scpi_ops->sensor_get_info(i, &sensor->info); 152 ret = scpi_ops->sensor_get_info(i, &sensor->info);
153 if (ret) 153 if (ret)
@@ -183,7 +183,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
183 num_power++; 183 num_power++;
184 break; 184 break;
185 default: 185 default:
186 break; 186 continue;
187 } 187 }
188 188
189 sensor->dev_attr_input.attr.mode = S_IRUGO; 189 sensor->dev_attr_input.attr.mode = S_IRUGO;
@@ -194,11 +194,12 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
194 sensor->dev_attr_label.show = scpi_show_label; 194 sensor->dev_attr_label.show = scpi_show_label;
195 sensor->dev_attr_label.attr.name = sensor->label; 195 sensor->dev_attr_label.attr.name = sensor->label;
196 196
197 scpi_sensors->attrs[i << 1] = &sensor->dev_attr_input.attr; 197 scpi_sensors->attrs[idx << 1] = &sensor->dev_attr_input.attr;
198 scpi_sensors->attrs[(i << 1) + 1] = &sensor->dev_attr_label.attr; 198 scpi_sensors->attrs[(idx << 1) + 1] = &sensor->dev_attr_label.attr;
199 199
200 sysfs_attr_init(scpi_sensors->attrs[i << 1]); 200 sysfs_attr_init(scpi_sensors->attrs[idx << 1]);
201 sysfs_attr_init(scpi_sensors->attrs[(i << 1) + 1]); 201 sysfs_attr_init(scpi_sensors->attrs[(idx << 1) + 1]);
202 idx++;
202 } 203 }
203 204
204 scpi_sensors->group.attrs = scpi_sensors->attrs; 205 scpi_sensors->group.attrs = scpi_sensors->attrs;
@@ -236,8 +237,8 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
236 237
237 zone->sensor_id = i; 238 zone->sensor_id = i;
238 zone->scpi_sensors = scpi_sensors; 239 zone->scpi_sensors = scpi_sensors;
239 zone->tzd = thermal_zone_of_sensor_register(dev, i, zone, 240 zone->tzd = thermal_zone_of_sensor_register(dev,
240 &scpi_sensor_ops); 241 sensor->info.sensor_id, zone, &scpi_sensor_ops);
241 /* 242 /*
242 * The call to thermal_zone_of_sensor_register returns 243 * The call to thermal_zone_of_sensor_register returns
243 * an error for sensors that are not associated with 244 * an error for sensors that are not associated with
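/*
 * Sketch of the dense-index pattern the scpi-hwmon fix above adopts:
 * iterate the source list with i, but write accepted entries at idx,
 * so sensors of an unknown class leave no holes in the attribute
 * array. The class values are placeholders.
 */
#include <stdio.h>

int main(void)
{
	int classes[] = { 0 /* temp */, 9 /* unknown */, 1 /* volt */, 2 /* curr */ };
	int kept[4], idx = 0;

	for (int i = 0; i < 4; i++) {
		if (classes[i] > 3)
			continue;	/* unknown class: skip, do not advance idx */
		kept[idx++] = classes[i];
	}
	for (int i = 0; i < idx; i++)
		printf("%d ", kept[i]);
	printf("\n");
	return 0;
}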
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 65482624ea2c..5289aa0980a8 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -58,6 +58,7 @@ struct tmp102 {
58 u16 config_orig; 58 u16 config_orig;
59 unsigned long last_update; 59 unsigned long last_update;
60 int temp[3]; 60 int temp[3];
61 bool first_time;
61}; 62};
62 63
63/* convert left adjusted 13-bit TMP102 register value to milliCelsius */ 64/* convert left adjusted 13-bit TMP102 register value to milliCelsius */
@@ -93,6 +94,7 @@ static struct tmp102 *tmp102_update_device(struct device *dev)
93 tmp102->temp[i] = tmp102_reg_to_mC(status); 94 tmp102->temp[i] = tmp102_reg_to_mC(status);
94 } 95 }
95 tmp102->last_update = jiffies; 96 tmp102->last_update = jiffies;
97 tmp102->first_time = false;
96 } 98 }
97 mutex_unlock(&tmp102->lock); 99 mutex_unlock(&tmp102->lock);
98 return tmp102; 100 return tmp102;
@@ -102,6 +104,12 @@ static int tmp102_read_temp(void *dev, int *temp)
102{ 104{
103 struct tmp102 *tmp102 = tmp102_update_device(dev); 105 struct tmp102 *tmp102 = tmp102_update_device(dev);
104 106
107 /* Is it too early even to return a conversion? */
108 if (tmp102->first_time) {
109 dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__);
110 return -EAGAIN;
111 }
112
105 *temp = tmp102->temp[0]; 113 *temp = tmp102->temp[0];
106 114
107 return 0; 115 return 0;
@@ -114,6 +122,10 @@ static ssize_t tmp102_show_temp(struct device *dev,
114 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); 122 struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
115 struct tmp102 *tmp102 = tmp102_update_device(dev); 123 struct tmp102 *tmp102 = tmp102_update_device(dev);
116 124
125 /* Is it too early even to return a read? */
126 if (tmp102->first_time)
127 return -EAGAIN;
128
117 return sprintf(buf, "%d\n", tmp102->temp[sda->index]); 129 return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
118} 130}
119 131
@@ -207,7 +219,9 @@ static int tmp102_probe(struct i2c_client *client,
207 status = -ENODEV; 219 status = -ENODEV;
208 goto fail_restore_config; 220 goto fail_restore_config;
209 } 221 }
210 tmp102->last_update = jiffies - HZ; 222 tmp102->last_update = jiffies;
223 /* Mark that we are not ready with data until conversion is complete */
224 tmp102->first_time = true;
211 mutex_init(&tmp102->lock); 225 mutex_init(&tmp102->lock);
212 226
213 hwmon_dev = hwmon_device_register_with_groups(dev, client->name, 227 hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
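/*
 * Sketch of the "first conversion not ready" guard added above: mark
 * the device not-ready at probe time and have readers return -EAGAIN
 * until the first update lands. Locking and real register access are
 * omitted; the struct is a stand-in for the driver state.
 */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct tmp102_sketch {
	bool first_time;	/* true until the first conversion completes */
	int temp_mC;
};

static int read_temp(struct tmp102_sketch *chip, int *out)
{
	if (chip->first_time)
		return -EAGAIN;	/* caller should retry later */
	*out = chip->temp_mC;
	return 0;
}

int main(void)
{
	struct tmp102_sketch chip = { .first_time = true };
	int t;

	printf("%d\n", read_temp(&chip, &t));	/* -EAGAIN (-11 on Linux) */
	chip.first_time = false;
	chip.temp_mC = 23500;
	printf("%d %d\n", read_temp(&chip, &t), t);
	return 0;
}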
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index e24c2b680b47..7b0aa82ea38b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -126,6 +126,7 @@ config I2C_I801
126 Sunrise Point-LP (PCH) 126 Sunrise Point-LP (PCH)
127 DNV (SOC) 127 DNV (SOC)
128 Broxton (SOC) 128 Broxton (SOC)
129 Lewisburg (PCH)
129 130
130 This driver can also be built as a module. If so, the module 131 This driver can also be built as a module. If so, the module
131 will be called i2c-i801. 132 will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index c5628a42170a..a8bdcb5292f5 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -202,8 +202,15 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
202 * d is always 6 on Keystone I2C controller 202 * d is always 6 on Keystone I2C controller
203 */ 203 */
204 204
205 /* get minimum of 7 MHz clock, but max of 12 MHz */ 205 /*
206 psc = (input_clock / 7000000) - 1; 206 * Both Davinci and current Keystone User Guides recommend a value
207 * between 7MHz and 12MHz. In reality, a 7MHz module clock doesn't
208 * always produce enough margin between SDA and SCL transitions.
209 * Measurements show that the higher the module clock is, the
210 * bigger the margin, providing more reliable communication.
211 * So it is better to target 12MHz.
212 */
213 psc = (input_clock / 12000000) - 1;
207 if ((input_clock / (psc + 1)) > 12000000) 214 if ((input_clock / (psc + 1)) > 12000000)
208 psc++; /* better to run under spec than over */ 215 psc++; /* better to run under spec than over */
209 d = (psc >= 2) ? 5 : 7 - psc; 216 d = (psc >= 2) ? 5 : 7 - psc;
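/*
 * Worked sketch of the prescaler math the new comment above
 * describes: pick psc so the module clock input_clock / (psc + 1)
 * lands at or just below 12 MHz. Assumes input_clock >= 12 MHz; the
 * sample clock rates are examples, not values from the patch.
 */
#include <stdio.h>

static unsigned int calc_psc(unsigned long input_clock)
{
	unsigned int psc = input_clock / 12000000 - 1;

	if (input_clock / (psc + 1) > 12000000)
		psc++;	/* better to run under spec than over */
	return psc;
}

int main(void)
{
	unsigned long clocks[] = { 24000000, 27000000, 48000000 };

	for (int i = 0; i < 3; i++) {
		unsigned int psc = calc_psc(clocks[i]);

		printf("%lu Hz -> psc=%u -> module clock %lu Hz\n",
		       clocks[i], psc, clocks[i] / (psc + 1));
	}
	return 0;
}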
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 8c48b27ba059..de7fbbb374cd 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -813,6 +813,12 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
813tx_aborted: 813tx_aborted:
814 if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) 814 if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
815 complete(&dev->cmd_complete); 815 complete(&dev->cmd_complete);
816 else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
817 /* workaround to trigger pending interrupt */
818 stat = dw_readl(dev, DW_IC_INTR_MASK);
819 i2c_dw_disable_int(dev);
820 dw_writel(dev, stat, DW_IC_INTR_MASK);
821 }
816 822
817 return IRQ_HANDLED; 823 return IRQ_HANDLED;
818} 824}
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 1d50898e7b24..9ffb63a60f95 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -111,6 +111,7 @@ struct dw_i2c_dev {
111 111
112#define ACCESS_SWAP 0x00000001 112#define ACCESS_SWAP 0x00000001
113#define ACCESS_16BIT 0x00000002 113#define ACCESS_16BIT 0x00000002
114#define ACCESS_INTR_MASK 0x00000004
114 115
115extern int i2c_dw_init(struct dw_i2c_dev *dev); 116extern int i2c_dw_init(struct dw_i2c_dev *dev);
116extern void i2c_dw_disable(struct dw_i2c_dev *dev); 117extern void i2c_dw_disable(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 809579ecb5a4..6b00061c3746 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -93,6 +93,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
93static int dw_i2c_acpi_configure(struct platform_device *pdev) 93static int dw_i2c_acpi_configure(struct platform_device *pdev)
94{ 94{
95 struct dw_i2c_dev *dev = platform_get_drvdata(pdev); 95 struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
96 const struct acpi_device_id *id;
96 97
97 dev->adapter.nr = -1; 98 dev->adapter.nr = -1;
98 dev->tx_fifo_depth = 32; 99 dev->tx_fifo_depth = 32;
@@ -106,6 +107,10 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
106 dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, 107 dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt,
107 &dev->sda_hold_time); 108 &dev->sda_hold_time);
108 109
110 id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
111 if (id && id->driver_data)
112 dev->accessor_flags |= (u32)id->driver_data;
113
109 return 0; 114 return 0;
110} 115}
111 116
@@ -116,7 +121,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
116 { "INT3433", 0 }, 121 { "INT3433", 0 },
117 { "80860F41", 0 }, 122 { "80860F41", 0 },
118 { "808622C1", 0 }, 123 { "808622C1", 0 },
119 { "AMD0010", 0 }, 124 { "AMD0010", ACCESS_INTR_MASK },
120 { } 125 { }
121}; 126};
122MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); 127MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
@@ -240,12 +245,10 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
240 } 245 }
241 246
242 r = i2c_dw_probe(dev); 247 r = i2c_dw_probe(dev);
243 if (r) { 248 if (r && !dev->pm_runtime_disabled)
244 pm_runtime_disable(&pdev->dev); 249 pm_runtime_disable(&pdev->dev);
245 return r;
246 }
247 250
248 return 0; 251 return r;
249} 252}
250 253
251static int dw_i2c_plat_remove(struct platform_device *pdev) 254static int dw_i2c_plat_remove(struct platform_device *pdev)
@@ -260,7 +263,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
260 263
261 pm_runtime_dont_use_autosuspend(&pdev->dev); 264 pm_runtime_dont_use_autosuspend(&pdev->dev);
262 pm_runtime_put_sync(&pdev->dev); 265 pm_runtime_put_sync(&pdev->dev);
263 pm_runtime_disable(&pdev->dev); 266 if (!dev->pm_runtime_disabled)
267 pm_runtime_disable(&pdev->dev);
264 268
265 return 0; 269 return 0;
266} 270}
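/*
 * Sketch of the ACPI-quirk plumbing above: each entry in the match
 * table carries driver_data flags, and probe ORs them into the
 * device's accessor_flags. Modeled with a plain table lookup; the
 * kernel's acpi_match_device() performs the equivalent search.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ACCESS_INTR_MASK 0x00000004	/* value from the header change above */

struct acpi_id_sketch { const char *id; uint32_t driver_data; };

static const struct acpi_id_sketch ids[] = {
	{ "80860F41", 0 },
	{ "AMD0010", ACCESS_INTR_MASK },
	{ NULL, 0 },
};

int main(void)
{
	const char *hid = "AMD0010";
	uint32_t accessor_flags = 0;

	for (const struct acpi_id_sketch *p = ids; p->id; p++)
		if (!strcmp(p->id, hid))
			accessor_flags |= p->driver_data;

	printf("accessor_flags=0x%x\n", accessor_flags);
	return 0;
}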
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c306751ceadb..f62d69799a9c 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -62,6 +62,8 @@
62 * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes 62 * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes
63 * DNV (SOC) 0x19df 32 hard yes yes yes 63 * DNV (SOC) 0x19df 32 hard yes yes yes
64 * Broxton (SOC) 0x5ad4 32 hard yes yes yes 64 * Broxton (SOC) 0x5ad4 32 hard yes yes yes
65 * Lewisburg (PCH) 0xa1a3 32 hard yes yes yes
66 * Lewisburg Supersku (PCH) 0xa223 32 hard yes yes yes
65 * 67 *
66 * Features supported by this driver: 68 * Features supported by this driver:
67 * Software PEC no 69 * Software PEC no
@@ -206,6 +208,8 @@
206#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23 208#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23
207#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df 209#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
208#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4 210#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
211#define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS 0xa1a3
212#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
209 213
210struct i801_mux_config { 214struct i801_mux_config {
211 char *gpio_chip; 215 char *gpio_chip;
@@ -869,6 +873,8 @@ static const struct pci_device_id i801_ids[] = {
869 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, 873 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
870 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, 874 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
871 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, 875 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
876 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
877 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
872 { 0, } 878 { 0, }
873}; 879};
874 880
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 1e4d99da4164..d4d853680ae4 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -50,6 +50,7 @@
50#include <linux/of_device.h> 50#include <linux/of_device.h>
51#include <linux/of_dma.h> 51#include <linux/of_dma.h>
52#include <linux/of_gpio.h> 52#include <linux/of_gpio.h>
53#include <linux/pinctrl/consumer.h>
53#include <linux/platform_data/i2c-imx.h> 54#include <linux/platform_data/i2c-imx.h>
54#include <linux/platform_device.h> 55#include <linux/platform_device.h>
55#include <linux/sched.h> 56#include <linux/sched.h>
@@ -1118,6 +1119,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
1118 i2c_imx, IMX_I2C_I2CR); 1119 i2c_imx, IMX_I2C_I2CR);
1119 imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); 1120 imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
1120 1121
1122 i2c_imx_init_recovery_info(i2c_imx, pdev);
1123
1121 /* Add I2C adapter */ 1124 /* Add I2C adapter */
1122 ret = i2c_add_numbered_adapter(&i2c_imx->adapter); 1125 ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
1123 if (ret < 0) { 1126 if (ret < 0) {
@@ -1125,8 +1128,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
1125 goto clk_disable; 1128 goto clk_disable;
1126 } 1129 }
1127 1130
1128 i2c_imx_init_recovery_info(i2c_imx, pdev);
1129
1130 /* Set up platform driver data */ 1131 /* Set up platform driver data */
1131 platform_set_drvdata(pdev, i2c_imx); 1132 platform_set_drvdata(pdev, i2c_imx);
1132 clk_disable_unprepare(i2c_imx->clk); 1133 clk_disable_unprepare(i2c_imx->clk);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 5801227b97ab..43207f52e5a3 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -146,6 +146,8 @@ struct mv64xxx_i2c_data {
146 bool errata_delay; 146 bool errata_delay;
147 struct reset_control *rstc; 147 struct reset_control *rstc;
148 bool irq_clear_inverted; 148 bool irq_clear_inverted;
149 /* Clk div is 2 to the power n, not 2 to the power n + 1 */
150 bool clk_n_base_0;
149}; 151};
150 152
151static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = { 153static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = {
@@ -757,25 +759,29 @@ MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
757#ifdef CONFIG_OF 759#ifdef CONFIG_OF
758#ifdef CONFIG_HAVE_CLK 760#ifdef CONFIG_HAVE_CLK
759static int 761static int
760mv64xxx_calc_freq(const int tclk, const int n, const int m) 762mv64xxx_calc_freq(struct mv64xxx_i2c_data *drv_data,
763 const int tclk, const int n, const int m)
761{ 764{
762 return tclk / (10 * (m + 1) * (2 << n)); 765 if (drv_data->clk_n_base_0)
766 return tclk / (10 * (m + 1) * (1 << n));
767 else
768 return tclk / (10 * (m + 1) * (2 << n));
763} 769}
764 770
765static bool 771static bool
766mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n, 772mv64xxx_find_baud_factors(struct mv64xxx_i2c_data *drv_data,
767 u32 *best_m) 773 const u32 req_freq, const u32 tclk)
768{ 774{
769 int freq, delta, best_delta = INT_MAX; 775 int freq, delta, best_delta = INT_MAX;
770 int m, n; 776 int m, n;
771 777
772 for (n = 0; n <= 7; n++) 778 for (n = 0; n <= 7; n++)
773 for (m = 0; m <= 15; m++) { 779 for (m = 0; m <= 15; m++) {
774 freq = mv64xxx_calc_freq(tclk, n, m); 780 freq = mv64xxx_calc_freq(drv_data, tclk, n, m);
775 delta = req_freq - freq; 781 delta = req_freq - freq;
776 if (delta >= 0 && delta < best_delta) { 782 if (delta >= 0 && delta < best_delta) {
777 *best_m = m; 783 drv_data->freq_m = m;
778 *best_n = n; 784 drv_data->freq_n = n;
779 best_delta = delta; 785 best_delta = delta;
780 } 786 }
781 if (best_delta == 0) 787 if (best_delta == 0)
@@ -813,8 +819,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
813 if (of_property_read_u32(np, "clock-frequency", &bus_freq)) 819 if (of_property_read_u32(np, "clock-frequency", &bus_freq))
814 bus_freq = 100000; /* 100kHz by default */ 820 bus_freq = 100000; /* 100kHz by default */
815 821
816 if (!mv64xxx_find_baud_factors(bus_freq, tclk, 822 if (of_device_is_compatible(np, "allwinner,sun4i-a10-i2c") ||
817 &drv_data->freq_n, &drv_data->freq_m)) { 823 of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
824 drv_data->clk_n_base_0 = true;
825
826 if (!mv64xxx_find_baud_factors(drv_data, bus_freq, tclk)) {
818 rc = -EINVAL; 827 rc = -EINVAL;
819 goto out; 828 goto out;
820 } 829 }
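On the Allwinner controllers the divider exponent is zero-based: the bus frequency is tclk / (10 * (m + 1) * 2^n), not 2^(n+1) as on the original Marvell parts. Since "2 << n" is 2^(n+1) and "1 << n" is 2^n, the same (n, m) register pair describes frequencies a factor of two apart, which is why the compatible strings now select a convention and why the factor search writes freq_n/freq_m through drv_data instead of out-parameters. A standalone check of the arithmetic, with a hypothetical 24 MHz tclk:

#include <stdio.h>

/* Mirror of the two divider conventions in mv64xxx_calc_freq(). */
static int calc_freq(int tclk, int n, int m, int n_base_0)
{
	return tclk / (10 * (m + 1) * ((n_base_0 ? 1 : 2) << n));
}

int main(void)
{
	printf("%d\n", calc_freq(24000000, 2, 2, 1)); /* 200000: 2^n base */
	printf("%d\n", calc_freq(24000000, 2, 2, 0)); /* 100000: 2^(n+1) base */
	return 0;
}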
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index b0ae560b38c3..599c0d7bd906 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -576,7 +576,7 @@ static int rcar_reg_slave(struct i2c_client *slave)
576 if (slave->flags & I2C_CLIENT_TEN) 576 if (slave->flags & I2C_CLIENT_TEN)
577 return -EAFNOSUPPORT; 577 return -EAFNOSUPPORT;
578 578
579 pm_runtime_forbid(rcar_i2c_priv_to_dev(priv)); 579 pm_runtime_get_sync(rcar_i2c_priv_to_dev(priv));
580 580
581 priv->slave = slave; 581 priv->slave = slave;
582 rcar_i2c_write(priv, ICSAR, slave->addr); 582 rcar_i2c_write(priv, ICSAR, slave->addr);
@@ -598,7 +598,7 @@ static int rcar_unreg_slave(struct i2c_client *slave)
598 598
599 priv->slave = NULL; 599 priv->slave = NULL;
600 600
601 pm_runtime_allow(rcar_i2c_priv_to_dev(priv)); 601 pm_runtime_put(rcar_i2c_priv_to_dev(priv));
602 602
603 return 0; 603 return 0;
604} 604}
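Keeping the controller powered while a slave address is registered is a driver-internal requirement, and the usage-count API expresses exactly that; pm_runtime_forbid()/pm_runtime_allow() instead drives the policy behind the sysfs power/control attribute, so using it here fought with userspace control of that knob. A hedged sketch of the paired calls:

#include <linux/pm_runtime.h>

/* Hold a runtime-PM usage reference for as long as a slave is
 * registered; the device cannot runtime-suspend in between. */
static void example_slave_registered(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* usage count +1, resumes if needed */
}

static void example_slave_unregistered(struct device *dev)
{
	pm_runtime_put(dev);		/* usage count -1, may suspend again */
}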
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index c1935ebd6a9c..9096d17beb5b 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -908,7 +908,7 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
908 &i2c->scl_fall_ns)) 908 &i2c->scl_fall_ns))
909 i2c->scl_fall_ns = 300; 909 i2c->scl_fall_ns = 300;
910 if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns", 910 if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns",
911 &i2c->scl_fall_ns)) 911 &i2c->sda_fall_ns))
912 i2c->sda_fall_ns = i2c->scl_fall_ns; 912 i2c->sda_fall_ns = i2c->scl_fall_ns;
913 913
914 strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name)); 914 strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
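The second of_property_read_u32() was aimed at the wrong field: on success it overwrote scl_fall_ns and left sda_fall_ns untouched, so a device tree that actually provided "i2c-sda-falling-time-ns" corrupted the SCL timing instead. The helper only writes through its pointer on success and returns non-zero on failure, which is what makes the read-with-fallback idiom work. That idiom in isolation, with hypothetical field pointers:

#include <linux/of.h>

static void example_read_timings(struct device_node *np,
				 u32 *scl_fall_ns, u32 *sda_fall_ns)
{
	if (of_property_read_u32(np, "i2c-scl-falling-time-ns", scl_fall_ns))
		*scl_fall_ns = 300;		/* default when absent */
	if (of_property_read_u32(np, "i2c-sda-falling-time-ns", sda_fall_ns))
		*sda_fall_ns = *scl_fall_ns;	/* fall back to SCL value */
}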
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index ea72dca32fdf..25020ec777c9 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -822,7 +822,7 @@ static int st_i2c_probe(struct platform_device *pdev)
822 822
823 adap = &i2c_dev->adap; 823 adap = &i2c_dev->adap;
824 i2c_set_adapdata(adap, i2c_dev); 824 i2c_set_adapdata(adap, i2c_dev);
825 snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%pa)", &res->start); 825 snprintf(adap->name, sizeof(adap->name), "ST I2C(%pa)", &res->start);
826 adap->owner = THIS_MODULE; 826 adap->owner = THIS_MODULE;
827 adap->timeout = 2 * HZ; 827 adap->timeout = 2 * HZ;
828 adap->retries = 0; 828 adap->retries = 0;
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index e23a7b068c60..0b20449e48cf 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -662,8 +662,10 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
662 662
663static void xiic_start_xfer(struct xiic_i2c *i2c) 663static void xiic_start_xfer(struct xiic_i2c *i2c)
664{ 664{
665 665 spin_lock(&i2c->lock);
666 xiic_reinit(i2c);
666 __xiic_start_xfer(i2c); 667 __xiic_start_xfer(i2c);
668 spin_unlock(&i2c->lock);
667} 669}
668 670
669static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 671static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
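Resetting the controller and queueing the first message must be atomic with respect to the interrupt handler, which takes the same lock; otherwise the ISR can observe a half-reinitialized controller between the two steps. The shape of the fix, as a generic sketch with hypothetical helpers:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_reinit(void) { /* controller soft reset */ }
static void example_kick(void)   { /* queue the first message */ }

static void example_start_xfer(void)
{
	spin_lock(&example_lock);
	example_reinit();	/* reset ... */
	example_kick();		/* ... and restart as one unit vs. the ISR */
	spin_unlock(&example_lock);
}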
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 040af5cc8143..ba8eb087f224 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -715,7 +715,7 @@ static int i2c_device_probe(struct device *dev)
715 if (wakeirq > 0 && wakeirq != client->irq) 715 if (wakeirq > 0 && wakeirq != client->irq)
716 status = dev_pm_set_dedicated_wake_irq(dev, wakeirq); 716 status = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
717 else if (client->irq > 0) 717 else if (client->irq > 0)
718 status = dev_pm_set_wake_irq(dev, wakeirq); 718 status = dev_pm_set_wake_irq(dev, client->irq);
719 else 719 else
720 status = 0; 720 status = 0;
721 721
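In the branch taken when no dedicated wake IRQ exists, wakeirq is zero or negative by construction, so passing it to dev_pm_set_wake_irq() was a thinko; the client's main interrupt is the wake source there. The intended selection, as a compact sketch:

#include <linux/pm_wakeirq.h>

static int example_setup_wakeirq(struct device *dev, int irq, int wakeirq)
{
	if (wakeirq > 0 && wakeirq != irq)	/* dedicated wake line */
		return dev_pm_set_dedicated_wake_irq(dev, wakeirq);
	if (irq > 0)				/* main IRQ doubles as wake */
		return dev_pm_set_wake_irq(dev, irq);
	return 0;				/* no wake capability */
}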
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index eea0c79111e7..4d960d3b93c0 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -101,7 +101,7 @@
101#define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */ 101#define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */
102 102
103/* ID Register Bit Designations (AD7793_REG_ID) */ 103/* ID Register Bit Designations (AD7793_REG_ID) */
104#define AD7785_ID 0xB 104#define AD7785_ID 0x3
105#define AD7792_ID 0xA 105#define AD7792_ID 0xA
106#define AD7793_ID 0xB 106#define AD7793_ID 0xB
107#define AD7794_ID 0xF 107#define AD7794_ID 0xF
diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c
index 0c4618b4d515..c2babe50a0d8 100644
--- a/drivers/iio/adc/qcom-spmi-vadc.c
+++ b/drivers/iio/adc/qcom-spmi-vadc.c
@@ -839,8 +839,10 @@ static int vadc_get_dt_data(struct vadc_priv *vadc, struct device_node *node)
839 839
840 for_each_available_child_of_node(node, child) { 840 for_each_available_child_of_node(node, child) {
841 ret = vadc_get_dt_channel_data(vadc->dev, &prop, child); 841 ret = vadc_get_dt_channel_data(vadc->dev, &prop, child);
842 if (ret) 842 if (ret) {
843 of_node_put(child);
843 return ret; 844 return ret;
845 }
844 846
845 vadc->chan_props[index] = prop; 847 vadc->chan_props[index] = prop;
846 848
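for_each_available_child_of_node() holds a reference on the current child and releases it when advancing to the next iteration; bailing out of the loop early therefore leaks that reference unless it is dropped by hand. The pattern the hunk enforces, with a hypothetical per-child handler:

#include <linux/of.h>

static int example_handle_child(struct device_node *child)
{
	return 0;	/* hypothetical per-channel parsing */
}

static int example_walk_children(struct device_node *node)
{
	struct device_node *child;
	int ret;

	for_each_available_child_of_node(node, child) {
		ret = example_handle_child(child);
		if (ret) {
			of_node_put(child);	/* drop the iterator's ref */
			return ret;
		}
	}
	return 0;
}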
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 599cde3d03a1..b10f629cc44b 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -106,6 +106,13 @@
106 106
107#define DEFAULT_SAMPLE_TIME 1000 107#define DEFAULT_SAMPLE_TIME 1000
108 108
109/* V at 25°C of 696 mV */
110#define VF610_VTEMP25_3V0 950
111/* V at 25°C of 699 mV */
112#define VF610_VTEMP25_3V3 867
113/* Typical sensor slope coefficient at all temperatures */
114#define VF610_TEMP_SLOPE_COEFF 1840
115
109enum clk_sel { 116enum clk_sel {
110 VF610_ADCIOC_BUSCLK_SET, 117 VF610_ADCIOC_BUSCLK_SET,
111 VF610_ADCIOC_ALTCLK_SET, 118 VF610_ADCIOC_ALTCLK_SET,
@@ -197,6 +204,8 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
197 adc_feature->clk_div = 8; 204 adc_feature->clk_div = 8;
198 } 205 }
199 206
207 adck_rate = ipg_rate / adc_feature->clk_div;
208
200 /* 209 /*
201 * Determine the long sample time adder value to be used based 210 * Determine the long sample time adder value to be used based
202 * on the default minimum sample time provided. 211 * on the default minimum sample time provided.
@@ -221,7 +230,6 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
221 * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode 230 * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
222 * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles 231 * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles
223 */ 232 */
224 adck_rate = ipg_rate / info->adc_feature.clk_div;
225 for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++) 233 for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
226 info->sample_freq_avail[i] = 234 info->sample_freq_avail[i] =
227 adck_rate / (6 + vf610_hw_avgs[i] * 235 adck_rate / (6 + vf610_hw_avgs[i] *
@@ -663,11 +671,13 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
663 break; 671 break;
664 case IIO_TEMP: 672 case IIO_TEMP:
665 /* 673 /*
666 * Calculate in degree Celsius times 1000 674 * Calculate in degree Celsius times 1000
667 * Using sensor slope of 1.84 mV/°C and 675 * Using the typical sensor slope of 1.84 mV/°C
668 * V at 25°C of 696 mV 676 * and VREFH_ADC at 3.3V, V at 25°C of 699 mV
669 */ 677 */
670 *val = 25000 - ((int)info->value - 864) * 1000000 / 1840; 678 *val = 25000 - ((int)info->value - VF610_VTEMP25_3V3) *
679 1000000 / VF610_TEMP_SLOPE_COEFF;
680
671 break; 681 break;
672 default: 682 default:
673 mutex_unlock(&indio_dev->mlock); 683 mutex_unlock(&indio_dev->mlock);
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 0370624a35db..02e636a1c49a 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -841,6 +841,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
841 case XADC_REG_VCCINT: 841 case XADC_REG_VCCINT:
842 case XADC_REG_VCCAUX: 842 case XADC_REG_VCCAUX:
843 case XADC_REG_VREFP: 843 case XADC_REG_VREFP:
844 case XADC_REG_VREFN:
844 case XADC_REG_VCCBRAM: 845 case XADC_REG_VCCBRAM:
845 case XADC_REG_VCCPINT: 846 case XADC_REG_VCCPINT:
846 case XADC_REG_VCCPAUX: 847 case XADC_REG_VCCPAUX:
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 9e4d2c18b554..81ca0081a019 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -113,12 +113,16 @@ enum ad5064_type {
113 ID_AD5065, 113 ID_AD5065,
114 ID_AD5628_1, 114 ID_AD5628_1,
115 ID_AD5628_2, 115 ID_AD5628_2,
116 ID_AD5629_1,
117 ID_AD5629_2,
116 ID_AD5648_1, 118 ID_AD5648_1,
117 ID_AD5648_2, 119 ID_AD5648_2,
118 ID_AD5666_1, 120 ID_AD5666_1,
119 ID_AD5666_2, 121 ID_AD5666_2,
120 ID_AD5668_1, 122 ID_AD5668_1,
121 ID_AD5668_2, 123 ID_AD5668_2,
124 ID_AD5669_1,
125 ID_AD5669_2,
122}; 126};
123 127
124static int ad5064_write(struct ad5064_state *st, unsigned int cmd, 128static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
@@ -291,7 +295,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
291 { }, 295 { },
292}; 296};
293 297
294#define AD5064_CHANNEL(chan, addr, bits) { \ 298#define AD5064_CHANNEL(chan, addr, bits, _shift) { \
295 .type = IIO_VOLTAGE, \ 299 .type = IIO_VOLTAGE, \
296 .indexed = 1, \ 300 .indexed = 1, \
297 .output = 1, \ 301 .output = 1, \
@@ -303,36 +307,39 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
303 .sign = 'u', \ 307 .sign = 'u', \
304 .realbits = (bits), \ 308 .realbits = (bits), \
305 .storagebits = 16, \ 309 .storagebits = 16, \
306 .shift = 20 - bits, \ 310 .shift = (_shift), \
307 }, \ 311 }, \
308 .ext_info = ad5064_ext_info, \ 312 .ext_info = ad5064_ext_info, \
309} 313}
310 314
311#define DECLARE_AD5064_CHANNELS(name, bits) \ 315#define DECLARE_AD5064_CHANNELS(name, bits, shift) \
312const struct iio_chan_spec name[] = { \ 316const struct iio_chan_spec name[] = { \
313 AD5064_CHANNEL(0, 0, bits), \ 317 AD5064_CHANNEL(0, 0, bits, shift), \
314 AD5064_CHANNEL(1, 1, bits), \ 318 AD5064_CHANNEL(1, 1, bits, shift), \
315 AD5064_CHANNEL(2, 2, bits), \ 319 AD5064_CHANNEL(2, 2, bits, shift), \
316 AD5064_CHANNEL(3, 3, bits), \ 320 AD5064_CHANNEL(3, 3, bits, shift), \
317 AD5064_CHANNEL(4, 4, bits), \ 321 AD5064_CHANNEL(4, 4, bits, shift), \
318 AD5064_CHANNEL(5, 5, bits), \ 322 AD5064_CHANNEL(5, 5, bits, shift), \
319 AD5064_CHANNEL(6, 6, bits), \ 323 AD5064_CHANNEL(6, 6, bits, shift), \
320 AD5064_CHANNEL(7, 7, bits), \ 324 AD5064_CHANNEL(7, 7, bits, shift), \
321} 325}
322 326
323#define DECLARE_AD5065_CHANNELS(name, bits) \ 327#define DECLARE_AD5065_CHANNELS(name, bits, shift) \
324const struct iio_chan_spec name[] = { \ 328const struct iio_chan_spec name[] = { \
325 AD5064_CHANNEL(0, 0, bits), \ 329 AD5064_CHANNEL(0, 0, bits, shift), \
326 AD5064_CHANNEL(1, 3, bits), \ 330 AD5064_CHANNEL(1, 3, bits, shift), \
327} 331}
328 332
329static DECLARE_AD5064_CHANNELS(ad5024_channels, 12); 333static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8);
330static DECLARE_AD5064_CHANNELS(ad5044_channels, 14); 334static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6);
331static DECLARE_AD5064_CHANNELS(ad5064_channels, 16); 335static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4);
332 336
333static DECLARE_AD5065_CHANNELS(ad5025_channels, 12); 337static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8);
334static DECLARE_AD5065_CHANNELS(ad5045_channels, 14); 338static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6);
335static DECLARE_AD5065_CHANNELS(ad5065_channels, 16); 339static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4);
340
341static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4);
342static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0);
336 343
337static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { 344static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
338 [ID_AD5024] = { 345 [ID_AD5024] = {
@@ -382,6 +389,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
382 .channels = ad5024_channels, 389 .channels = ad5024_channels,
383 .num_channels = 8, 390 .num_channels = 8,
384 }, 391 },
392 [ID_AD5629_1] = {
393 .shared_vref = true,
394 .internal_vref = 2500000,
395 .channels = ad5629_channels,
396 .num_channels = 8,
397 },
398 [ID_AD5629_2] = {
399 .shared_vref = true,
400 .internal_vref = 5000000,
401 .channels = ad5629_channels,
402 .num_channels = 8,
403 },
385 [ID_AD5648_1] = { 404 [ID_AD5648_1] = {
386 .shared_vref = true, 405 .shared_vref = true,
387 .internal_vref = 2500000, 406 .internal_vref = 2500000,
@@ -418,6 +437,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
418 .channels = ad5064_channels, 437 .channels = ad5064_channels,
419 .num_channels = 8, 438 .num_channels = 8,
420 }, 439 },
440 [ID_AD5669_1] = {
441 .shared_vref = true,
442 .internal_vref = 2500000,
443 .channels = ad5669_channels,
444 .num_channels = 8,
445 },
446 [ID_AD5669_2] = {
447 .shared_vref = true,
448 .internal_vref = 5000000,
449 .channels = ad5669_channels,
450 .num_channels = 8,
451 },
421}; 452};
422 453
423static inline unsigned int ad5064_num_vref(struct ad5064_state *st) 454static inline unsigned int ad5064_num_vref(struct ad5064_state *st)
@@ -597,10 +628,16 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd,
597 unsigned int addr, unsigned int val) 628 unsigned int addr, unsigned int val)
598{ 629{
599 struct i2c_client *i2c = to_i2c_client(st->dev); 630 struct i2c_client *i2c = to_i2c_client(st->dev);
631 int ret;
600 632
601 st->data.i2c[0] = (cmd << 4) | addr; 633 st->data.i2c[0] = (cmd << 4) | addr;
602 put_unaligned_be16(val, &st->data.i2c[1]); 634 put_unaligned_be16(val, &st->data.i2c[1]);
603 return i2c_master_send(i2c, st->data.i2c, 3); 635
636 ret = i2c_master_send(i2c, st->data.i2c, 3);
637 if (ret < 0)
638 return ret;
639
640 return 0;
604} 641}
605 642
606static int ad5064_i2c_probe(struct i2c_client *i2c, 643static int ad5064_i2c_probe(struct i2c_client *i2c,
@@ -616,12 +653,12 @@ static int ad5064_i2c_remove(struct i2c_client *i2c)
616} 653}
617 654
618static const struct i2c_device_id ad5064_i2c_ids[] = { 655static const struct i2c_device_id ad5064_i2c_ids[] = {
619 {"ad5629-1", ID_AD5628_1}, 656 {"ad5629-1", ID_AD5629_1},
620 {"ad5629-2", ID_AD5628_2}, 657 {"ad5629-2", ID_AD5629_2},
621 {"ad5629-3", ID_AD5628_2}, /* similar enough to ad5629-2 */ 658 {"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */
622 {"ad5669-1", ID_AD5668_1}, 659 {"ad5669-1", ID_AD5669_1},
623 {"ad5669-2", ID_AD5668_2}, 660 {"ad5669-2", ID_AD5669_2},
624 {"ad5669-3", ID_AD5668_2}, /* similar enough to ad5669-2 */ 661 {"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */
625 {} 662 {}
626}; 663};
627MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids); 664MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
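Two distinct fixes here. The AD5629/AD5669 previously piggy-backed on the AD5628/AD5668 table entries, but the I2C parts left-align their sample in a 16-bit data word while the SPI parts use a 20-bit field, so the blanket "shift = 20 - bits" put the bits in the wrong place: an AD5629 needs 16 - 12 = 4, not 8, hence the new per-chip shift parameter and dedicated IDs. Separately, i2c_master_send() returns the number of bytes transferred on success, while the IIO write path expects 0 or a negative errno; the write helper now normalizes that. The normalization in isolation:

#include <linux/i2c.h>

/* i2c_master_send() returns the byte count on success; callers that
 * expect 0/-errno must swallow the positive value. */
static int example_dac_write(struct i2c_client *i2c,
			     const char *buf, int len)
{
	int ret = i2c_master_send(i2c, buf, len);

	return ret < 0 ? ret : 0;
}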
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index 12128d1ca570..71991b5c0658 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -50,10 +50,10 @@ static int si7020_read_raw(struct iio_dev *indio_dev,
50 50
51 switch (mask) { 51 switch (mask) {
52 case IIO_CHAN_INFO_RAW: 52 case IIO_CHAN_INFO_RAW:
53 ret = i2c_smbus_read_word_data(*client, 53 ret = i2c_smbus_read_word_swapped(*client,
54 chan->type == IIO_TEMP ? 54 chan->type == IIO_TEMP ?
55 SI7020CMD_TEMP_HOLD : 55 SI7020CMD_TEMP_HOLD :
56 SI7020CMD_RH_HOLD); 56 SI7020CMD_RH_HOLD);
57 if (ret < 0) 57 if (ret < 0)
58 return ret; 58 return ret;
59 *val = ret >> 2; 59 *val = ret >> 2;
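SMBus word transfers are little-endian by specification, but the Si7020 returns its measurement most-significant byte first, so every reading came back byte-swapped; the _swapped accessor performs the correction in one step. The two low bits of the word carry no measurement data, which is what the existing ">> 2" discards. In miniature:

#include <linux/i2c.h>

/* Read a big-endian measurement word over SMBus and strip the two
 * non-data LSBs, as the fixed driver does. */
static int example_read_measurement(struct i2c_client *client, u8 cmd)
{
	int ret = i2c_smbus_read_word_swapped(client, cmd);

	return ret < 0 ? ret : ret >> 2;
}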
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index d7e908acb480..0f6f63b20263 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -302,7 +302,7 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
302 if (trialmask == NULL) 302 if (trialmask == NULL)
303 return -ENOMEM; 303 return -ENOMEM;
304 if (!indio_dev->masklength) { 304 if (!indio_dev->masklength) {
305 WARN_ON("Trying to set scanmask prior to registering buffer\n"); 305 WARN(1, "Trying to set scanmask prior to registering buffer\n");
306 goto err_invalid_mask; 306 goto err_invalid_mask;
307 } 307 }
308 bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength); 308 bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 208358f9e7e3..159ede61f793 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -655,7 +655,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
655 break; 655 break;
656 case IIO_SEPARATE: 656 case IIO_SEPARATE:
657 if (!chan->indexed) { 657 if (!chan->indexed) {
658 WARN_ON("Differential channels must be indexed\n"); 658 WARN(1, "Differential channels must be indexed\n");
659 ret = -EINVAL; 659 ret = -EINVAL;
660 goto error_free_full_postfix; 660 goto error_free_full_postfix;
661 } 661 }
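Both IIO hunks fix the same misuse: WARN_ON() takes a condition, so handing it a string literal just tests a non-NULL pointer (always true) and never prints the text. WARN() takes the condition and a printf-style message as separate arguments. Side by side:

#include <linux/bug.h>

static void example_check(int registered)
{
	/* Wrong: the string is the condition; the text is never printed.
	 * WARN_ON("buffer not registered\n");
	 */

	/* Right: condition first, message second. */
	WARN(!registered, "buffer not registered\n");
}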
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 7d269ef9e062..f6a07dc32ae4 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -453,6 +453,7 @@ static int apds9960_set_power_state(struct apds9960_data *data, bool on)
453 usleep_range(data->als_adc_int_us, 453 usleep_range(data->als_adc_int_us,
454 APDS9960_MAX_INT_TIME_IN_US); 454 APDS9960_MAX_INT_TIME_IN_US);
455 } else { 455 } else {
456 pm_runtime_mark_last_busy(dev);
456 ret = pm_runtime_put_autosuspend(dev); 457 ret = pm_runtime_put_autosuspend(dev);
457 } 458 }
458 459
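pm_runtime_put_autosuspend() schedules the suspend relative to the device's last-busy timestamp, so the timestamp must be refreshed just before the usage count drops; with a stale timestamp the device can suspend immediately instead of after the autosuspend delay. The canonical pairing:

#include <linux/pm_runtime.h>

static int example_power_off(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);		/* delay counts from now */
	return pm_runtime_put_autosuspend(dev);
}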
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 961f9f990faf..e544fcfd5ced 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -130,10 +130,10 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
130 if (ret < 0) 130 if (ret < 0)
131 break; 131 break;
132 132
133 /* return 0 since laser is likely pointed out of range */ 133 /* return -EINVAL since laser is likely pointed out of range */
134 if (ret & LIDAR_REG_STATUS_INVALID) { 134 if (ret & LIDAR_REG_STATUS_INVALID) {
135 *reg = 0; 135 *reg = 0;
136 ret = 0; 136 ret = -EINVAL;
137 break; 137 break;
138 } 138 }
139 139
@@ -197,7 +197,7 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
197 if (!ret) { 197 if (!ret) {
198 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, 198 iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
199 iio_get_time_ns()); 199 iio_get_time_ns());
200 } else { 200 } else if (ret != -EINVAL) {
201 dev_err(&data->client->dev, "cannot read LIDAR measurement"); 201 dev_err(&data->client->dev, "cannot read LIDAR measurement");
202 } 202 }
203 203
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 944cd90417bc..d2d5d004f16d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1126,10 +1126,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev,
1126 1126
1127 rcu_read_lock(); 1127 rcu_read_lock();
1128 err = fib_lookup(dev_net(net_dev), &fl4, &res, 0); 1128 err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
1129 if (err) 1129 ret = err == 0 && FIB_RES_DEV(res) == net_dev;
1130 return false;
1131
1132 ret = FIB_RES_DEV(res) == net_dev;
1133 rcu_read_unlock(); 1130 rcu_read_unlock();
1134 1131
1135 return ret; 1132 return ret;
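The old error path returned false with the RCU read lock still held. Computing the verdict inside the critical section and funnelling every path through the unlock removes the imbalance. The shape of the fix, with hypothetical stand-ins for fib_lookup() and the device comparison:

#include <linux/rcupdate.h>
#include <linux/types.h>

static int example_fib_lookup(void)   { return 0; }
static bool example_dev_matches(void) { return true; }

static bool example_validate(void)
{
	bool ret;
	int err;

	rcu_read_lock();
	err = example_fib_lookup();
	ret = err == 0 && example_dev_matches();
	rcu_read_unlock();	/* runs on every path, error or not */

	return ret;
}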
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 8d8af7a41a30..2281de122038 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1811,6 +1811,11 @@ static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1811 if (qp_num == 0) 1811 if (qp_num == 0)
1812 valid = 1; 1812 valid = 1;
1813 } else { 1813 } else {
1814 /* CM attributes other than ClassPortInfo only use Send method */
1815 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1816 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1817 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1818 goto out;
1814 /* Filter GSI packets sent to QP0 */ 1819 /* Filter GSI packets sent to QP0 */
1815 if (qp_num != 0) 1820 if (qp_num != 0)
1816 valid = 1; 1821 valid = 1;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 2aba774f835b..a95a32ba596e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -512,7 +512,7 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
512 return len; 512 return len;
513} 513}
514 514
515static int ib_nl_send_msg(struct ib_sa_query *query) 515static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
516{ 516{
517 struct sk_buff *skb = NULL; 517 struct sk_buff *skb = NULL;
518 struct nlmsghdr *nlh; 518 struct nlmsghdr *nlh;
@@ -526,7 +526,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
526 if (len <= 0) 526 if (len <= 0)
527 return -EMSGSIZE; 527 return -EMSGSIZE;
528 528
529 skb = nlmsg_new(len, GFP_KERNEL); 529 skb = nlmsg_new(len, gfp_mask);
530 if (!skb) 530 if (!skb)
531 return -ENOMEM; 531 return -ENOMEM;
532 532
@@ -544,7 +544,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
544 /* Repair the nlmsg header length */ 544 /* Repair the nlmsg header length */
545 nlmsg_end(skb, nlh); 545 nlmsg_end(skb, nlh);
546 546
547 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL); 547 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
548 if (!ret) 548 if (!ret)
549 ret = len; 549 ret = len;
550 else 550 else
@@ -553,7 +553,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query)
553 return ret; 553 return ret;
554} 554}
555 555
556static int ib_nl_make_request(struct ib_sa_query *query) 556static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
557{ 557{
558 unsigned long flags; 558 unsigned long flags;
559 unsigned long delay; 559 unsigned long delay;
@@ -562,25 +562,27 @@ static int ib_nl_make_request(struct ib_sa_query *query)
562 INIT_LIST_HEAD(&query->list); 562 INIT_LIST_HEAD(&query->list);
563 query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq); 563 query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
564 564
565 /* Put the request on the list first.*/
565 spin_lock_irqsave(&ib_nl_request_lock, flags); 566 spin_lock_irqsave(&ib_nl_request_lock, flags);
566 ret = ib_nl_send_msg(query);
567 if (ret <= 0) {
568 ret = -EIO;
569 goto request_out;
570 } else {
571 ret = 0;
572 }
573
574 delay = msecs_to_jiffies(sa_local_svc_timeout_ms); 567 delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
575 query->timeout = delay + jiffies; 568 query->timeout = delay + jiffies;
576 list_add_tail(&query->list, &ib_nl_request_list); 569 list_add_tail(&query->list, &ib_nl_request_list);
577 /* Start the timeout if this is the only request */ 570 /* Start the timeout if this is the only request */
578 if (ib_nl_request_list.next == &query->list) 571 if (ib_nl_request_list.next == &query->list)
579 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay); 572 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
580
581request_out:
582 spin_unlock_irqrestore(&ib_nl_request_lock, flags); 573 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
583 574
575 ret = ib_nl_send_msg(query, gfp_mask);
576 if (ret <= 0) {
577 ret = -EIO;
578 /* Remove the request */
579 spin_lock_irqsave(&ib_nl_request_lock, flags);
580 list_del(&query->list);
581 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
582 } else {
583 ret = 0;
584 }
585
584 return ret; 586 return ret;
585} 587}
586 588
@@ -1108,7 +1110,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
1108 1110
1109 if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) { 1111 if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
1110 if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) { 1112 if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
1111 if (!ib_nl_make_request(query)) 1113 if (!ib_nl_make_request(query, gfp_mask))
1112 return id; 1114 return id;
1113 } 1115 }
1114 ib_sa_disable_local_svc(query); 1116 ib_sa_disable_local_svc(query);
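Two fixes travel together in the sa_query changes. The allocation flags are plumbed through from send_mad(), because this path can run in atomic context where GFP_KERNEL is illegal, and the old code even slept inside the request spinlock. And the request now goes onto the tracking list before the netlink message is sent, so a fast response cannot arrive and fail to find its request; a failed send unlinks it again. The reordering, as a sketch with a hypothetical transmit helper:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(example_requests);
static DEFINE_SPINLOCK(example_lock);

struct example_req {
	struct list_head list;
};

static int example_send(struct example_req *req, gfp_t gfp)
{
	return 1;	/* hypothetical transmit, >0 on success */
}

static int example_make_request(struct example_req *req, gfp_t gfp)
{
	unsigned long flags;

	/* Publish first: the reply may race with the sender. */
	spin_lock_irqsave(&example_lock, flags);
	list_add_tail(&req->list, &example_requests);
	spin_unlock_irqrestore(&example_lock, flags);

	if (example_send(req, gfp) <= 0) {
		spin_lock_irqsave(&example_lock, flags);
		list_del(&req->list);	/* undo on failure */
		spin_unlock_irqrestore(&example_lock, flags);
		return -EIO;
	}
	return 0;
}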
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 94816aeb95a0..1c02deab068f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -62,9 +62,11 @@ static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
62 * The ib_uobject locking scheme is as follows: 62 * The ib_uobject locking scheme is as follows:
63 * 63 *
64 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it 64 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
65 * needs to be held during all idr operations. When an object is 65 * needs to be held during all idr write operations. When an object is
66 * looked up, a reference must be taken on the object's kref before 66 * looked up, a reference must be taken on the object's kref before
67 * dropping this lock. 67 * dropping this lock. For read operations, the rcu_read_lock()
68 * and rcu_write_lock() but similarly the kref reference is grabbed
69 * before the rcu_read_unlock().
68 * 70 *
69 * - Each object also has an rwsem. This rwsem must be held for 71 * - Each object also has an rwsem. This rwsem must be held for
70 * reading while an operation that uses the object is performed. 72 * reading while an operation that uses the object is performed.
@@ -96,7 +98,7 @@ static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
96 98
97static void release_uobj(struct kref *kref) 99static void release_uobj(struct kref *kref)
98{ 100{
99 kfree(container_of(kref, struct ib_uobject, ref)); 101 kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
100} 102}
101 103
102static void put_uobj(struct ib_uobject *uobj) 104static void put_uobj(struct ib_uobject *uobj)
@@ -145,7 +147,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
145{ 147{
146 struct ib_uobject *uobj; 148 struct ib_uobject *uobj;
147 149
148 spin_lock(&ib_uverbs_idr_lock); 150 rcu_read_lock();
149 uobj = idr_find(idr, id); 151 uobj = idr_find(idr, id);
150 if (uobj) { 152 if (uobj) {
151 if (uobj->context == context) 153 if (uobj->context == context)
@@ -153,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
153 else 155 else
154 uobj = NULL; 156 uobj = NULL;
155 } 157 }
156 spin_unlock(&ib_uverbs_idr_lock); 158 rcu_read_unlock();
157 159
158 return uobj; 160 return uobj;
159} 161}
@@ -2446,6 +2448,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2446 int i, sg_ind; 2448 int i, sg_ind;
2447 int is_ud; 2449 int is_ud;
2448 ssize_t ret = -EINVAL; 2450 ssize_t ret = -EINVAL;
2451 size_t next_size;
2449 2452
2450 if (copy_from_user(&cmd, buf, sizeof cmd)) 2453 if (copy_from_user(&cmd, buf, sizeof cmd))
2451 return -EFAULT; 2454 return -EFAULT;
@@ -2490,7 +2493,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2490 goto out_put; 2493 goto out_put;
2491 } 2494 }
2492 2495
2493 ud = alloc_wr(sizeof(*ud), user_wr->num_sge); 2496 next_size = sizeof(*ud);
2497 ud = alloc_wr(next_size, user_wr->num_sge);
2494 if (!ud) { 2498 if (!ud) {
2495 ret = -ENOMEM; 2499 ret = -ENOMEM;
2496 goto out_put; 2500 goto out_put;
@@ -2511,7 +2515,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2511 user_wr->opcode == IB_WR_RDMA_READ) { 2515 user_wr->opcode == IB_WR_RDMA_READ) {
2512 struct ib_rdma_wr *rdma; 2516 struct ib_rdma_wr *rdma;
2513 2517
2514 rdma = alloc_wr(sizeof(*rdma), user_wr->num_sge); 2518 next_size = sizeof(*rdma);
2519 rdma = alloc_wr(next_size, user_wr->num_sge);
2515 if (!rdma) { 2520 if (!rdma) {
2516 ret = -ENOMEM; 2521 ret = -ENOMEM;
2517 goto out_put; 2522 goto out_put;
@@ -2525,7 +2530,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2525 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { 2530 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2526 struct ib_atomic_wr *atomic; 2531 struct ib_atomic_wr *atomic;
2527 2532
2528 atomic = alloc_wr(sizeof(*atomic), user_wr->num_sge); 2533 next_size = sizeof(*atomic);
2534 atomic = alloc_wr(next_size, user_wr->num_sge);
2529 if (!atomic) { 2535 if (!atomic) {
2530 ret = -ENOMEM; 2536 ret = -ENOMEM;
2531 goto out_put; 2537 goto out_put;
@@ -2540,7 +2546,8 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2540 } else if (user_wr->opcode == IB_WR_SEND || 2546 } else if (user_wr->opcode == IB_WR_SEND ||
2541 user_wr->opcode == IB_WR_SEND_WITH_IMM || 2547 user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2542 user_wr->opcode == IB_WR_SEND_WITH_INV) { 2548 user_wr->opcode == IB_WR_SEND_WITH_INV) {
2543 next = alloc_wr(sizeof(*next), user_wr->num_sge); 2549 next_size = sizeof(*next);
2550 next = alloc_wr(next_size, user_wr->num_sge);
2544 if (!next) { 2551 if (!next) {
2545 ret = -ENOMEM; 2552 ret = -ENOMEM;
2546 goto out_put; 2553 goto out_put;
@@ -2572,7 +2579,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2572 2579
2573 if (next->num_sge) { 2580 if (next->num_sge) {
2574 next->sg_list = (void *) next + 2581 next->sg_list = (void *) next +
2575 ALIGN(sizeof *next, sizeof (struct ib_sge)); 2582 ALIGN(next_size, sizeof(struct ib_sge));
2576 if (copy_from_user(next->sg_list, 2583 if (copy_from_user(next->sg_list,
2577 buf + sizeof cmd + 2584 buf + sizeof cmd +
2578 cmd.wr_count * cmd.wqe_size + 2585 cmd.wr_count * cmd.wqe_size +
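Lookups now run locklessly under RCU: the spinlock is only needed for idr writers, readers pin the object with a kref before leaving the read-side section, and kfree_rcu() guarantees the memory survives until every such reader is done. The next_size bookkeeping in ib_uverbs_post_send() is a separate fix: sg_list is carved out immediately behind the work-request struct, and the per-opcode structs are larger than the base ib_send_wr, so the offset must use the size actually allocated rather than sizeof(*next). The RCU lookup pattern, sketched with hypothetical types:

#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
	struct kref ref;
	struct rcu_head rcu;
};

static void example_release(struct kref *kref)
{
	/* Defer the free until concurrent RCU readers are done. */
	kfree_rcu(container_of(kref, struct example_obj, ref), rcu);
}

static void example_put(struct example_obj *obj)
{
	kref_put(&obj->ref, example_release);
}

static struct example_obj *example_lookup(struct idr *idr, int id)
{
	struct example_obj *obj;

	rcu_read_lock();
	obj = idr_find(idr, id);
	if (obj)
		kref_get(&obj->ref);	/* pin before leaving the section */
	rcu_read_unlock();

	return obj;
}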
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 043a60ee6836..545906dec26d 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1516,7 +1516,7 @@ EXPORT_SYMBOL(ib_map_mr_sg);
1516 * @sg_nents: number of entries in sg 1516 * @sg_nents: number of entries in sg
1517 * @set_page: driver page assignment function pointer 1517 * @set_page: driver page assignment function pointer
1518 * 1518 *
1519 * Core service helper for drivers to covert the largest 1519 * Core service helper for drivers to convert the largest
1520 * prefix of given sg list to a page vector. The sg list 1520 * prefix of given sg list to a page vector. The sg list
1521 * prefix converted is the prefix that meet the requirements 1521 * prefix converted is the prefix that meet the requirements
1522 * of ib_map_mr_sg. 1522 * of ib_map_mr_sg.
@@ -1533,7 +1533,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
1533 u64 last_end_dma_addr = 0, last_page_addr = 0; 1533 u64 last_end_dma_addr = 0, last_page_addr = 0;
1534 unsigned int last_page_off = 0; 1534 unsigned int last_page_off = 0;
1535 u64 page_mask = ~((u64)mr->page_size - 1); 1535 u64 page_mask = ~((u64)mr->page_size - 1);
1536 int i; 1536 int i, ret;
1537 1537
1538 mr->iova = sg_dma_address(&sgl[0]); 1538 mr->iova = sg_dma_address(&sgl[0]);
1539 mr->length = 0; 1539 mr->length = 0;
@@ -1544,27 +1544,29 @@ int ib_sg_to_pages(struct ib_mr *mr,
1544 u64 end_dma_addr = dma_addr + dma_len; 1544 u64 end_dma_addr = dma_addr + dma_len;
1545 u64 page_addr = dma_addr & page_mask; 1545 u64 page_addr = dma_addr & page_mask;
1546 1546
1547 if (i && page_addr != dma_addr) { 1547 /*
1548 if (last_end_dma_addr != dma_addr) { 1548 * For the second and later elements, check whether either the
1549 /* gap */ 1549 * end of element i-1 or the start of element i is not aligned
1550 goto done; 1550 * on a page boundary.
1551 1551 */
1552 } else if (last_page_off + dma_len <= mr->page_size) { 1552 if (i && (last_page_off != 0 || page_addr != dma_addr)) {
1553 /* chunk this fragment with the last */ 1553 /* Stop mapping if there is a gap. */
1554 mr->length += dma_len; 1554 if (last_end_dma_addr != dma_addr)
1555 last_end_dma_addr += dma_len; 1555 break;
1556 last_page_off += dma_len; 1556
1557 continue; 1557 /*
1558 } else { 1558 * Coalesce this element with the last. If it is small
1559 /* map starting from the next page */ 1559 * enough just update mr->length. Otherwise start
1560 page_addr = last_page_addr + mr->page_size; 1560 * mapping from the next page.
1561 dma_len -= mr->page_size - last_page_off; 1561 */
1562 } 1562 goto next_page;
1563 } 1563 }
1564 1564
1565 do { 1565 do {
1566 if (unlikely(set_page(mr, page_addr))) 1566 ret = set_page(mr, page_addr);
1567 goto done; 1567 if (unlikely(ret < 0))
1568 return i ? : ret;
1569next_page:
1568 page_addr += mr->page_size; 1570 page_addr += mr->page_size;
1569 } while (page_addr < end_dma_addr); 1571 } while (page_addr < end_dma_addr);
1570 1572
@@ -1574,7 +1576,6 @@ int ib_sg_to_pages(struct ib_mr *mr,
1574 last_page_off = end_dma_addr & ~page_mask; 1576 last_page_off = end_dma_addr & ~page_mask;
1575 } 1577 }
1576 1578
1577done:
1578 return i; 1579 return i;
1579} 1580}
1580EXPORT_SYMBOL(ib_sg_to_pages); 1581EXPORT_SYMBOL(ib_sg_to_pages);
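Besides the much clearer commentary on when elements may be coalesced (both the end of element i-1 and the start of element i must sit on a page boundary), the failure path changes meaning: a set_page() error now propagates as "return i ? : ret", that is, the number of fully mapped elements when there are any, else the error itself. The "x ? : y" form is GNU C's conditional with an omitted middle operand, worth spelling out:

#include <stdio.h>

/* GNU C: "x ? : y" evaluates to x if x is non-zero, else y,
 * evaluating x only once. */
static int example_result(int mapped, int err)
{
	return mapped ? : err;
}

int main(void)
{
	printf("%d\n", example_result(3, -22));	/* 3   */
	printf("%d\n", example_result(0, -22));	/* -22 */
	return 0;
}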
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f567160a4a56..97d6878f9938 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -456,7 +456,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
456 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; 456 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
457 props->max_sge = min(dev->dev->caps.max_sq_sg, 457 props->max_sge = min(dev->dev->caps.max_sq_sg,
458 dev->dev->caps.max_rq_sg); 458 dev->dev->caps.max_rq_sg);
459 props->max_sge_rd = props->max_sge; 459 props->max_sge_rd = MLX4_MAX_SGE_RD;
460 props->max_cq = dev->dev->quotas.cq; 460 props->max_cq = dev->dev->quotas.cq;
461 props->max_cqe = dev->dev->caps.max_cqes; 461 props->max_cqe = dev->dev->caps.max_cqes;
462 props->max_mr = dev->dev->quotas.mpt; 462 props->max_mr = dev->dev->quotas.mpt;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a2e4ca56da44..13eaaf45288f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -34,6 +34,7 @@
34#include <linux/log2.h> 34#include <linux/log2.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/netdevice.h> 36#include <linux/netdevice.h>
37#include <linux/vmalloc.h>
37 38
38#include <rdma/ib_cache.h> 39#include <rdma/ib_cache.h>
39#include <rdma/ib_pack.h> 40#include <rdma/ib_pack.h>
@@ -795,8 +796,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
795 if (err) 796 if (err)
796 goto err_mtt; 797 goto err_mtt;
797 798
798 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp); 799 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(u64), gfp);
799 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp); 800 if (!qp->sq.wrid)
801 qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
802 gfp, PAGE_KERNEL);
803 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(u64), gfp);
804 if (!qp->rq.wrid)
805 qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
806 gfp, PAGE_KERNEL);
800 if (!qp->sq.wrid || !qp->rq.wrid) { 807 if (!qp->sq.wrid || !qp->rq.wrid) {
801 err = -ENOMEM; 808 err = -ENOMEM;
802 goto err_wrid; 809 goto err_wrid;
@@ -886,8 +893,8 @@ err_wrid:
886 if (qp_has_rq(init_attr)) 893 if (qp_has_rq(init_attr))
887 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); 894 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
888 } else { 895 } else {
889 kfree(qp->sq.wrid); 896 kvfree(qp->sq.wrid);
890 kfree(qp->rq.wrid); 897 kvfree(qp->rq.wrid);
891 } 898 }
892 899
893err_mtt: 900err_mtt:
@@ -1062,8 +1069,8 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
1062 &qp->db); 1069 &qp->db);
1063 ib_umem_release(qp->umem); 1070 ib_umem_release(qp->umem);
1064 } else { 1071 } else {
1065 kfree(qp->sq.wrid); 1072 kvfree(qp->sq.wrid);
1066 kfree(qp->rq.wrid); 1073 kvfree(qp->rq.wrid);
1067 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | 1074 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
1068 MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) 1075 MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
1069 free_proxy_bufs(&dev->ib_dev, qp); 1076 free_proxy_bufs(&dev->ib_dev, qp);
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index dce5dfe3a70e..8d133c40fa0e 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -34,6 +34,7 @@
34#include <linux/mlx4/qp.h> 34#include <linux/mlx4/qp.h>
35#include <linux/mlx4/srq.h> 35#include <linux/mlx4/srq.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/vmalloc.h>
37 38
38#include "mlx4_ib.h" 39#include "mlx4_ib.h"
39#include "user.h" 40#include "user.h"
@@ -172,8 +173,12 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
172 173
173 srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL); 174 srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
174 if (!srq->wrid) { 175 if (!srq->wrid) {
175 err = -ENOMEM; 176 srq->wrid = __vmalloc(srq->msrq.max * sizeof(u64),
176 goto err_mtt; 177 GFP_KERNEL, PAGE_KERNEL);
178 if (!srq->wrid) {
179 err = -ENOMEM;
180 goto err_mtt;
181 }
177 } 182 }
178 } 183 }
179 184
@@ -204,7 +209,7 @@ err_wrid:
204 if (pd->uobject) 209 if (pd->uobject)
205 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); 210 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
206 else 211 else
207 kfree(srq->wrid); 212 kvfree(srq->wrid);
208 213
209err_mtt: 214err_mtt:
210 mlx4_mtt_cleanup(dev->dev, &srq->mtt); 215 mlx4_mtt_cleanup(dev->dev, &srq->mtt);
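The two mlx4 hunks above apply the same allocation strategy: try kmalloc() first (fast, physically contiguous), fall back to __vmalloc() with the caller's GFP flags when the work-request-ID array is too large for the page allocator, and free with kvfree(), which handles either origin. Later kernels wrap this exact idiom as kvmalloc(). A sketch:

#include <linux/slab.h>
#include <linux/vmalloc.h>

static u64 *example_alloc_wrid(size_t count, gfp_t gfp)
{
	u64 *wrid = kmalloc(count * sizeof(u64), gfp);

	if (!wrid)	/* too big or too fragmented: use vmalloc space */
		wrid = __vmalloc(count * sizeof(u64), gfp, PAGE_KERNEL);
	return wrid;
}

static void example_free_wrid(u64 *wrid)
{
	kvfree(wrid);	/* correct for both allocation paths */
}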
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ec8993a7b3be..6000f7aeede9 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -381,7 +381,19 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
381 } 381 }
382 } 382 }
383 } else if (ent->cur > 2 * ent->limit) { 383 } else if (ent->cur > 2 * ent->limit) {
384 if (!someone_adding(cache) && 384 /*
385 * The remove_keys() logic is performed as garbage collection
386 * task. Such task is intended to be run when no other active
387 * processes are running.
388 *
389 * The need_resched() will return TRUE if there are user tasks
390 * to be activated in near future.
391 *
392 * In such case, we don't execute remove_keys() and postpone
393 * the garbage collection work to try to run in next cycle,
394 * in order to free CPU resources to other tasks.
395 */
396 if (!need_resched() && !someone_adding(cache) &&
385 time_after(jiffies, cache->last_add + 300 * HZ)) { 397 time_after(jiffies, cache->last_add + 300 * HZ)) {
386 remove_keys(dev, i, 1); 398 remove_keys(dev, i, 1);
387 if (ent->cur > ent->limit) 399 if (ent->cur > ent->limit)
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 5e27f76805e2..4c7c3c84a741 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -292,7 +292,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
292 qib_dev_porterr(ppd->dd, ppd->port, 292 qib_dev_porterr(ppd->dd, ppd->port,
293 "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]); 293 "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
294 294
295 if ((peek[2] & 2) == 0) { 295 if ((peek[2] & 4) == 0) {
296 /* 296 /*
297 * If cable is paged, rather than "flat memory", we need to 297 * If cable is paged, rather than "flat memory", we need to
298 * set the page to zero, Even if it already appears to be zero. 298 * set the page to zero, Even if it already appears to be zero.
@@ -538,7 +538,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
538 sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n", 538 sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
539 QSFP_DATE_LEN, cd.date); 539 QSFP_DATE_LEN, cd.date);
540 sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n", 540 sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
541 QSFP_LOT_LEN, cd.date); 541 QSFP_LOT_LEN, cd.lot);
542 542
543 while (bidx < QSFP_DEFAULT_HDR_CNT) { 543 while (bidx < QSFP_DEFAULT_HDR_CNT) {
544 int iidx; 544 int iidx;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 2baf5ad251ed..bc803f33d5f6 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -329,9 +329,9 @@ struct qib_sge {
329struct qib_mr { 329struct qib_mr {
330 struct ib_mr ibmr; 330 struct ib_mr ibmr;
331 struct ib_umem *umem; 331 struct ib_umem *umem;
332 struct qib_mregion mr; /* must be last */
333 u64 *pages; 332 u64 *pages;
334 u32 npages; 333 u32 npages;
334 struct qib_mregion mr; /* must be last */
335}; 335};
336 336
337/* 337/*
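The "must be last" comment is literal: struct qib_mregion ends in a zero-length array, the kernel's idiom for a flexible tail sized at allocation time, so any field placed after it lands on top of the array's elements. The recently added pages/npages fields had slipped below it; moving them above restores the invariant. The idiom in miniature:

#include <linux/slab.h>

struct example_region {
	int nmaps;
	void *map[0];	/* flexible tail: storage allocated past the struct */
};

struct example_mr {
	unsigned long *pages;
	unsigned int npages;
	struct example_region mr;	/* must be last */
};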
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a93070210109..42f4da620f2e 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1293,7 +1293,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
1293 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1293 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1294 sector_t sector_off = mr_status.sig_err.sig_err_offset; 1294 sector_t sector_off = mr_status.sig_err.sig_err_offset;
1295 1295
1296 do_div(sector_off, sector_size + 8); 1296 sector_div(sector_off, sector_size + 8);
1297 *sector = scsi_get_lba(iser_task->sc) + sector_off; 1297 *sector = scsi_get_lba(iser_task->sc) + sector_off;
1298 1298
1299 pr_err("PI error found type %d at sector %llx " 1299 pr_err("PI error found type %d at sector %llx "
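do_div() is only defined for a 64-bit dividend, while sector_t is 32 bits wide on configurations without large-block-device support; sector_div() is the variant specified for sector_t, dividing in place and returning the remainder. Usage in miniature, assuming the kernel's sector_div() semantics:

#include <linux/blkdev.h>

static sector_t example_sector_index(sector_t off, unsigned int sector_size)
{
	sector_div(off, sector_size + 8);	/* off /= sector_size + 8 */
	return off;
}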
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index dfbbbb28090b..8a51c3b5d657 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -157,16 +157,9 @@ isert_create_qp(struct isert_conn *isert_conn,
157 attr.recv_cq = comp->cq; 157 attr.recv_cq = comp->cq;
158 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; 158 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
159 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1; 159 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
160 /* 160 attr.cap.max_send_sge = device->dev_attr.max_sge;
161 * FIXME: Use devattr.max_sge - 2 for max_send_sge as 161 isert_conn->max_sge = min(device->dev_attr.max_sge,
162 * work-around for RDMA_READs with ConnectX-2. 162 device->dev_attr.max_sge_rd);
163 *
164 * Also, still make sure to have at least two SGEs for
165 * outgoing control PDU responses.
166 */
167 attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
168 isert_conn->max_sge = attr.cap.max_send_sge;
169
170 attr.cap.max_recv_sge = 1; 163 attr.cap.max_recv_sge = 1;
171 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 164 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
172 attr.qp_type = IB_QPT_RC; 165 attr.qp_type = IB_QPT_RC;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 9909022dc6c3..3db9a659719b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -488,7 +488,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
488 struct ib_qp *qp; 488 struct ib_qp *qp;
489 struct ib_fmr_pool *fmr_pool = NULL; 489 struct ib_fmr_pool *fmr_pool = NULL;
490 struct srp_fr_pool *fr_pool = NULL; 490 struct srp_fr_pool *fr_pool = NULL;
491 const int m = 1 + dev->use_fast_reg; 491 const int m = dev->use_fast_reg ? 3 : 1;
492 struct ib_cq_init_attr cq_attr = {}; 492 struct ib_cq_init_attr cq_attr = {};
493 int ret; 493 int ret;
494 494
@@ -994,16 +994,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
994 994
995 ret = srp_lookup_path(ch); 995 ret = srp_lookup_path(ch);
996 if (ret) 996 if (ret)
997 return ret; 997 goto out;
998 998
999 while (1) { 999 while (1) {
1000 init_completion(&ch->done); 1000 init_completion(&ch->done);
1001 ret = srp_send_req(ch, multich); 1001 ret = srp_send_req(ch, multich);
1002 if (ret) 1002 if (ret)
1003 return ret; 1003 goto out;
1004 ret = wait_for_completion_interruptible(&ch->done); 1004 ret = wait_for_completion_interruptible(&ch->done);
1005 if (ret < 0) 1005 if (ret < 0)
1006 return ret; 1006 goto out;
1007 1007
1008 /* 1008 /*
1009 * The CM event handling code will set status to 1009 * The CM event handling code will set status to
@@ -1011,15 +1011,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
1011 * back, or SRP_DLID_REDIRECT if we get a lid/qp 1011 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1012 * redirect REJ back. 1012 * redirect REJ back.
1013 */ 1013 */
1014 switch (ch->status) { 1014 ret = ch->status;
1015 switch (ret) {
1015 case 0: 1016 case 0:
1016 ch->connected = true; 1017 ch->connected = true;
1017 return 0; 1018 goto out;
1018 1019
1019 case SRP_PORT_REDIRECT: 1020 case SRP_PORT_REDIRECT:
1020 ret = srp_lookup_path(ch); 1021 ret = srp_lookup_path(ch);
1021 if (ret) 1022 if (ret)
1022 return ret; 1023 goto out;
1023 break; 1024 break;
1024 1025
1025 case SRP_DLID_REDIRECT: 1026 case SRP_DLID_REDIRECT:
@@ -1028,13 +1029,16 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
1028 case SRP_STALE_CONN: 1029 case SRP_STALE_CONN:
1029 shost_printk(KERN_ERR, target->scsi_host, PFX 1030 shost_printk(KERN_ERR, target->scsi_host, PFX
1030 "giving up on stale connection\n"); 1031 "giving up on stale connection\n");
1031 ch->status = -ECONNRESET; 1032 ret = -ECONNRESET;
1032 return ch->status; 1033 goto out;
1033 1034
1034 default: 1035 default:
1035 return ch->status; 1036 goto out;
1036 } 1037 }
1037 } 1038 }
1039
1040out:
1041 return ret <= 0 ? ret : -ENODEV;
1038} 1042}
1039 1043
1040static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey) 1044static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
@@ -1309,7 +1313,7 @@ reset_state:
1309} 1313}
1310 1314
1311static int srp_map_finish_fr(struct srp_map_state *state, 1315static int srp_map_finish_fr(struct srp_map_state *state,
1312 struct srp_rdma_ch *ch) 1316 struct srp_rdma_ch *ch, int sg_nents)
1313{ 1317{
1314 struct srp_target_port *target = ch->target; 1318 struct srp_target_port *target = ch->target;
1315 struct srp_device *dev = target->srp_host->srp_dev; 1319 struct srp_device *dev = target->srp_host->srp_dev;
@@ -1324,10 +1328,10 @@ static int srp_map_finish_fr(struct srp_map_state *state,
1324 1328
1325 WARN_ON_ONCE(!dev->use_fast_reg); 1329 WARN_ON_ONCE(!dev->use_fast_reg);
1326 1330
1327 if (state->sg_nents == 0) 1331 if (sg_nents == 0)
1328 return 0; 1332 return 0;
1329 1333
1330 if (state->sg_nents == 1 && target->global_mr) { 1334 if (sg_nents == 1 && target->global_mr) {
1331 srp_map_desc(state, sg_dma_address(state->sg), 1335 srp_map_desc(state, sg_dma_address(state->sg),
1332 sg_dma_len(state->sg), 1336 sg_dma_len(state->sg),
1333 target->global_mr->rkey); 1337 target->global_mr->rkey);
@@ -1341,8 +1345,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
1341 rkey = ib_inc_rkey(desc->mr->rkey); 1345 rkey = ib_inc_rkey(desc->mr->rkey);
1342 ib_update_fast_reg_key(desc->mr, rkey); 1346 ib_update_fast_reg_key(desc->mr, rkey);
1343 1347
1344 n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents, 1348 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
1345 dev->mr_page_size);
1346 if (unlikely(n < 0)) 1349 if (unlikely(n < 0))
1347 return n; 1350 return n;
1348 1351
@@ -1448,16 +1451,15 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
1448 state->fr.next = req->fr_list; 1451 state->fr.next = req->fr_list;
1449 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt; 1452 state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
1450 state->sg = scat; 1453 state->sg = scat;
1451 state->sg_nents = scsi_sg_count(req->scmnd);
1452 1454
1453 while (state->sg_nents) { 1455 while (count) {
1454 int i, n; 1456 int i, n;
1455 1457
1456 n = srp_map_finish_fr(state, ch); 1458 n = srp_map_finish_fr(state, ch, count);
1457 if (unlikely(n < 0)) 1459 if (unlikely(n < 0))
1458 return n; 1460 return n;
1459 1461
1460 state->sg_nents -= n; 1462 count -= n;
1461 for (i = 0; i < n; i++) 1463 for (i = 0; i < n; i++)
1462 state->sg = sg_next(state->sg); 1464 state->sg = sg_next(state->sg);
1463 } 1465 }
@@ -1517,10 +1519,12 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1517 1519
1518 if (dev->use_fast_reg) { 1520 if (dev->use_fast_reg) {
1519 state.sg = idb_sg; 1521 state.sg = idb_sg;
1520 state.sg_nents = 1;
1521 sg_set_buf(idb_sg, req->indirect_desc, idb_len); 1522 sg_set_buf(idb_sg, req->indirect_desc, idb_len);
1522 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */ 1523 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
1523 ret = srp_map_finish_fr(&state, ch); 1524#ifdef CONFIG_NEED_SG_DMA_LENGTH
1525 idb_sg->dma_length = idb_sg->length; /* hack^2 */
1526#endif
1527 ret = srp_map_finish_fr(&state, ch, 1);
1524 if (ret < 0) 1528 if (ret < 0)
1525 return ret; 1529 return ret;
1526 } else if (dev->use_fmr) { 1530 } else if (dev->use_fmr) {
@@ -1655,7 +1659,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1655 return ret; 1659 return ret;
1656 req->nmdesc++; 1660 req->nmdesc++;
1657 } else { 1661 } else {
1658 idb_rkey = target->global_mr->rkey; 1662 idb_rkey = cpu_to_be32(target->global_mr->rkey);
1659 } 1663 }
1660 1664
1661 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); 1665 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
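Two correctness points ride along in the ib_srp.c changes: the connect loop now funnels every exit through one label that maps driver-internal positive status codes (SRP_PORT_REDIRECT and friends) to -ENODEV while passing 0 and negative errnos through, and idb_rkey gets an explicit cpu_to_be32(), since rkeys travel on the wire in big-endian format. The exit mapping in isolation:

#include <linux/errno.h>

/* Collapse a mixed status into a return code: 0 and negative errnos
 * pass through, positive internal codes become -ENODEV. */
static int example_connect_result(int ret)
{
	return ret <= 0 ? ret : -ENODEV;
}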
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 87a2a919dc43..f6af531f9f32 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -300,10 +300,7 @@ struct srp_map_state {
300 dma_addr_t base_dma_addr; 300 dma_addr_t base_dma_addr;
301 u32 dma_len; 301 u32 dma_len;
302 u32 total_len; 302 u32 total_len;
303 union { 303 unsigned int npages;
304 unsigned int npages;
305 int sg_nents;
306 };
307 unsigned int nmdesc; 304 unsigned int nmdesc;
308 unsigned int ndesc; 305 unsigned int ndesc;
309}; 306};
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index 932d07307454..da326090c2b0 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -592,6 +592,7 @@ static void db9_attach(struct parport *pp)
592 return; 592 return;
593 } 593 }
594 594
595 memset(&db9_parport_cb, 0, sizeof(db9_parport_cb));
595 db9_parport_cb.flags = PARPORT_FLAG_EXCL; 596 db9_parport_cb.flags = PARPORT_FLAG_EXCL;
596 597
597 pd = parport_register_dev_model(pp, "db9", &db9_parport_cb, port_idx); 598 pd = parport_register_dev_model(pp, "db9", &db9_parport_cb, port_idx);
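This and the following parport hunks (gamecon, turbografx, walkera0701, parkbd) apply one fix: the drivers fill only a few fields of a struct pardev_cb before handing it to parport_register_dev_model(), and where that struct lives on the stack (as parkbd's visibly does), every unset field, including callback pointers, is indeterminate. Zeroing first makes the unused callbacks NULL. The safe registration pattern:

#include <linux/parport.h>
#include <linux/string.h>

static struct pardevice *example_register(struct parport *pp, void *priv,
					  void (*irq_func)(void *))
{
	struct pardev_cb cb;

	memset(&cb, 0, sizeof(cb));	/* unset callbacks become NULL */
	cb.flags = PARPORT_FLAG_EXCL;
	cb.irq_func = irq_func;
	cb.private = priv;

	return parport_register_dev_model(pp, "example", &cb, 0);
}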
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 5a672dcac0d8..eae14d512353 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -951,6 +951,7 @@ static void gc_attach(struct parport *pp)
951 pads = gc_cfg[port_idx].args + 1; 951 pads = gc_cfg[port_idx].args + 1;
952 n_pads = gc_cfg[port_idx].nargs - 1; 952 n_pads = gc_cfg[port_idx].nargs - 1;
953 953
954 memset(&gc_parport_cb, 0, sizeof(gc_parport_cb));
954 gc_parport_cb.flags = PARPORT_FLAG_EXCL; 955 gc_parport_cb.flags = PARPORT_FLAG_EXCL;
955 956
956 pd = parport_register_dev_model(pp, "gamecon", &gc_parport_cb, 957 pd = parport_register_dev_model(pp, "gamecon", &gc_parport_cb,
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index 9f5bca26bd2f..77f575dd0901 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -181,6 +181,7 @@ static void tgfx_attach(struct parport *pp)
181 n_buttons = tgfx_cfg[port_idx].args + 1; 181 n_buttons = tgfx_cfg[port_idx].args + 1;
182 n_devs = tgfx_cfg[port_idx].nargs - 1; 182 n_devs = tgfx_cfg[port_idx].nargs - 1;
183 183
184 memset(&tgfx_parport_cb, 0, sizeof(tgfx_parport_cb));
184 tgfx_parport_cb.flags = PARPORT_FLAG_EXCL; 185 tgfx_parport_cb.flags = PARPORT_FLAG_EXCL;
185 186
186 pd = parport_register_dev_model(pp, "turbografx", &tgfx_parport_cb, 187 pd = parport_register_dev_model(pp, "turbografx", &tgfx_parport_cb,
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index 9c07fe911075..70a893a17467 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -218,6 +218,7 @@ static void walkera0701_attach(struct parport *pp)
218 218
219 w->parport = pp; 219 w->parport = pp;
220 220
221 memset(&walkera0701_parport_cb, 0, sizeof(walkera0701_parport_cb));
221 walkera0701_parport_cb.flags = PARPORT_FLAG_EXCL; 222 walkera0701_parport_cb.flags = PARPORT_FLAG_EXCL;
222 walkera0701_parport_cb.irq_func = walkera0701_irq_handler; 223 walkera0701_parport_cb.irq_func = walkera0701_irq_handler;
223 walkera0701_parport_cb.private = w; 224 walkera0701_parport_cb.private = w;
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 4bf678541496..d5994a745ffa 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -97,8 +97,7 @@ static void arizona_haptics_work(struct work_struct *work)
97 97
98 ret = regmap_update_bits(arizona->regmap, 98 ret = regmap_update_bits(arizona->regmap,
99 ARIZONA_HAPTICS_CONTROL_1, 99 ARIZONA_HAPTICS_CONTROL_1,
100 ARIZONA_HAP_CTRL_MASK, 100 ARIZONA_HAP_CTRL_MASK, 0);
101 1 << ARIZONA_HAP_CTRL_SHIFT);
102 if (ret != 0) { 101 if (ret != 0) {
103 dev_err(arizona->dev, "Failed to stop haptics: %d\n", 102 dev_err(arizona->dev, "Failed to stop haptics: %d\n",
104 ret); 103 ret);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 5e1665bbaa0b..2f589857a039 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -41,6 +41,7 @@
 
 #define DRIVER_NAME		"elan_i2c"
 #define ELAN_DRIVER_VERSION	"1.6.1"
+#define ELAN_VENDOR_ID		0x04f3
 #define ETP_MAX_PRESSURE	255
 #define ETP_FWIDTH_REDUCE	90
 #define ETP_FINGER_WIDTH	15
@@ -914,6 +915,8 @@ static int elan_setup_input_device(struct elan_tp_data *data)
 
 	input->name = "Elan Touchpad";
 	input->id.bustype = BUS_I2C;
+	input->id.vendor = ELAN_VENDOR_ID;
+	input->id.product = data->product_id;
 	input_set_drvdata(input, data);
 
 	error = input_mt_init_slots(input, ETP_MAX_FINGERS,
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 92c31b8f8fb4..1edfac78d4ac 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -145,6 +145,7 @@ static int parkbd_getport(struct parport *pp)
 {
 	struct pardev_cb parkbd_parport_cb;
 
+	memset(&parkbd_parport_cb, 0, sizeof(parkbd_parport_cb));
 	parkbd_parport_cb.irq_func = parkbd_interrupt;
 	parkbd_parport_cb.flags = PARPORT_FLAG_EXCL;
 
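The gamecon, turbografx, walkera0701 and parkbd hunks all apply the same fix: a memset() before filling in a stack-allocated struct pardev_cb. Only some members are assigned explicitly, so without the memset the remaining members (including optional callback pointers) are stack garbage, and the consumer cannot tell a junk function pointer from a real one. A minimal sketch of the pattern; struct dev_cb and register_dev() are hypothetical stand-ins, not the kernel's parport API:

    #include <string.h>
    #include <stdio.h>

    struct dev_cb {
        void (*irq_func)(void *);   /* optional callback */
        void (*wakeup)(void *);     /* optional callback */
        void *private;
        unsigned int flags;
    };

    static void register_dev(const struct dev_cb *cb)
    {
        /* The consumer treats any non-NULL pointer as callable, so every
         * unused member must really be NULL, not leftover stack bytes. */
        if (cb->wakeup)
            cb->wakeup(cb->private);
    }

    int main(void)
    {
        struct dev_cb cb;

        memset(&cb, 0, sizeof(cb));   /* the fix: start from all-zeroes */
        cb.flags = 0x1;               /* then set only what you use */
        register_dev(&cb);            /* safe: cb.wakeup is NULL */
        puts("ok");
        return 0;
    }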
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index e7f966da6efa..78ca44840d60 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1819,6 +1819,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0);
 	input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
 
+	/* Verify that a device really has an endpoint */
+	if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+		dev_err(&intf->dev,
+			"interface has %d endpoints, but must have minimum 1\n",
+			intf->altsetting[0].desc.bNumEndpoints);
+		err = -EINVAL;
+		goto fail3;
+	}
 	endpoint = &intf->altsetting[0].endpoint[0].desc;
 
 	/* Go set up our URB, which is called when the tablet receives
@@ -1861,6 +1869,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	if (i == ARRAY_SIZE(speeds)) {
 		dev_info(&intf->dev,
 			 "Aiptek tried all speeds, no sane response\n");
+		err = -EINVAL;
 		goto fail3;
 	}
 
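The two aiptek hunks harden the USB probe path. The first checks bNumEndpoints before dereferencing altsetting[0].endpoint[0]: a malicious or broken device can report an interface with zero endpoints, and indexing endpoint[0] then reads past the descriptor array. The second sets err to -EINVAL before the goto, apparently so the failure path cannot return a stale or zero error code. A sketch of the validate-before-index shape; the struct names below are simplified stand-ins for the USB host-interface types:

    #include <stdio.h>

    struct endpoint_desc { unsigned char addr; };

    struct altsetting {
        int num_endpoints;             /* reported by the device itself */
        struct endpoint_desc endpoint[8];
    };

    /* Returns a usable endpoint or NULL; never trusts the device's own
     * count until it has been checked against the array we index. */
    static struct endpoint_desc *first_endpoint(struct altsetting *alt)
    {
        if (alt->num_endpoints < 1)
            return NULL;               /* the fix: bail out with -EINVAL */
        return &alt->endpoint[0];
    }

    int main(void)
    {
        struct altsetting bogus = { .num_endpoints = 0 };

        if (!first_endpoint(&bogus))
            puts("rejected device with no endpoints");
        return 0;
    }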
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index c5622058c22b..2d5794ec338b 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2487,6 +2487,31 @@ static struct mxt_acpi_platform_data samus_platform_data[] = {
 	{ }
 };
 
+static unsigned int chromebook_tp_buttons[] = {
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	BTN_LEFT
+};
+
+static struct mxt_acpi_platform_data chromebook_platform_data[] = {
+	{
+		/* Touchpad */
+		.hid = "ATML0000",
+		.pdata = {
+			.t19_num_keys = ARRAY_SIZE(chromebook_tp_buttons),
+			.t19_keymap = chromebook_tp_buttons,
+		},
+	},
+	{
+		/* Touchscreen */
+		.hid = "ATML0001",
+	},
+	{ }
+};
+
 static const struct dmi_system_id mxt_dmi_table[] = {
 	{
 		/* 2015 Google Pixel */
@@ -2497,6 +2522,14 @@ static const struct dmi_system_id mxt_dmi_table[] = {
 		},
 		.driver_data = samus_platform_data,
 	},
+	{
+		/* Other Google Chromebooks */
+		.ident = "Chromebook",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+		},
+		.driver_data = chromebook_platform_data,
+	},
 	{ }
 };
 
@@ -2701,6 +2734,7 @@ static const struct i2c_device_id mxt_id[] = {
 	{ "qt602240_ts", 0 },
 	{ "atmel_mxt_ts", 0 },
 	{ "atmel_mxt_tp", 0 },
+	{ "maxtouch", 0 },
 	{ "mXT224", 0 },
 	{ }
 };
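The atmel_mxt_ts hunks add a catch-all DMI entry: after the exact 2015 Google Pixel match, any machine whose DMI system vendor is "GOOGLE" receives chromebook_platform_data, which maps the T19 GPIO keys so the last key acts as BTN_LEFT. DMI tables are scanned top-down and the first hit wins, so placing the specific Pixel entry before the generic GOOGLE entry is what keeps the Pixel's own data in effect. A simplified analogue of that first-match semantics (not the real dmi_system_id machinery):

    #include <stdio.h>
    #include <string.h>

    struct dmi_entry {
        const char *vendor;   /* NULL vendor terminates the table */
        const char *product;  /* NULL means "don't care" */
        const char *driver_data;
    };

    static const struct dmi_entry table[] = {
        { "GOOGLE", "Samus", "samus_platform_data" },      /* specific first */
        { "GOOGLE", NULL,    "chromebook_platform_data" }, /* then catch-all */
        { NULL, NULL, NULL }
    };

    static const char *match(const char *vendor, const char *product)
    {
        const struct dmi_entry *e;

        for (e = table; e->vendor; e++)
            if (!strcmp(e->vendor, vendor) &&
                (!e->product || !strcmp(e->product, product)))
                return e->driver_data;   /* first hit wins */
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", match("GOOGLE", "Samus")); /* samus_platform_data */
        printf("%s\n", match("GOOGLE", "Link"));  /* chromebook_platform_data */
        return 0;
    }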
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 17cc20ef4923..ac09855fa435 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1316,7 +1316,13 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev)
 
 	disable_irq(client->irq);
 
-	if (device_may_wakeup(dev) || ts->keep_power_in_suspend) {
+	if (device_may_wakeup(dev)) {
+		/*
+		 * The device will automatically enter idle mode
+		 * that has reduced power consumption.
+		 */
+		ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0);
+	} else if (ts->keep_power_in_suspend) {
 		for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
 			error = elants_i2c_send(client, set_sleep_cmd,
 						sizeof(set_sleep_cmd));
@@ -1326,10 +1332,6 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev)
 			dev_err(&client->dev,
 				"suspend command failed: %d\n", error);
 		}
-
-		if (device_may_wakeup(dev))
-			ts->wake_irq_enabled =
-				(enable_irq_wake(client->irq) == 0);
 	} else {
 		elants_i2c_power_off(ts);
 	}
@@ -1345,10 +1347,11 @@ static int __maybe_unused elants_i2c_resume(struct device *dev)
 	int retry_cnt;
 	int error;
 
-	if (device_may_wakeup(dev) && ts->wake_irq_enabled)
-		disable_irq_wake(client->irq);
-
-	if (ts->keep_power_in_suspend) {
+	if (device_may_wakeup(dev)) {
+		if (ts->wake_irq_enabled)
+			disable_irq_wake(client->irq);
+		elants_i2c_sw_reset(client);
+	} else if (ts->keep_power_in_suspend) {
 		for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
 			error = elants_i2c_send(client, set_active_cmd,
 						sizeof(set_active_cmd));
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index d21d4edf7236..7caf2fa237f2 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -494,6 +494,22 @@ static void handle_fault_error(struct fault *fault)
 	}
 }
 
+static bool access_error(struct vm_area_struct *vma, struct fault *fault)
+{
+	unsigned long requested = 0;
+
+	if (fault->flags & PPR_FAULT_EXEC)
+		requested |= VM_EXEC;
+
+	if (fault->flags & PPR_FAULT_READ)
+		requested |= VM_READ;
+
+	if (fault->flags & PPR_FAULT_WRITE)
+		requested |= VM_WRITE;
+
+	return (requested & ~vma->vm_flags) != 0;
+}
+
 static void do_fault(struct work_struct *work)
 {
 	struct fault *fault = container_of(work, struct fault, work);
@@ -516,8 +532,8 @@ static void do_fault(struct work_struct *work)
 		goto out;
 	}
 
-	if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) {
-		/* handle_mm_fault would BUG_ON() */
+	/* Check if we have the right permissions on the vma */
+	if (access_error(vma, fault)) {
 		up_read(&mm->mmap_sem);
 		handle_fault_error(fault);
 		goto out;
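The new access_error() folds the fault's requested rights into a VM_* mask and rejects the fault exactly when it asks for something the VMA does not grant: `requested & ~vma->vm_flags` is non-zero iff at least one requested bit is missing. The old test only required the VMA to have some access bit at all, so e.g. a write fault against a read-only VMA slipped through to handle_mm_fault(). A worked sketch of the bit test with local stand-in flag values:

    #include <stdio.h>

    #define VM_READ  0x1UL
    #define VM_WRITE 0x2UL
    #define VM_EXEC  0x4UL

    /* Non-zero iff the fault requests a right the VMA lacks. */
    static int access_error(unsigned long vm_flags, unsigned long requested)
    {
        return (requested & ~vm_flags) != 0;
    }

    int main(void)
    {
        unsigned long vma = VM_READ | VM_EXEC;   /* an r-x mapping */

        printf("%d\n", access_error(vma, VM_READ));            /* 0: ok */
        printf("%d\n", access_error(vma, VM_READ | VM_WRITE)); /* 1: bad */
        return 0;
    }

The intel-svm.c hunks further down add the same helper keyed off the page-request descriptor bits instead of PPR flags.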
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f1042daef9ad..ac7387686ddc 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2159,7 +2159,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
-			pteval = (sg_phys(sg) & PAGE_MASK) | prot;
+			pteval = page_to_phys(sg_page(sg)) | prot;
 			phys_pfn = pteval >> VTD_PAGE_SHIFT;
 		}
 
@@ -3704,7 +3704,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
 	for_each_sg(sglist, sg, nelems, i) {
 		BUG_ON(!sg_page(sg));
-		sg->dma_address = sg_phys(sg);
+		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
 		sg->dma_length = sg->length;
 	}
 	return nelems;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index c69e3f9ec958..50464833d0b8 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -484,6 +484,23 @@ struct page_req_dsc {
484}; 484};
485 485
486#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10) 486#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
487
488static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
489{
490 unsigned long requested = 0;
491
492 if (req->exe_req)
493 requested |= VM_EXEC;
494
495 if (req->rd_req)
496 requested |= VM_READ;
497
498 if (req->wr_req)
499 requested |= VM_WRITE;
500
501 return (requested & ~vma->vm_flags) != 0;
502}
503
487static irqreturn_t prq_event_thread(int irq, void *d) 504static irqreturn_t prq_event_thread(int irq, void *d)
488{ 505{
489 struct intel_iommu *iommu = d; 506 struct intel_iommu *iommu = d;
@@ -539,6 +556,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
539 if (!vma || address < vma->vm_start) 556 if (!vma || address < vma->vm_start)
540 goto invalid; 557 goto invalid;
541 558
559 if (access_error(vma, req))
560 goto invalid;
561
542 ret = handle_mm_fault(svm->mm, vma, address, 562 ret = handle_mm_fault(svm->mm, vma, address,
543 req->wr_req ? FAULT_FLAG_WRITE : 0); 563 req->wr_req ? FAULT_FLAG_WRITE : 0);
544 if (ret & VM_FAULT_ERROR) 564 if (ret & VM_FAULT_ERROR)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index abae363c7b9b..0e3b0092ec92 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1430,7 +1430,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
 	for_each_sg(sg, s, nents, i) {
-		phys_addr_t phys = sg_phys(s);
+		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
 
 		/*
 		 * We are mapping on IOMMU page boundaries, so offset within
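Three hunks above (two in intel-iommu.c, one in iommu.c) replace sg_phys()-based arithmetic with page_to_phys(sg_page(sg)) plus an explicit offset. The usual motivation for this kind of change, which should be treated as an assumption here, is mask-width truncation: PAGE_MASK has the width of unsigned long, so on a 32-bit kernel with 64-bit physical addresses (e.g. ARM LPAE) `sg_phys(sg) & PAGE_MASK` can zero the upper address bits, whereas page_to_phys() of the page is page-aligned by construction and needs no mask. A small demonstration of the truncation, modeling the narrow mask with an explicit 32-bit type:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t page_phys = (0x1234ULL << 32) | 0xabcd0000; /* page above 4GiB */
        unsigned int offset = 0x123;
        uint32_t page_mask = ~0xfffu;   /* models a 32-bit-wide PAGE_MASK */

        uint64_t phys = page_phys + offset;   /* what sg_phys() computes */

        /* The 32-bit mask zero-extends to 0x00000000fffff000, so masking
         * silently drops the high half of the address. */
        printf("masked : %#llx\n", (unsigned long long)(phys & page_mask));
        /* The page address itself is already aligned and keeps all bits. */
        printf("aligned: %#llx\n", (unsigned long long)page_phys);
        return 0;
    }

In __domain_mapping() the mask could therefore be dropped entirely; the other two call sites keep the byte offset by adding it back explicitly.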
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index cbe198cb3699..471ee36b9c6e 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -216,6 +216,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
 	dma_addr_t start_dma_addr = dma_addr;
 	unsigned long irq_flags, nr_pages, i;
+	unsigned long *entry;
 	int rc = 0;
 
 	if (dma_addr < s390_domain->domain.geometry.aperture_start ||
@@ -228,8 +229,12 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 
 	spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
 	for (i = 0; i < nr_pages; i++) {
-		dma_update_cpu_trans(s390_domain->dma_table, page_addr,
-				     dma_addr, flags);
+		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+		if (!entry) {
+			rc = -ENOMEM;
+			goto undo_cpu_trans;
+		}
+		dma_update_cpu_trans(entry, page_addr, flags);
 		page_addr += PAGE_SIZE;
 		dma_addr += PAGE_SIZE;
 	}
@@ -242,6 +247,20 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 			break;
 	}
 	spin_unlock(&s390_domain->list_lock);
+
+undo_cpu_trans:
+	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+		flags = ZPCI_PTE_INVALID;
+		while (i-- > 0) {
+			page_addr -= PAGE_SIZE;
+			dma_addr -= PAGE_SIZE;
+			entry = dma_walk_cpu_trans(s390_domain->dma_table,
+						   dma_addr);
+			if (!entry)
+				break;
+			dma_update_cpu_trans(entry, page_addr, flags);
+		}
+	}
 	spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);
 
 	return rc;
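The s390-iommu hunk makes the mapping loop unwindable: if dma_walk_cpu_trans() fails part-way through, the new undo_cpu_trans label walks back over the entries already written and re-invalidates them, so a failed map cannot leave a half-populated translation table behind. The `while (i-- > 0)` idiom revisits exactly the completed iterations, in reverse. A generic sketch of the pattern (hypothetical setup()/teardown(), not the zpci helpers):

    #include <stdio.h>

    #define N 5

    static int setup(int idx)
    {
        return idx == 3 ? -1 : 0;   /* pretend the fourth entry fails */
    }

    static void teardown(int idx)
    {
        printf("undo %d\n", idx);
    }

    int main(void)
    {
        int i, rc = 0;

        for (i = 0; i < N; i++) {
            if (setup(i)) {
                rc = -1;
                break;
            }
        }
        /* Unwind only what succeeded: i holds the count of completed
         * steps, so i-- > 0 revisits them in reverse (2, 1, 0). */
        if (rc)
            while (i-- > 0)
                teardown(i);
        return 0;
    }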
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 44a077f3a4a2..f174ce0ca361 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -84,12 +84,15 @@ void __init gic_dist_config(void __iomem *base, int gic_irqs,
 		writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i);
 
 	/*
-	 * Disable all interrupts.  Leave the PPI and SGIs alone
-	 * as they are enabled by redistributor registers.
+	 * Deactivate and disable all SPIs. Leave the PPI and SGIs
+	 * alone as they are in the redistributor registers on GICv3.
 	 */
-	for (i = 32; i < gic_irqs; i += 32)
+	for (i = 32; i < gic_irqs; i += 32) {
 		writel_relaxed(GICD_INT_EN_CLR_X32,
-			       base + GIC_DIST_ENABLE_CLEAR + i / 8);
+			       base + GIC_DIST_ACTIVE_CLEAR + i / 8);
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			       base + GIC_DIST_ENABLE_CLEAR + i / 8);
+	}
 
 	if (sync_access)
 		sync_access();
@@ -102,7 +105,9 @@ void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
 	/*
 	 * Deal with the banked PPI and SGI interrupts - disable all
 	 * PPI interrupts, ensure all SGI interrupts are enabled.
+	 * Make sure everything is deactivated.
 	 */
+	writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR);
 	writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR);
 	writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET);
 
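On the `i / 8` addressing in the loop above: each GIC distributor enable/active register covers 32 interrupts and registers sit 4 bytes apart, so interrupt i lives in bit i % 32 of register i / 32, at byte offset (i / 32) * 4, which the driver writes more compactly as i / 8. The loop strides by 32 and writes GICD_INT_EN_CLR_X32 (an all-ones word) to clear 32 interrupts per store, now against ACTIVE_CLEAR as well as ENABLE_CLEAR. A tiny check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* 32 interrupts per 4-byte register: byte offset is
         * (i / 32) * 4 == i / 8 for the multiples of 32 used here. */
        for (int i = 32; i < 160; i += 32)
            printf("irq %3d -> register %d, byte offset %d\n",
                   i, i / 32, i / 8);
        return 0;
    }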
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 515c823c1c95..abf2ffaed392 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -73,9 +73,11 @@ struct gic_chip_data {
 	union gic_base cpu_base;
 #ifdef CONFIG_CPU_PM
 	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
 	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
 	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
 	u32 __percpu *saved_ppi_enable;
+	u32 __percpu *saved_ppi_active;
 	u32 __percpu *saved_ppi_conf;
 #endif
 	struct irq_domain *domain;
@@ -566,6 +568,10 @@ static void gic_dist_save(unsigned int gic_nr)
 	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
 		gic_data[gic_nr].saved_spi_enable[i] =
 			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+		gic_data[gic_nr].saved_spi_active[i] =
+			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
 }
 
 /*
@@ -604,9 +610,19 @@ static void gic_dist_restore(unsigned int gic_nr)
 		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
 			dist_base + GIC_DIST_TARGET + i * 4);
 
-	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
 		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
 			dist_base + GIC_DIST_ENABLE_SET + i * 4);
+	}
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
+		writel_relaxed(gic_data[gic_nr].saved_spi_active[i],
+			dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+	}
 
 	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
 }
@@ -631,6 +647,10 @@ static void gic_cpu_save(unsigned int gic_nr)
 	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
 		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
 
+	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+
 	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
 	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
 		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
@@ -654,8 +674,18 @@ static void gic_cpu_restore(unsigned int gic_nr)
 		return;
 
 	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
-	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
 		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+	}
+
+	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
+		writel_relaxed(GICD_INT_EN_CLR_X32,
+			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
+		writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+	}
 
 	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
 	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
@@ -710,6 +740,10 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
 					sizeof(u32));
 	BUG_ON(!gic->saved_ppi_enable);
 
+	gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+					sizeof(u32));
+	BUG_ON(!gic->saved_ppi_active);
+
 	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
 					sizeof(u32));
 	BUG_ON(!gic->saved_ppi_conf);
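The irq-gic.c PM hunks snapshot GIC_DIST_ACTIVE_SET alongside the enable bits on suspend and, on resume, first write all-ones to the matching CLEAR register before replaying the saved word into the SET register. The clear-then-set dance is needed because these are write-1-to-set / write-1-to-clear register pairs: writing the saved word to the SET side alone can only add bits, never remove stale ones left over from before the restore. A sketch of that register discipline:

    #include <stdio.h>
    #include <stdint.h>

    /* A write-1-to-set / write-1-to-clear register pair, as on the GIC. */
    static uint32_t state;

    static void write_set(uint32_t v)   { state |= v; }
    static void write_clear(uint32_t v) { state &= ~v; }

    int main(void)
    {
        uint32_t saved = 0x0000ff00;   /* snapshot taken at suspend time */

        state = 0x000f0000;            /* stale bits present at resume */

        /* Replaying only the SET write would leave 0x000fff00. */
        write_clear(0xffffffff);       /* first clear everything... */
        write_set(saved);              /* ...then restore the snapshot */
        printf("%#x\n", state);        /* 0xff00: exactly the saved state */
        return 0;
    }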
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 598ab3f0e0ac..cadf104e3074 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -210,7 +210,12 @@ int __init fpga_irq_of_init(struct device_node *node,
 		parent_irq = -1;
 	}
 
+#ifdef CONFIG_ARCH_VERSATILE
+	fpga_irq_init(base, node->name, IRQ_SIC_START, parent_irq, valid_mask,
+				  node);
+#else
 	fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
+#endif
 
 	writel(clear_mask, base + IRQ_ENABLE_CLEAR);
 	writel(clear_mask, base + FIQ_ENABLE_CLEAR);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 375be509e95f..2a506fe0c8a4 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -67,8 +67,7 @@ static int write_modem(struct cardstate *cs)
 	struct sk_buff *skb = bcs->tx_skb;
 	int sent = -EOPNOTSUPP;
 
-	if (!tty || !tty->driver || !skb)
-		return -EINVAL;
+	WARN_ON(!tty || !tty->ops || !skb);
 
 	if (!skb->len) {
 		dev_kfree_skb_any(skb);
@@ -109,8 +108,7 @@ static int send_cb(struct cardstate *cs)
 	unsigned long flags;
 	int sent = 0;
 
-	if (!tty || !tty->driver)
-		return -EFAULT;
+	WARN_ON(!tty || !tty->ops);
 
 	cb = cs->cmdbuf;
 	if (!cb)
@@ -370,19 +368,18 @@ static void gigaset_freecshw(struct cardstate *cs)
 	tasklet_kill(&cs->write_tasklet);
 	if (!cs->hw.ser)
 		return;
-	dev_set_drvdata(&cs->hw.ser->dev.dev, NULL);
 	platform_device_unregister(&cs->hw.ser->dev);
-	kfree(cs->hw.ser);
-	cs->hw.ser = NULL;
 }
 
 static void gigaset_device_release(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
+	struct cardstate *cs = dev_get_drvdata(dev);
 
-	/* adapted from platform_device_release() in drivers/base/platform.c */
-	kfree(dev->platform_data);
-	kfree(pdev->resource);
+	if (!cs)
+		return;
+	dev_set_drvdata(dev, NULL);
+	kfree(cs->hw.ser);
+	cs->hw.ser = NULL;
 }
 
 /*
@@ -432,7 +429,9 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
 	struct tty_struct *tty = cs->hw.ser->tty;
 	unsigned int set, clear;
 
-	if (!tty || !tty->driver || !tty->ops->tiocmset)
+	WARN_ON(!tty || !tty->ops);
+	/* tiocmset is an optional tty driver method */
+	if (!tty->ops->tiocmset)
 		return -EINVAL;
 	set = new_state & ~old_state;
 	clear = old_state & ~new_state;
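The ser-gigaset hunks move the kfree of cs->hw.ser out of gigaset_freecshw() and into the device's release callback. The driver core may still hold references to the platform device when platform_device_unregister() returns, so freeing the embedded structure immediately risks a use-after-free; the release callback runs only once the last reference is dropped. A sketch of the refcounted-release pattern, with a hypothetical mini device object in place of struct device:

    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
        int refs;
        void (*release)(struct dev *);
        void *payload;
    };

    static void put_dev(struct dev *d)
    {
        if (--d->refs == 0)
            d->release(d);   /* frees only at the *last* put */
    }

    static void my_release(struct dev *d)
    {
        puts("last ref dropped, freeing payload");
        free(d->payload);
    }

    int main(void)
    {
        struct dev d = { .refs = 2, .release = my_release,
                         .payload = malloc(16) };

        put_dev(&d);   /* "unregister": someone else still holds a ref */
        /* Freeing d.payload here, as the old code effectively did, would
         * leave the remaining reference pointing at freed memory. */
        put_dev(&d);   /* final put: release runs now */
        return 0;
    }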
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index a77eea594b69..cb428b9ee441 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -1170,7 +1170,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
 
 	if (ipac->type & IPAC_TYPE_IPACX) {
 		ista = ReadIPAC(ipac, ISACX_ISTA);
-		while (ista && cnt--) {
+		while (ista && --cnt) {
 			pr_debug("%s: ISTA %02x\n", ipac->name, ista);
 			if (ista & IPACX__ICA)
 				ipac_irq(&ipac->hscx[0], ista);
@@ -1182,7 +1182,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
 		}
 	} else if (ipac->type & IPAC_TYPE_IPAC) {
 		ista = ReadIPAC(ipac, IPAC_ISTA);
-		while (ista && cnt--) {
+		while (ista && --cnt) {
 			pr_debug("%s: ISTA %02x\n", ipac->name, ista);
 			if (ista & (IPAC__ICD | IPAC__EXD)) {
 				istad = ReadISAC(isac, ISAC_ISTA);
@@ -1200,7 +1200,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
 			ista = ReadIPAC(ipac, IPAC_ISTA);
 		}
 	} else if (ipac->type & IPAC_TYPE_HSCX) {
-		while (cnt) {
+		while (--cnt) {
 			ista = ReadIPAC(ipac, IPAC_ISTAB + ipac->hscx[1].off);
 			pr_debug("%s: B2 ISTA %02x\n", ipac->name, ista);
 			if (ista)
@@ -1211,7 +1211,6 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
 				mISDNisac_irq(isac, istad);
 			if (0 == (ista | istad))
 				break;
-			cnt--;
 		}
 	}
 	if (cnt > maxloop) /* only for ISAC/HSCX without PCI IRQ test */
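The mISDNipac hunks switch the IRQ loops from post-decrement (`cnt--`, plus a trailing `cnt--` in the HSCX branch) to a uniform pre-decrement `--cnt` in the loop condition. The two forms differ in both the number of iterations a given budget allows and the value cnt holds when the budget runs out, and the function's final `if (cnt > maxloop)` check depends on that final value being well defined across all three branches. The difference, runnable:

    #include <stdio.h>

    int main(void)
    {
        int cnt, iters;

        /* Post-decrement tests the old value: a budget of 3 allows three
         * body runs and leaves cnt at -1 when it is exhausted. */
        for (cnt = 3, iters = 0; cnt--; )
            iters++;
        printf("post: iters=%d, final cnt=%d\n", iters, cnt); /* 3, -1 */

        /* Pre-decrement tests the new value: the same budget allows two
         * body runs and exhaustion leaves cnt exactly at 0. */
        for (cnt = 3, iters = 0; --cnt; )
            iters++;
        printf("pre : iters=%d, final cnt=%d\n", iters, cnt); /* 2, 0 */
        return 0;
    }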
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index b33f53b3ca93..bf04d2a3cf4a 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1896,7 +1896,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
 			ptr--;
 			*ptr++ = '\n';
 			*ptr = 0;
-			HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+			HiSax_putstatus(cs, NULL, cs->dlog);
 		} else
 			HiSax_putstatus(cs, "LogEcho: ",
 					"warning Frame too big (%d)",
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 4a4825528188..90449e1e91e5 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -901,7 +901,7 @@ Begin:
 			ptr--;
 			*ptr++ = '\n';
 			*ptr = 0;
-			HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+			HiSax_putstatus(cs, NULL, cs->dlog);
 		} else
 			HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3);
 	}
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index b1fad81f0722..13b2151c10f5 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -674,7 +674,7 @@ receive_emsg(struct IsdnCardState *cs)
 			ptr--;
 			*ptr++ = '\n';
 			*ptr = 0;
-			HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+			HiSax_putstatus(cs, NULL, cs->dlog);
 		} else
 			HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len);
 	}
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index b420f8bd862e..ba4beb25d872 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -1179,7 +1179,7 @@ LogFrame(struct IsdnCardState *cs, u_char *buf, int size)
 		dp--;
 		*dp++ = '\n';
 		*dp = 0;
-		HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+		HiSax_putstatus(cs, NULL, cs->dlog);
 	} else
 		HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size);
 }
@@ -1246,7 +1246,7 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
 	}
 	if (finish) {
 		*dp = 0;
-		HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+		HiSax_putstatus(cs, NULL, cs->dlog);
 		return;
 	}
 	if ((0xfe & buf[0]) == PROTO_DIS_N0) {	/* 1TR6 */
@@ -1509,5 +1509,5 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir)
 		dp += sprintf(dp, "Unknown protocol %x!", buf[0]);
 	}
 	*dp = 0;
-	HiSax_putstatus(cs, NULL, "%s", cs->dlog);
+	HiSax_putstatus(cs, NULL, cs->dlog);
 }
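All six HiSax hunks change `HiSax_putstatus(cs, NULL, "%s", cs->dlog)` back to passing cs->dlog directly. Passing a runtime-built buffer in a printf-style format position is only safe when the callee does not interpret '%' in that argument, or when the buffer provably never contains one; the "%s" indirection is the usual defensive idiom. These hunks deliberately go the other way, which suggests HiSax_putstatus treats this argument as plain text on these paths; take that reading as an assumption from context rather than a documented guarantee. The general hazard, sketched:

    #include <stdio.h>

    int main(void)
    {
        /* A log line that happens to contain a '%', e.g. hex-dumped
         * frame data rendered as text. */
        char dlog[] = "rate 100% ok";

        /* Safe: the buffer is data, never a format. */
        printf("%s\n", dlog);

        /* Unsafe if the callee is printf-like: the '%' in the buffer is
         * parsed as a conversion and reads a missing argument.
         * (Left commented out: undefined behavior.)
         *
         * printf(dlog);
         */
        return 0;
    }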
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index a16bf56d3f28..85a339030e4b 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -18,6 +18,7 @@ if NVM
 
 config NVM_DEBUG
 	bool "Open-Channel SSD debugging support"
+	default n
 	---help---
 	Exposes a debug management interface to create/remove targets at:
 
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index f659e605a406..8f41b245cd55 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(nvm_unregister_target);
 void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
 							dma_addr_t *dma_handler)
 {
-	return dev->ops->dev_dma_alloc(dev->q, dev->ppalist_pool, mem_flags,
+	return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
 								dma_handler);
 }
 EXPORT_SYMBOL(nvm_dev_dma_alloc);
@@ -97,15 +97,47 @@ static struct nvmm_type *nvm_find_mgr_type(const char *name)
 	return NULL;
 }
 
+struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev)
+{
+	struct nvmm_type *mt;
+	int ret;
+
+	lockdep_assert_held(&nvm_lock);
+
+	list_for_each_entry(mt, &nvm_mgrs, list) {
+		ret = mt->register_mgr(dev);
+		if (ret < 0) {
+			pr_err("nvm: media mgr failed to init (%d) on dev %s\n",
+								ret, dev->name);
+			return NULL; /* initialization failed */
+		} else if (ret > 0)
+			return mt;
+	}
+
+	return NULL;
+}
+
 int nvm_register_mgr(struct nvmm_type *mt)
 {
+	struct nvm_dev *dev;
 	int ret = 0;
 
 	down_write(&nvm_lock);
-	if (nvm_find_mgr_type(mt->name))
+	if (nvm_find_mgr_type(mt->name)) {
 		ret = -EEXIST;
-	else
+		goto finish;
+	} else {
 		list_add(&mt->list, &nvm_mgrs);
+	}
+
+	/* try to register media mgr if any device have none configured */
+	list_for_each_entry(dev, &nvm_devices, devices) {
+		if (dev->mt)
+			continue;
+
+		dev->mt = nvm_init_mgr(dev);
+	}
+finish:
 	up_write(&nvm_lock);
 
 	return ret;
@@ -160,11 +192,6 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
 }
 EXPORT_SYMBOL(nvm_erase_blk);
 
-static void nvm_core_free(struct nvm_dev *dev)
-{
-	kfree(dev);
-}
-
 static int nvm_core_init(struct nvm_dev *dev)
 {
 	struct nvm_id *id = &dev->identity;
@@ -179,12 +206,21 @@ static int nvm_core_init(struct nvm_dev *dev)
 	dev->sec_size = grp->csecs;
 	dev->oob_size = grp->sos;
 	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
-	dev->addr_mode = id->ppat;
-	dev->addr_format = id->ppaf;
+	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
 
 	dev->plane_mode = NVM_PLANE_SINGLE;
 	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
 
+	if (grp->mtype != 0) {
+		pr_err("nvm: memory type not supported\n");
+		return -EINVAL;
+	}
+
+	if (grp->fmtype != 0 && grp->fmtype != 1) {
+		pr_err("nvm: flash type not supported\n");
+		return -EINVAL;
+	}
+
 	if (grp->mpos & 0x020202)
 		dev->plane_mode = NVM_PLANE_DOUBLE;
 	if (grp->mpos & 0x040404)
@@ -213,21 +249,17 @@ static void nvm_free(struct nvm_dev *dev)
 
 	if (dev->mt)
 		dev->mt->unregister_mgr(dev);
-
-	nvm_core_free(dev);
 }
 
 static int nvm_init(struct nvm_dev *dev)
 {
-	struct nvmm_type *mt;
-	int ret = 0;
+	int ret = -EINVAL;
 
 	if (!dev->q || !dev->ops)
-		return -EINVAL;
+		return ret;
 
-	if (dev->ops->identity(dev->q, &dev->identity)) {
+	if (dev->ops->identity(dev, &dev->identity)) {
 		pr_err("nvm: device could not be identified\n");
-		ret = -EINVAL;
 		goto err;
 	}
 
@@ -251,29 +283,12 @@ static int nvm_init(struct nvm_dev *dev)
 		goto err;
 	}
 
-	/* register with device with a supported manager */
-	list_for_each_entry(mt, &nvm_mgrs, list) {
-		ret = mt->register_mgr(dev);
-		if (ret < 0)
-			goto err; /* initialization failed */
-		if (ret > 0) {
-			dev->mt = mt;
-			break; /* successfully initialized */
-		}
-	}
-
-	if (!ret) {
-		pr_info("nvm: no compatible manager found.\n");
-		return 0;
-	}
-
 	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
 			dev->name, dev->sec_per_pg, dev->nr_planes,
 			dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
 			dev->nr_chnls);
 	return 0;
 err:
-	nvm_free(dev);
 	pr_err("nvm: failed to initialize nvm\n");
 	return ret;
 }
@@ -308,22 +323,27 @@ int nvm_register(struct request_queue *q, char *disk_name,
 	if (ret)
 		goto err_init;
 
-	down_write(&nvm_lock);
-	list_add(&dev->devices, &nvm_devices);
-	up_write(&nvm_lock);
+	if (dev->ops->max_phys_sect > 256) {
+		pr_info("nvm: max sectors supported is 256.\n");
+		ret = -EINVAL;
+		goto err_init;
+	}
 
 	if (dev->ops->max_phys_sect > 1) {
-		dev->ppalist_pool = dev->ops->create_dma_pool(dev->q,
-								"ppalist");
+		dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
 		if (!dev->ppalist_pool) {
 			pr_err("nvm: could not create ppa pool\n");
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto err_init;
 		}
-	} else if (dev->ops->max_phys_sect > 256) {
-		pr_info("nvm: max sectors supported is 256.\n");
-		return -EINVAL;
 	}
 
+	/* register device with a supported media manager */
+	down_write(&nvm_lock);
+	dev->mt = nvm_init_mgr(dev);
+	list_add(&dev->devices, &nvm_devices);
+	up_write(&nvm_lock);
+
 	return 0;
 err_init:
 	kfree(dev);
@@ -333,19 +353,22 @@ EXPORT_SYMBOL(nvm_register);
 
 void nvm_unregister(char *disk_name)
 {
-	struct nvm_dev *dev = nvm_find_nvm_dev(disk_name);
+	struct nvm_dev *dev;
 
+	down_write(&nvm_lock);
+	dev = nvm_find_nvm_dev(disk_name);
 	if (!dev) {
 		pr_err("nvm: could not find device %s to unregister\n",
 								disk_name);
+		up_write(&nvm_lock);
 		return;
 	}
 
-	nvm_exit(dev);
-
-	down_write(&nvm_lock);
 	list_del(&dev->devices);
 	up_write(&nvm_lock);
+
+	nvm_exit(dev);
+	kfree(dev);
 }
 EXPORT_SYMBOL(nvm_unregister);
 
@@ -358,38 +381,24 @@ static int nvm_create_target(struct nvm_dev *dev,
 {
 	struct nvm_ioctl_create_simple *s = &create->conf.s;
 	struct request_queue *tqueue;
-	struct nvmm_type *mt;
 	struct gendisk *tdisk;
 	struct nvm_tgt_type *tt;
 	struct nvm_target *t;
 	void *targetdata;
-	int ret = 0;
 
 	if (!dev->mt) {
-		/* register with device with a supported NVM manager */
-		list_for_each_entry(mt, &nvm_mgrs, list) {
-			ret = mt->register_mgr(dev);
-			if (ret < 0)
-				return ret; /* initialization failed */
-			if (ret > 0) {
-				dev->mt = mt;
-				break; /* successfully initialized */
-			}
-		}
-
-		if (!ret) {
-			pr_info("nvm: no compatible nvm manager found.\n");
-			return -ENODEV;
-		}
+		pr_info("nvm: device has no media manager registered.\n");
+		return -ENODEV;
 	}
 
+	down_write(&nvm_lock);
 	tt = nvm_find_target_type(create->tgttype);
 	if (!tt) {
 		pr_err("nvm: target type %s not found\n", create->tgttype);
+		up_write(&nvm_lock);
 		return -EINVAL;
 	}
 
-	down_write(&nvm_lock);
 	list_for_each_entry(t, &dev->online_targets, list) {
 		if (!strcmp(create->tgtname, t->disk->disk_name)) {
 			pr_err("nvm: target name already exists.\n");
@@ -457,11 +466,11 @@ static void nvm_remove_target(struct nvm_target *t)
 	lockdep_assert_held(&nvm_lock);
 
 	del_gendisk(tdisk);
+	blk_cleanup_queue(q);
+
 	if (tt->exit)
 		tt->exit(tdisk->private_data);
 
-	blk_cleanup_queue(q);
-
 	put_disk(tdisk);
 
 	list_del(&t->list);
@@ -473,7 +482,9 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
 	struct nvm_dev *dev;
 	struct nvm_ioctl_create_simple *s;
 
+	down_write(&nvm_lock);
 	dev = nvm_find_nvm_dev(create->dev);
+	up_write(&nvm_lock);
 	if (!dev) {
 		pr_err("nvm: device not found\n");
 		return -EINVAL;
@@ -532,7 +543,9 @@ static int nvm_configure_show(const char *val)
 		return -EINVAL;
 	}
 
+	down_write(&nvm_lock);
 	dev = nvm_find_nvm_dev(devname);
+	up_write(&nvm_lock);
 	if (!dev) {
 		pr_err("nvm: device not found\n");
 		return -EINVAL;
@@ -541,7 +554,7 @@ static int nvm_configure_show(const char *val)
 	if (!dev->mt)
 		return 0;
 
-	dev->mt->free_blocks_print(dev);
+	dev->mt->lun_info_print(dev);
 
 	return 0;
 }
@@ -677,8 +690,10 @@ static long nvm_ioctl_info(struct file *file, void __user *arg)
 	info->tgtsize = tgt_iter;
 	up_write(&nvm_lock);
 
-	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info)))
+	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
+		kfree(info);
 		return -EFAULT;
+	}
 
 	kfree(info);
 	return 0;
@@ -721,8 +736,11 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
 
 	devices->nr_devices = i;
 
-	if (copy_to_user(arg, devices, sizeof(struct nvm_ioctl_get_devices)))
+	if (copy_to_user(arg, devices,
+			 sizeof(struct nvm_ioctl_get_devices))) {
+		kfree(devices);
 		return -EFAULT;
+	}
 
 	kfree(devices);
 	return 0;
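Two recurring shapes in the lightnvm core hunks: device lookups now happen with nvm_lock held, so a device cannot be unregistered between nvm_find_nvm_dev() and its use (and nvm_unregister() now delists the device before tearing it down and freeing it); and the ioctl paths now free their temporary buffer on the copy_to_user() failure branch instead of leaking it. A sketch of the second fix, with a stand-in copy_out() modeling copy_to_user() returning nonzero on failure:

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    static int copy_out(void *dst, const void *src, size_t n)
    {
        (void)src; (void)n;
        return dst == NULL;   /* pretend the user pointer faulted */
    }

    static long get_info(void *user_arg)
    {
        char *info = malloc(64);

        if (!info)
            return -12;                  /* -ENOMEM */
        memset(info, 0, 64);

        if (copy_out(user_arg, info, 64)) {
            free(info);                  /* the fix: no leak on -EFAULT */
            return -14;                  /* -EFAULT */
        }

        free(info);
        return 0;
    }

    int main(void)
    {
        printf("%ld\n", get_info(NULL)); /* error path, buffer still freed */
        return 0;
    }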
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index ae1fb2bdc5f4..f434e89e1c7a 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -60,23 +60,27 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
 		lun->vlun.lun_id = i % dev->luns_per_chnl;
 		lun->vlun.chnl_id = i / dev->luns_per_chnl;
 		lun->vlun.nr_free_blocks = dev->blks_per_lun;
+		lun->vlun.nr_inuse_blocks = 0;
+		lun->vlun.nr_bad_blocks = 0;
 	}
 	return 0;
 }
 
-static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks,
+static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
 								void *private)
 {
 	struct gen_nvm *gn = private;
-	struct gen_lun *lun = &gn->luns[lun_id];
+	struct nvm_dev *dev = gn->dev;
+	struct gen_lun *lun;
 	struct nvm_block *blk;
 	int i;
 
-	if (unlikely(bitmap_empty(bb_bitmap, nr_blocks)))
-		return 0;
+	lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun];
+
+	for (i = 0; i < nr_blocks; i++) {
+		if (blks[i] == 0)
+			continue;
 
-	i = -1;
-	while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) {
 		blk = &lun->vlun.blocks[i];
 		if (!blk) {
 			pr_err("gennvm: BB data is out of bounds.\n");
@@ -84,6 +88,7 @@ static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks,
 		}
 
 		list_move_tail(&blk->list, &lun->bb_list);
+		lun->vlun.nr_bad_blocks++;
 	}
 
 	return 0;
@@ -136,6 +141,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
 			list_move_tail(&blk->list, &lun->used_list);
 			blk->type = 1;
 			lun->vlun.nr_free_blocks--;
+			lun->vlun.nr_inuse_blocks++;
 		}
 	}
 
@@ -164,22 +170,32 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
 			block->id = cur_block_id++;
 
 			/* First block is reserved for device */
-			if (unlikely(lun_iter == 0 && blk_iter == 0))
+			if (unlikely(lun_iter == 0 && blk_iter == 0)) {
+				lun->vlun.nr_free_blocks--;
 				continue;
+			}
 
 			list_add_tail(&block->list, &lun->free_list);
 		}
 
 		if (dev->ops->get_bb_tbl) {
-			ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id,
-					dev->blks_per_lun, gennvm_block_bb, gn);
+			struct ppa_addr ppa;
+
+			ppa.ppa = 0;
+			ppa.g.ch = lun->vlun.chnl_id;
+			ppa.g.lun = lun->vlun.id;
+			ppa = generic_to_dev_addr(dev, ppa);
+
+			ret = dev->ops->get_bb_tbl(dev, ppa,
+						dev->blks_per_lun,
+						gennvm_block_bb, gn);
 			if (ret)
 				pr_err("gennvm: could not read BB table\n");
 		}
 	}
 
 	if (dev->ops->get_l2p_tbl) {
-		ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
+		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
 							gennvm_block_map, dev);
 		if (ret) {
 			pr_err("gennvm: could not read L2P table.\n");
@@ -190,15 +206,27 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
 	return 0;
 }
 
+static void gennvm_free(struct nvm_dev *dev)
+{
+	gennvm_blocks_free(dev);
+	gennvm_luns_free(dev);
+	kfree(dev->mp);
+	dev->mp = NULL;
+}
+
 static int gennvm_register(struct nvm_dev *dev)
 {
 	struct gen_nvm *gn;
 	int ret;
 
+	if (!try_module_get(THIS_MODULE))
+		return -ENODEV;
+
 	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
 	if (!gn)
 		return -ENOMEM;
 
+	gn->dev = dev;
 	gn->nr_luns = dev->nr_luns;
 	dev->mp = gn;
 
@@ -216,16 +244,15 @@ static int gennvm_register(struct nvm_dev *dev)
 
 	return 1;
 err:
-	kfree(gn);
+	gennvm_free(dev);
+	module_put(THIS_MODULE);
 	return ret;
 }
 
 static void gennvm_unregister(struct nvm_dev *dev)
 {
-	gennvm_blocks_free(dev);
-	gennvm_luns_free(dev);
-	kfree(dev->mp);
-	dev->mp = NULL;
+	gennvm_free(dev);
+	module_put(THIS_MODULE);
 }
 
 static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
@@ -240,23 +267,21 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
 	if (list_empty(&lun->free_list)) {
 		pr_err_ratelimited("gennvm: lun %u have no free pages available",
 							lun->vlun.id);
-		spin_unlock(&vlun->lock);
 		goto out;
 	}
 
-	while (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks) {
-		spin_unlock(&vlun->lock);
+	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
 		goto out;
-	}
 
 	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
 	list_move_tail(&blk->list, &lun->used_list);
 	blk->type = 1;
 
 	lun->vlun.nr_free_blocks--;
+	lun->vlun.nr_inuse_blocks++;
 
-	spin_unlock(&vlun->lock);
 out:
+	spin_unlock(&vlun->lock);
 	return blk;
 }
 
@@ -271,16 +296,21 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
 	case 1:
 		list_move_tail(&blk->list, &lun->free_list);
 		lun->vlun.nr_free_blocks++;
+		lun->vlun.nr_inuse_blocks--;
 		blk->type = 0;
 		break;
 	case 2:
 		list_move_tail(&blk->list, &lun->bb_list);
+		lun->vlun.nr_bad_blocks++;
+		lun->vlun.nr_inuse_blocks--;
 		break;
 	default:
 		WARN_ON_ONCE(1);
 		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
 							blk->id, blk->type);
 		list_move_tail(&blk->list, &lun->bb_list);
+		lun->vlun.nr_bad_blocks++;
+		lun->vlun.nr_inuse_blocks--;
 	}
 
 	spin_unlock(&vlun->lock);
@@ -292,10 +322,10 @@ static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 
 	if (rqd->nr_pages > 1) {
 		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = addr_to_generic_mode(dev,
+			rqd->ppa_list[i] = dev_to_generic_addr(dev,
 							rqd->ppa_list[i]);
 	} else {
-		rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr);
+		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
 	}
 }
 
@@ -305,10 +335,10 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 
 	if (rqd->nr_pages > 1) {
 		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = generic_to_addr_mode(dev,
+			rqd->ppa_list[i] = generic_to_dev_addr(dev,
 							rqd->ppa_list[i]);
 	} else {
-		rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr);
+		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
 	}
 }
 
@@ -321,7 +351,7 @@ static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	gennvm_generic_to_addr_mode(dev, rqd);
 
 	rqd->dev = dev;
-	return dev->ops->submit_io(dev->q, rqd);
+	return dev->ops->submit_io(dev, rqd);
 }
 
 static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
@@ -354,10 +384,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
 	int i;
 
-	if (!dev->ops->set_bb)
+	if (!dev->ops->set_bb_tbl)
 		return;
 
-	if (dev->ops->set_bb(dev->q, rqd, 1))
+	if (dev->ops->set_bb_tbl(dev, rqd, 1))
 		return;
 
 	gennvm_addr_to_generic_mode(dev, rqd);
@@ -425,7 +455,7 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
 
 	gennvm_generic_to_addr_mode(dev, &rqd);
 
-	ret = dev->ops->erase_block(dev->q, &rqd);
+	ret = dev->ops->erase_block(dev, &rqd);
 
 	if (plane_cnt)
 		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
@@ -440,15 +470,24 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
 	return &gn->luns[lunid].vlun;
 }
 
-static void gennvm_free_blocks_print(struct nvm_dev *dev)
+static void gennvm_lun_info_print(struct nvm_dev *dev)
 {
 	struct gen_nvm *gn = dev->mp;
 	struct gen_lun *lun;
 	unsigned int i;
 
-	gennvm_for_each_lun(gn, lun, i)
-		pr_info("%s: lun%8u\t%u\n",
-				dev->name, i, lun->vlun.nr_free_blocks);
+
+	gennvm_for_each_lun(gn, lun, i) {
+		spin_lock(&lun->vlun.lock);
+
+		pr_info("%s: lun%8u\t%u\t%u\t%u\n",
+				dev->name, i,
+				lun->vlun.nr_free_blocks,
+				lun->vlun.nr_inuse_blocks,
+				lun->vlun.nr_bad_blocks);
+
+		spin_unlock(&lun->vlun.lock);
+	}
 }
 
 static struct nvmm_type gennvm = {
@@ -466,7 +505,7 @@ static struct nvmm_type gennvm = {
 	.erase_blk = gennvm_erase_blk,
 
 	.get_lun = gennvm_get_lun,
-	.free_blocks_print = gennvm_free_blocks_print,
+	.lun_info_print = gennvm_lun_info_print,
 };
 
 static int __init gennvm_module_init(void)
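The gennvm hunks thread nr_inuse_blocks/nr_bad_blocks accounting through every block state change (always while the per-LUN lock is held) and rework gennvm_get_blk() so that every exit funnels through a single `out:` label that drops the lock exactly once, instead of each early-exit branch unlocking by hand. A sketch of that single-unlock shape; the miniature below uses a pthread mutex in place of the spinlock and is not the driver's API:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lun_lock = PTHREAD_MUTEX_INITIALIZER;
    static int nr_free = 1, nr_inuse;

    /* Every return path funnels through "out", so the lock is dropped
     * exactly once no matter which early exit is taken. */
    static int get_blk(int is_gc)
    {
        int blk = -1;

        pthread_mutex_lock(&lun_lock);
        if (nr_free == 0)
            goto out;
        if (!is_gc && nr_free < 1)   /* reserved-blocks check */
            goto out;

        blk = 0;
        nr_free--;
        nr_inuse++;                  /* the counters move together */
    out:
        pthread_mutex_unlock(&lun_lock);
        return blk;
    }

    int main(void)
    {
        printf("%d %d\n", get_blk(0), get_blk(0));   /* 0 then -1 */
        return 0;
    }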
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index d23bd3501ddc..9c24b5b32dac 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -35,6 +35,8 @@ struct gen_lun {
 };
 
 struct gen_nvm {
+	struct nvm_dev *dev;
+
 	int nr_luns;
 	struct gen_lun *luns;
 };
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 7ba64c87ba1c..134e4faba482 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -123,12 +123,42 @@ static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
 	return blk->id * rrpc->dev->pgs_per_blk;
 }
 
+static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
+							struct ppa_addr r)
+{
+	struct ppa_addr l;
+	int secs, pgs, blks, luns;
+	sector_t ppa = r.ppa;
+
+	l.ppa = 0;
+
+	div_u64_rem(ppa, dev->sec_per_pg, &secs);
+	l.g.sec = secs;
+
+	sector_div(ppa, dev->sec_per_pg);
+	div_u64_rem(ppa, dev->sec_per_blk, &pgs);
+	l.g.pg = pgs;
+
+	sector_div(ppa, dev->pgs_per_blk);
+	div_u64_rem(ppa, dev->blks_per_lun, &blks);
+	l.g.blk = blks;
+
+	sector_div(ppa, dev->blks_per_lun);
+	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
+	l.g.lun = luns;
+
+	sector_div(ppa, dev->luns_per_chnl);
+	l.g.ch = ppa;
+
+	return l;
+}
+
 static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
 {
 	struct ppa_addr paddr;
 
 	paddr.ppa = addr;
-	return __linear_to_generic_addr(dev, paddr);
+	return linear_to_generic_addr(dev, paddr);
 }
 
 /* requires lun->lock taken */
@@ -152,7 +182,7 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
 	struct nvm_block *blk;
 	struct rrpc_block *rblk;
 
-	blk = nvm_get_blk(rrpc->dev, rlun->parent, 0);
+	blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
 	if (!blk)
 		return NULL;
 
@@ -172,6 +202,20 @@ static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
 	nvm_put_blk(rrpc->dev, rblk->parent);
 }
 
+static void rrpc_put_blks(struct rrpc *rrpc)
+{
+	struct rrpc_lun *rlun;
+	int i;
+
+	for (i = 0; i < rrpc->nr_luns; i++) {
+		rlun = &rrpc->luns[i];
+		if (rlun->cur)
+			rrpc_put_blk(rrpc, rlun->cur);
+		if (rlun->gc_cur)
+			rrpc_put_blk(rrpc, rlun->gc_cur);
+	}
+}
+
 static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
 {
 	int next = atomic_inc_return(&rrpc->next_lun);
@@ -972,7 +1016,7 @@ static int rrpc_map_init(struct rrpc *rrpc)
 		return 0;
 
 	/* Bring up the mapping table from device */
-	ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
+	ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
 					rrpc_l2p_update, rrpc);
 	if (ret) {
 		pr_err("nvm: rrpc: could not read L2P table.\n");
@@ -1194,18 +1238,21 @@ static int rrpc_luns_configure(struct rrpc *rrpc)
 
 		rblk = rrpc_get_blk(rrpc, rlun, 0);
 		if (!rblk)
-			return -EINVAL;
+			goto err;
 
 		rrpc_set_lun_cur(rlun, rblk);
 
 		/* Emergency gc block */
 		rblk = rrpc_get_blk(rrpc, rlun, 1);
 		if (!rblk)
-			return -EINVAL;
+			goto err;
 		rlun->gc_cur = rblk;
 	}
 
 	return 0;
+err:
+	rrpc_put_blks(rrpc);
+	return -EINVAL;
 }
 
 static struct nvm_tgt_type tt_rrpc;
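linear_to_generic_addr() above peels a flat physical page address into its (sec, pg, blk, lun, ch) fields with alternating remainder/divide steps, like digit extraction in a mixed-radix number system; the driver reaches for div_u64_rem()/sector_div() because sector_t can be 64-bit on 32-bit builds, where plain / and % on 64-bit values are not available. A worked example of the decomposition with a small made-up geometry (not the driver's real parameters):

    #include <stdio.h>

    int main(void)
    {
        /* Made-up geometry: 4 sectors/page, 8 pages/block,
         * 16 blocks/lun, 2 luns/channel. */
        unsigned long sec_per_pg = 4, pgs_per_blk = 8,
                      blks_per_lun = 16, luns_per_chnl = 2;
        unsigned long ppa = 1337;

        unsigned long sec = ppa % sec_per_pg;    ppa /= sec_per_pg;
        unsigned long pg  = ppa % pgs_per_blk;   ppa /= pgs_per_blk;
        unsigned long blk = ppa % blks_per_lun;  ppa /= blks_per_lun;
        unsigned long lun = ppa % luns_per_chnl; ppa /= luns_per_chnl;
        unsigned long ch  = ppa;

        /* 1337 = (((1*2 + 0)*16 + 9)*8 + 6)*4 + 1,
         * so: ch 1, lun 0, blk 9, pg 6, sec 1. */
        printf("ch %lu lun %lu blk %lu pg %lu sec %lu\n",
               ch, lun, blk, pg, sec);
        return 0;
    }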
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 917d47e290ae..3147c8d09ea8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -112,7 +112,8 @@ struct iv_tcw_private {
112 * and encrypts / decrypts at the same time. 112 * and encrypts / decrypts at the same time.
113 */ 113 */
114enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, 114enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
115 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; 115 DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
116 DM_CRYPT_EXIT_THREAD};
116 117
117/* 118/*
118 * The fields in here must be read only after initialization. 119 * The fields in here must be read only after initialization.
@@ -1203,20 +1204,18 @@ continue_locked:
1203 if (!RB_EMPTY_ROOT(&cc->write_tree)) 1204 if (!RB_EMPTY_ROOT(&cc->write_tree))
1204 goto pop_from_list; 1205 goto pop_from_list;
1205 1206
1207 if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
1208 spin_unlock_irq(&cc->write_thread_wait.lock);
1209 break;
1210 }
1211
1206 __set_current_state(TASK_INTERRUPTIBLE); 1212 __set_current_state(TASK_INTERRUPTIBLE);
1207 __add_wait_queue(&cc->write_thread_wait, &wait); 1213 __add_wait_queue(&cc->write_thread_wait, &wait);
1208 1214
1209 spin_unlock_irq(&cc->write_thread_wait.lock); 1215 spin_unlock_irq(&cc->write_thread_wait.lock);
1210 1216
1211 if (unlikely(kthread_should_stop())) {
1212 set_task_state(current, TASK_RUNNING);
1213 remove_wait_queue(&cc->write_thread_wait, &wait);
1214 break;
1215 }
1216
1217 schedule(); 1217 schedule();
1218 1218
1219 set_task_state(current, TASK_RUNNING);
1220 spin_lock_irq(&cc->write_thread_wait.lock); 1219 spin_lock_irq(&cc->write_thread_wait.lock);
1221 __remove_wait_queue(&cc->write_thread_wait, &wait); 1220 __remove_wait_queue(&cc->write_thread_wait, &wait);
1222 goto continue_locked; 1221 goto continue_locked;
@@ -1531,8 +1530,13 @@ static void crypt_dtr(struct dm_target *ti)
1531 if (!cc) 1530 if (!cc)
1532 return; 1531 return;
1533 1532
1534 if (cc->write_thread) 1533 if (cc->write_thread) {
1534 spin_lock_irq(&cc->write_thread_wait.lock);
1535 set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
1536 wake_up_locked(&cc->write_thread_wait);
1537 spin_unlock_irq(&cc->write_thread_wait.lock);
1535 kthread_stop(cc->write_thread); 1538 kthread_stop(cc->write_thread);
1539 }
1536 1540
1537 if (cc->io_queue) 1541 if (cc->io_queue)
1538 destroy_workqueue(cc->io_queue); 1542 destroy_workqueue(cc->io_queue);
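
[Editor's note] The dm-crypt change replaces kthread_should_stop(), which could race here (the stop request can arrive after the flag test but before the sleep), with an explicit exit bit that is set, and the wakeup issued, under the same waitqueue lock the worker holds when it decides to sleep. A condensed sketch of the pattern; the names (wq, flags, EXIT_THREAD) are hypothetical:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);
static unsigned long flags;
#define EXIT_THREAD 0

static int worker_fn(void *data)
{
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_irq(&wq.lock);
	for (;;) {
		/* tested under wq.lock: the bit cannot be set between
		 * this check and the sleep below */
		if (test_bit(EXIT_THREAD, &flags))
			break;
		__set_current_state(TASK_INTERRUPTIBLE);
		__add_wait_queue(&wq, &wait);
		spin_unlock_irq(&wq.lock);
		schedule();                     /* woken by work or exit */
		spin_lock_irq(&wq.lock);
		__remove_wait_queue(&wq, &wait);
	}
	spin_unlock_irq(&wq.lock);
	return 0;
}

/* Teardown: flag and wakeup under the same lock, then reap. */
static void stop_worker(struct task_struct *t)
{
	spin_lock_irq(&wq.lock);
	set_bit(EXIT_THREAD, &flags);
	wake_up_locked(&wq);
	spin_unlock_irq(&wq.lock);
	kthread_stop(t);
}
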
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aaa6caa46a9f..cfa29f574c2a 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1537,32 +1537,34 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
1537 struct block_device **bdev, fmode_t *mode) 1537 struct block_device **bdev, fmode_t *mode)
1538{ 1538{
1539 struct multipath *m = ti->private; 1539 struct multipath *m = ti->private;
1540 struct pgpath *pgpath;
1541 unsigned long flags; 1540 unsigned long flags;
1542 int r; 1541 int r;
1543 1542
1544 r = 0;
1545
1546 spin_lock_irqsave(&m->lock, flags); 1543 spin_lock_irqsave(&m->lock, flags);
1547 1544
1548 if (!m->current_pgpath) 1545 if (!m->current_pgpath)
1549 __choose_pgpath(m, 0); 1546 __choose_pgpath(m, 0);
1550 1547
1551 pgpath = m->current_pgpath; 1548 if (m->current_pgpath) {
1552 1549 if (!m->queue_io) {
1553 if (pgpath) { 1550 *bdev = m->current_pgpath->path.dev->bdev;
1554 *bdev = pgpath->path.dev->bdev; 1551 *mode = m->current_pgpath->path.dev->mode;
1555 *mode = pgpath->path.dev->mode; 1552 r = 0;
1553 } else {
1554 /* pg_init has not started or completed */
1555 r = -ENOTCONN;
1556 }
1557 } else {
1558 /* No path is available */
1559 if (m->queue_if_no_path)
1560 r = -ENOTCONN;
1561 else
1562 r = -EIO;
1556 } 1563 }
1557 1564
1558 if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
1559 r = -ENOTCONN;
1560 else if (!*bdev)
1561 r = -EIO;
1562
1563 spin_unlock_irqrestore(&m->lock, flags); 1565 spin_unlock_irqrestore(&m->lock, flags);
1564 1566
1565 if (r == -ENOTCONN && !fatal_signal_pending(current)) { 1567 if (r == -ENOTCONN) {
1566 spin_lock_irqsave(&m->lock, flags); 1568 spin_lock_irqsave(&m->lock, flags);
1567 if (!m->current_pg) { 1569 if (!m->current_pg) {
1568 /* Path status changed, redo selection */ 1570 /* Path status changed, redo selection */
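
[Editor's note] The rewritten multipath_prepare_ioctl() turns the old two-step derivation of r into an explicit decision table: a usable path with I/O not queued yields the path's bdev, a path still waiting on pg_init yields -ENOTCONN (retryable), and no path yields -ENOTCONN or -EIO depending on whether queueing is enabled. The mapping in sketch form, with the state flattened into booleans for illustration:

/* Sketch only: maps multipath state to the prepare_ioctl return code. */
static int pick_rc(bool have_path, bool queue_io, bool queue_if_no_path)
{
	if (have_path)
		return queue_io ? -ENOTCONN /* pg_init pending */ : 0;
	return queue_if_no_path ? -ENOTCONN /* retry later */ : -EIO;
}
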
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 1fa45695b68a..c219a053c7f6 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1207,6 +1207,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
1207 dm_block_t held_root; 1207 dm_block_t held_root;
1208 1208
1209 /* 1209 /*
1210 * We commit to ensure the btree roots which we increment in a
1211 * moment are up to date.
1212 */
1213 __commit_transaction(pmd);
1214
1215 /*
1210 * Copy the superblock. 1216 * Copy the superblock.
1211 */ 1217 */
1212 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION); 1218 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
@@ -1538,7 +1544,7 @@ static int __remove(struct dm_thin_device *td, dm_block_t block)
1538static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end) 1544static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
1539{ 1545{
1540 int r; 1546 int r;
1541 unsigned count; 1547 unsigned count, total_count = 0;
1542 struct dm_pool_metadata *pmd = td->pmd; 1548 struct dm_pool_metadata *pmd = td->pmd;
1543 dm_block_t keys[1] = { td->id }; 1549 dm_block_t keys[1] = { td->id };
1544 __le64 value; 1550 __le64 value;
@@ -1561,11 +1567,29 @@ static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_
1561 if (r) 1567 if (r)
1562 return r; 1568 return r;
1563 1569
1564 r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count); 1570 /*
1565 if (r) 1571 * Remove leaves stops at the first unmapped entry, so we have to
1566 return r; 1572 * loop round finding mapped ranges.
1573 */
1574 while (begin < end) {
1575 r = dm_btree_lookup_next(&pmd->bl_info, mapping_root, &begin, &begin, &value);
1576 if (r == -ENODATA)
1577 break;
1578
1579 if (r)
1580 return r;
1581
1582 if (begin >= end)
1583 break;
1584
1585 r = dm_btree_remove_leaves(&pmd->bl_info, mapping_root, &begin, end, &mapping_root, &count);
1586 if (r)
1587 return r;
1588
1589 total_count += count;
1590 }
1567 1591
1568 td->mapped_blocks -= count; 1592 td->mapped_blocks -= total_count;
1569 td->changed = 1; 1593 td->changed = 1;
1570 1594
1571 /* 1595 /*
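
[Editor's note] dm_btree_remove_leaves() stops at the first unmapped entry, so the new __remove_range() drives it in a loop: dm_btree_lookup_next() (added in dm-btree.c below) skips the gap to the next mapped key, remove_leaves() deletes that contiguous run, and the per-run counts are accumulated into total_count. The shape of the loop, with the btree calls abstracted into hypothetical helpers:

#include <errno.h>
#include <stdint.h>

/* Hypothetical stand-ins for dm_btree_lookup_next() and
 * dm_btree_remove_leaves(): 0 on success, negative errno on failure,
 * -ENODATA meaning "no mapped key at or past begin". */
int lookup_next_mapped(void *tree, uint64_t key, uint64_t *next);
int remove_run(void *tree, uint64_t *begin, uint64_t end, unsigned *count);

static int remove_range(void *tree, uint64_t begin, uint64_t end,
			uint64_t *total)
{
	unsigned count;
	int r;

	*total = 0;
	while (begin < end) {
		/* skip the unmapped gap to the next mapped key */
		r = lookup_next_mapped(tree, begin, &begin);
		if (r == -ENODATA)
			break;          /* nothing mapped past begin */
		if (r)
			return r;
		if (begin >= end)
			break;          /* next mapping is out of range */

		/* delete one contiguous run; begin advances past it */
		r = remove_run(tree, &begin, end, &count);
		if (r)
			return r;
		*total += count;
	}
	return 0;
}
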
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 3897b90bd462..63903a5a5d9e 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2432,6 +2432,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2432 case PM_WRITE: 2432 case PM_WRITE:
2433 if (old_mode != new_mode) 2433 if (old_mode != new_mode)
2434 notify_of_pool_mode_change(pool, "write"); 2434 notify_of_pool_mode_change(pool, "write");
2435 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
2435 dm_pool_metadata_read_write(pool->pmd); 2436 dm_pool_metadata_read_write(pool->pmd);
2436 pool->process_bio = process_bio; 2437 pool->process_bio = process_bio;
2437 pool->process_discard = process_discard_bio; 2438 pool->process_discard = process_discard_bio;
@@ -4249,10 +4250,9 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
4249{ 4250{
4250 struct thin_c *tc = ti->private; 4251 struct thin_c *tc = ti->private;
4251 struct pool *pool = tc->pool; 4252 struct pool *pool = tc->pool;
4252 struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
4253 4253
4254 if (!pool_limits->discard_granularity) 4254 if (!pool->pf.discard_enabled)
4255 return; /* pool's discard support is disabled */ 4255 return;
4256 4256
4257 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; 4257 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
4258 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ 4258 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6e15f3565892..5df40480228b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -591,7 +591,7 @@ retry:
591 591
592out: 592out:
593 dm_put_live_table(md, *srcu_idx); 593 dm_put_live_table(md, *srcu_idx);
594 if (r == -ENOTCONN) { 594 if (r == -ENOTCONN && !fatal_signal_pending(current)) {
595 msleep(10); 595 msleep(10);
596 goto retry; 596 goto retry;
597 } 597 }
@@ -603,9 +603,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
603{ 603{
604 struct mapped_device *md = bdev->bd_disk->private_data; 604 struct mapped_device *md = bdev->bd_disk->private_data;
605 struct dm_target *tgt; 605 struct dm_target *tgt;
606 struct block_device *tgt_bdev = NULL;
606 int srcu_idx, r; 607 int srcu_idx, r;
607 608
608 r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); 609 r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx);
609 if (r < 0) 610 if (r < 0)
610 return r; 611 return r;
611 612
@@ -620,7 +621,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
620 goto out; 621 goto out;
621 } 622 }
622 623
623 r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); 624 r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg);
624out: 625out:
625 dm_put_live_table(md, srcu_idx); 626 dm_put_live_table(md, srcu_idx);
626 return r; 627 return r;
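
[Editor's note] Two small fixes here: dm_blk_ioctl() now keeps the caller's bdev intact by looking the live table up into a local tgt_bdev and issuing the driver ioctl against that, and the -ENOTCONN retry loop gains a fatal_signal_pending() escape so a killed process does not spin in msleep() forever. The retry idiom in isolation (do_op() is a hypothetical placeholder):

/* Sketch: retry a transiently unavailable operation, but let a fatal
 * signal such as SIGKILL break the loop. */
static int retry_op(void)
{
	int r;

	for (;;) {
		r = do_op();
		if (r != -ENOTCONN || fatal_signal_pending(current))
			break;
		msleep(10);     /* path init still in progress; try again */
	}
	return r;
}
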
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 807095f4c793..dbedc58d8c00 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -314,8 +314,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
314 */ 314 */
315void mddev_suspend(struct mddev *mddev) 315void mddev_suspend(struct mddev *mddev)
316{ 316{
317 BUG_ON(mddev->suspended); 317 if (mddev->suspended++)
318 mddev->suspended = 1; 318 return;
319 synchronize_rcu(); 319 synchronize_rcu();
320 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); 320 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
321 mddev->pers->quiesce(mddev, 1); 321 mddev->pers->quiesce(mddev, 1);
@@ -326,7 +326,8 @@ EXPORT_SYMBOL_GPL(mddev_suspend);
326 326
327void mddev_resume(struct mddev *mddev) 327void mddev_resume(struct mddev *mddev)
328{ 328{
329 mddev->suspended = 0; 329 if (--mddev->suspended)
330 return;
330 wake_up(&mddev->sb_wait); 331 wake_up(&mddev->sb_wait);
331 mddev->pers->quiesce(mddev, 0); 332 mddev->pers->quiesce(mddev, 0);
332 333
@@ -1652,7 +1653,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1652 rdev->journal_tail = le64_to_cpu(sb->journal_tail); 1653 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1653 if (mddev->recovery_cp == MaxSector) 1654 if (mddev->recovery_cp == MaxSector)
1654 set_bit(MD_JOURNAL_CLEAN, &mddev->flags); 1655 set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
1655 rdev->raid_disk = mddev->raid_disks; 1656 rdev->raid_disk = 0;
1656 break; 1657 break;
1657 default: 1658 default:
1658 rdev->saved_raid_disk = role; 1659 rdev->saved_raid_disk = role;
@@ -2773,6 +2774,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2773 /* Activating a spare .. or possibly reactivating 2774 /* Activating a spare .. or possibly reactivating
2774 * if we ever get bitmaps working here. 2775 * if we ever get bitmaps working here.
2775 */ 2776 */
2777 int err;
2776 2778
2777 if (rdev->raid_disk != -1) 2779 if (rdev->raid_disk != -1)
2778 return -EBUSY; 2780 return -EBUSY;
@@ -2794,9 +2796,15 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2794 rdev->saved_raid_disk = -1; 2796 rdev->saved_raid_disk = -1;
2795 clear_bit(In_sync, &rdev->flags); 2797 clear_bit(In_sync, &rdev->flags);
2796 clear_bit(Bitmap_sync, &rdev->flags); 2798 clear_bit(Bitmap_sync, &rdev->flags);
2797 remove_and_add_spares(rdev->mddev, rdev); 2799 err = rdev->mddev->pers->
2798 if (rdev->raid_disk == -1) 2800 hot_add_disk(rdev->mddev, rdev);
2799 return -EBUSY; 2801 if (err) {
2802 rdev->raid_disk = -1;
2803 return err;
2804 } else
2805 sysfs_notify_dirent_safe(rdev->sysfs_state);
2806 if (sysfs_link_rdev(rdev->mddev, rdev))
2807 /* failure here is OK */;
2800 /* don't wakeup anyone, leave that to userspace. */ 2808 /* don't wakeup anyone, leave that to userspace. */
2801 } else { 2809 } else {
2802 if (slot >= rdev->mddev->raid_disks && 2810 if (slot >= rdev->mddev->raid_disks &&
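
[Editor's note] mddev_suspend()/mddev_resume() now nest: suspended becomes a depth counter instead of a flag, so only the outermost suspend quiesces the device and only the matching outermost resume wakes it. The refcount-style pairing in miniature, with a hypothetical device type:

struct dev { int suspend_depth; };
void quiesce(struct dev *d, int on);   /* hypothetical */

/* Callers must balance suspend/resume; inner pairs only move the
 * counter, the hardware is touched at depth 0<->1 transitions. */
void dev_suspend(struct dev *d)
{
	if (d->suspend_depth++)     /* already suspended: just nest */
		return;
	quiesce(d, 1);
}

void dev_resume(struct dev *d)
{
	if (--d->suspend_depth)     /* still nested: stay quiesced */
		return;
	quiesce(d, 0);
}
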
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2bea51edfab7..ca0b643fe3c1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -566,7 +566,9 @@ static inline char * mdname (struct mddev * mddev)
566static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) 566static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
567{ 567{
568 char nm[20]; 568 char nm[20];
569 if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { 569 if (!test_bit(Replacement, &rdev->flags) &&
570 !test_bit(Journal, &rdev->flags) &&
571 mddev->kobj.sd) {
570 sprintf(nm, "rd%d", rdev->raid_disk); 572 sprintf(nm, "rd%d", rdev->raid_disk);
571 return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 573 return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
572 } else 574 } else
@@ -576,7 +578,9 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
576static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) 578static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
577{ 579{
578 char nm[20]; 580 char nm[20];
579 if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { 581 if (!test_bit(Replacement, &rdev->flags) &&
582 !test_bit(Journal, &rdev->flags) &&
583 mddev->kobj.sd) {
580 sprintf(nm, "rd%d", rdev->raid_disk); 584 sprintf(nm, "rd%d", rdev->raid_disk);
581 sysfs_remove_link(&mddev->kobj, nm); 585 sysfs_remove_link(&mddev->kobj, nm);
582 } 586 }
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index c573402033b2..b1ced58eb5e1 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -63,6 +63,11 @@ int lower_bound(struct btree_node *n, uint64_t key)
63 return bsearch(n, key, 0); 63 return bsearch(n, key, 0);
64} 64}
65 65
66static int upper_bound(struct btree_node *n, uint64_t key)
67{
68 return bsearch(n, key, 1);
69}
70
66void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, 71void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
67 struct dm_btree_value_type *vt) 72 struct dm_btree_value_type *vt)
68{ 73{
@@ -252,6 +257,16 @@ static void pop_frame(struct del_stack *s)
252 dm_tm_unlock(s->tm, f->b); 257 dm_tm_unlock(s->tm, f->b);
253} 258}
254 259
260static void unlock_all_frames(struct del_stack *s)
261{
262 struct frame *f;
263
264 while (unprocessed_frames(s)) {
265 f = s->spine + s->top--;
266 dm_tm_unlock(s->tm, f->b);
267 }
268}
269
255int dm_btree_del(struct dm_btree_info *info, dm_block_t root) 270int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
256{ 271{
257 int r; 272 int r;
@@ -308,9 +323,13 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
308 pop_frame(s); 323 pop_frame(s);
309 } 324 }
310 } 325 }
311
312out: 326out:
327 if (r) {
328 /* cleanup all frames of del_stack */
329 unlock_all_frames(s);
330 }
313 kfree(s); 331 kfree(s);
332
314 return r; 333 return r;
315} 334}
316EXPORT_SYMBOL_GPL(dm_btree_del); 335EXPORT_SYMBOL_GPL(dm_btree_del);
@@ -392,6 +411,82 @@ int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
392} 411}
393EXPORT_SYMBOL_GPL(dm_btree_lookup); 412EXPORT_SYMBOL_GPL(dm_btree_lookup);
394 413
414static int dm_btree_lookup_next_single(struct dm_btree_info *info, dm_block_t root,
415 uint64_t key, uint64_t *rkey, void *value_le)
416{
417 int r, i;
418 uint32_t flags, nr_entries;
419 struct dm_block *node;
420 struct btree_node *n;
421
422 r = bn_read_lock(info, root, &node);
423 if (r)
424 return r;
425
426 n = dm_block_data(node);
427 flags = le32_to_cpu(n->header.flags);
428 nr_entries = le32_to_cpu(n->header.nr_entries);
429
430 if (flags & INTERNAL_NODE) {
431 i = lower_bound(n, key);
432 if (i < 0 || i >= nr_entries) {
433 r = -ENODATA;
434 goto out;
435 }
436
437 r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
438 if (r == -ENODATA && i < (nr_entries - 1)) {
439 i++;
440 r = dm_btree_lookup_next_single(info, value64(n, i), key, rkey, value_le);
441 }
442
443 } else {
444 i = upper_bound(n, key);
445 if (i < 0 || i >= nr_entries) {
446 r = -ENODATA;
447 goto out;
448 }
449
450 *rkey = le64_to_cpu(n->keys[i]);
451 memcpy(value_le, value_ptr(n, i), info->value_type.size);
452 }
453out:
454 dm_tm_unlock(info->tm, node);
455 return r;
456}
457
458int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
459 uint64_t *keys, uint64_t *rkey, void *value_le)
460{
461 unsigned level;
462 int r = -ENODATA;
463 __le64 internal_value_le;
464 struct ro_spine spine;
465
466 init_ro_spine(&spine, info);
467 for (level = 0; level < info->levels - 1u; level++) {
468 r = btree_lookup_raw(&spine, root, keys[level],
469 lower_bound, rkey,
470 &internal_value_le, sizeof(uint64_t));
471 if (r)
472 goto out;
473
474 if (*rkey != keys[level]) {
475 r = -ENODATA;
476 goto out;
477 }
478
479 root = le64_to_cpu(internal_value_le);
480 }
481
482 r = dm_btree_lookup_next_single(info, root, keys[level], rkey, value_le);
483out:
484 exit_ro_spine(&spine);
485 return r;
486}
487
488EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
489
395/* 490/*
396 * Splits a node by creating a sibling node and shifting half the nodes 491 * Splits a node by creating a sibling node and shifting half the nodes
397 * contents across. Assumes there is a parent node, and it has room for 492 * contents across. Assumes there is a parent node, and it has room for
@@ -473,8 +568,10 @@ static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
473 568
474 r = insert_at(sizeof(__le64), pn, parent_index + 1, 569 r = insert_at(sizeof(__le64), pn, parent_index + 1,
475 le64_to_cpu(rn->keys[0]), &location); 570 le64_to_cpu(rn->keys[0]), &location);
476 if (r) 571 if (r) {
572 unlock_block(s->info, right);
477 return r; 573 return r;
574 }
478 575
479 if (key < le64_to_cpu(rn->keys[0])) { 576 if (key < le64_to_cpu(rn->keys[0])) {
480 unlock_block(s->info, right); 577 unlock_block(s->info, right);
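
[Editor's note] dm_btree_lookup_next_single() above is the core of the new lookup: in an internal node it descends via lower_bound() and, if the chosen child reports -ENODATA, retries the next sibling; in a leaf it uses the new upper_bound() to land on the first key >= the one requested (the kernel's bsearch() returns the lower or upper neighbour depending on want_hi). A flat-array analogue of the leaf step, as a minimal sketch:

#include <errno.h>
#include <stdint.h>

/* First index i in a sorted array with keys[i] >= key, i.e. what
 * upper_bound() computes for a leaf; -ENODATA when past the end. */
static int lookup_next(const uint64_t *keys, int n, uint64_t key,
		       uint64_t *rkey)
{
	int lo = 0, hi = n;

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		if (keys[mid] < key)
			lo = mid + 1;
		else
			hi = mid;
	}
	if (lo == n)
		return -ENODATA;
	*rkey = keys[lo];
	return 0;
}
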
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index 11d8cf78621d..c74301fa5a37 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -110,6 +110,13 @@ int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
110 uint64_t *keys, void *value_le); 110 uint64_t *keys, void *value_le);
111 111
112/* 112/*
113 * Tries to find the first key where the bottom level key is >= to that
114 * given. Useful for skipping empty sections of the btree.
115 */
116int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
117 uint64_t *keys, uint64_t *rkey, void *value_le);
118
119/*
113 * Insertion (or overwrite an existing value). O(ln(n)) 120 * Insertion (or overwrite an existing value). O(ln(n))
114 */ 121 */
115int dm_btree_insert(struct dm_btree_info *info, dm_block_t root, 122int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
@@ -135,9 +142,10 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
135 uint64_t *keys, dm_block_t *new_root); 142 uint64_t *keys, dm_block_t *new_root);
136 143
137/* 144/*
138 * Removes values between 'keys' and keys2, where keys2 is keys with the 145 * Removes a _contiguous_ run of values starting from 'keys' and not
139 * final key replaced with 'end_key'. 'end_key' is the one-past-the-end 146 * reaching keys2 (where keys2 is keys with the final key replaced with
140 * value. 'keys' may be altered. 147 * 'end_key'). 'end_key' is the one-past-the-end value. 'keys' may be
148 * altered.
141 */ 149 */
142int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root, 150int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
143 uint64_t *keys, uint64_t end_key, 151 uint64_t *keys, uint64_t end_key,
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 53091295fce9..fca6dbcf9a47 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -136,7 +136,7 @@ static int brb_push(struct bop_ring_buffer *brb,
136 return 0; 136 return 0;
137} 137}
138 138
139static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result) 139static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
140{ 140{
141 struct block_op *bop; 141 struct block_op *bop;
142 142
@@ -147,6 +147,17 @@ static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
147 result->type = bop->type; 147 result->type = bop->type;
148 result->block = bop->block; 148 result->block = bop->block;
149 149
150 return 0;
151}
152
153static int brb_pop(struct bop_ring_buffer *brb)
154{
155 struct block_op *bop;
156
157 if (brb_empty(brb))
158 return -ENODATA;
159
160 bop = brb->bops + brb->begin;
150 brb->begin = brb_next(brb, brb->begin); 161 brb->begin = brb_next(brb, brb->begin);
151 162
152 return 0; 163 return 0;
@@ -211,7 +222,7 @@ static int apply_bops(struct sm_metadata *smm)
211 while (!brb_empty(&smm->uncommitted)) { 222 while (!brb_empty(&smm->uncommitted)) {
212 struct block_op bop; 223 struct block_op bop;
213 224
214 r = brb_pop(&smm->uncommitted, &bop); 225 r = brb_peek(&smm->uncommitted, &bop);
215 if (r) { 226 if (r) {
216 DMERR("bug in bop ring buffer"); 227 DMERR("bug in bop ring buffer");
217 break; 228 break;
@@ -220,6 +231,8 @@ static int apply_bops(struct sm_metadata *smm)
220 r = commit_bop(smm, &bop); 231 r = commit_bop(smm, &bop);
221 if (r) 232 if (r)
222 break; 233 break;
234
235 brb_pop(&smm->uncommitted);
223 } 236 }
224 237
225 return r; 238 return r;
@@ -683,7 +696,6 @@ static struct dm_space_map bootstrap_ops = {
683static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) 696static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
684{ 697{
685 int r, i; 698 int r, i;
686 enum allocation_event ev;
687 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 699 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
688 dm_block_t old_len = smm->ll.nr_blocks; 700 dm_block_t old_len = smm->ll.nr_blocks;
689 701
@@ -705,11 +717,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
705 * allocate any new blocks. 717 * allocate any new blocks.
706 */ 718 */
707 do { 719 do {
708 for (i = old_len; !r && i < smm->begin; i++) { 720 for (i = old_len; !r && i < smm->begin; i++)
709 r = sm_ll_inc(&smm->ll, i, &ev); 721 r = add_bop(smm, BOP_INC, i);
710 if (r) 722
711 goto out; 723 if (r)
712 } 724 goto out;
725
713 old_len = smm->begin; 726 old_len = smm->begin;
714 727
715 r = apply_bops(smm); 728 r = apply_bops(smm);
@@ -754,7 +767,6 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
754{ 767{
755 int r; 768 int r;
756 dm_block_t i; 769 dm_block_t i;
757 enum allocation_event ev;
758 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); 770 struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
759 771
760 smm->begin = superblock + 1; 772 smm->begin = superblock + 1;
@@ -782,7 +794,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
782 * allocated blocks that they were built from. 794 * allocated blocks that they were built from.
783 */ 795 */
784 for (i = superblock; !r && i < smm->begin; i++) 796 for (i = superblock; !r && i < smm->begin; i++)
785 r = sm_ll_inc(&smm->ll, i, &ev); 797 r = add_bop(smm, BOP_INC, i);
786 798
787 if (r) 799 if (r)
788 return r; 800 return r;
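
[Editor's note] Splitting brb_pop() into brb_peek() plus brb_pop() changes apply_bops() from destructive to transactional consumption: an op is dequeued only after commit_bop() succeeds, so a failed commit leaves it in the ring for a later retry instead of silently dropping it. The consume loop, schematically, over a hypothetical ring API:

#include <stdint.h>

struct op { int type; uint64_t block; };
int  ring_empty(void);                  /* hypothetical ring ops */
int  ring_peek(struct op *out);
int  commit_op(const struct op *op);
void ring_pop(void);

static int apply_ops(void)
{
	int r = 0;

	while (!ring_empty()) {
		struct op op;

		r = ring_peek(&op);
		if (r)
			break;          /* ring bug: bail loudly */

		r = commit_op(&op);
		if (r)
			break;          /* op stays queued for a retry */

		ring_pop();             /* committed: consume it now */
	}
	return r;
}
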
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 41d70bc9ba2f..84e597e1c489 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1946,6 +1946,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1946 1946
1947 first = i; 1947 first = i;
1948 fbio = r10_bio->devs[i].bio; 1948 fbio = r10_bio->devs[i].bio;
1949 fbio->bi_iter.bi_size = r10_bio->sectors << 9;
1950 fbio->bi_iter.bi_idx = 0;
1949 1951
1950 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); 1952 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
1951 /* now find blocks with errors */ 1953 /* now find blocks with errors */
@@ -1989,7 +1991,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1989 bio_reset(tbio); 1991 bio_reset(tbio);
1990 1992
1991 tbio->bi_vcnt = vcnt; 1993 tbio->bi_vcnt = vcnt;
1992 tbio->bi_iter.bi_size = r10_bio->sectors << 9; 1994 tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
1993 tbio->bi_rw = WRITE; 1995 tbio->bi_rw = WRITE;
1994 tbio->bi_private = r10_bio; 1996 tbio->bi_private = r10_bio;
1995 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; 1997 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 35759a91d47d..e8f847226a19 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -1992,9 +1992,9 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
1992 (unsigned long long)pci_resource_start(pci_dev, 0)); 1992 (unsigned long long)pci_resource_start(pci_dev, 0));
1993 1993
1994 pci_set_master(pci_dev); 1994 pci_set_master(pci_dev);
1995 if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { 1995 err = pci_set_dma_mask(pci_dev, 0xffffffff);
1996 if (err) {
1996 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 1997 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1997 err = -EIO;
1998 goto fail_context; 1998 goto fail_context;
1999 } 1999 }
2000 2000
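
[Editor's note] This and the next several media patches fix the same inverted test: pci_set_dma_mask() returns 0 on success and a negative errno on failure, so "if (!pci_set_dma_mask(...))" took the error branch precisely when the mask had been set. The corrected probe-time idiom (the fail label is a placeholder):

/* pci_set_dma_mask() follows the usual kernel convention:
 * 0 == success, negative errno == failure. */
err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
if (err) {
	dev_err(&pci_dev->dev, "no suitable 32-bit DMA mask\n");
	goto fail;
}
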
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
index dbc695f32760..0042803a9de7 100644
--- a/drivers/media/pci/cx25821/cx25821-core.c
+++ b/drivers/media/pci/cx25821/cx25821-core.c
@@ -1319,7 +1319,8 @@ static int cx25821_initdev(struct pci_dev *pci_dev,
1319 dev->pci_lat, (unsigned long long)dev->base_io_addr); 1319 dev->pci_lat, (unsigned long long)dev->base_io_addr);
1320 1320
1321 pci_set_master(pci_dev); 1321 pci_set_master(pci_dev);
1322 if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { 1322 err = pci_set_dma_mask(pci_dev, 0xffffffff);
1323 if (err) {
1323 pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 1324 pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1324 err = -EIO; 1325 err = -EIO;
1325 goto fail_irq; 1326 goto fail_irq;
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 0ed1b6530374..1b5268f9bb24 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -890,9 +890,9 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
890 return err; 890 return err;
891 } 891 }
892 892
893 if (!pci_set_dma_mask(pci,DMA_BIT_MASK(32))) { 893 err = pci_set_dma_mask(pci,DMA_BIT_MASK(32));
894 if (err) {
894 dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name); 895 dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
895 err = -EIO;
896 cx88_core_put(core, pci); 896 cx88_core_put(core, pci);
897 return err; 897 return err;
898 } 898 }
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 9db7767d1fe0..f34c229f9b37 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -393,7 +393,8 @@ static int cx8802_init_common(struct cx8802_dev *dev)
393 if (pci_enable_device(dev->pci)) 393 if (pci_enable_device(dev->pci))
394 return -EIO; 394 return -EIO;
395 pci_set_master(dev->pci); 395 pci_set_master(dev->pci);
396 if (!pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32))) { 396 err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32));
397 if (err) {
397 printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name); 398 printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name);
398 return -EIO; 399 return -EIO;
399 } 400 }
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 0de1ad5a977d..aef9acf351f6 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1314,9 +1314,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
1314 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); 1314 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
1315 1315
1316 pci_set_master(pci_dev); 1316 pci_set_master(pci_dev);
1317 if (!pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32))) { 1317 err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32));
1318 if (err) {
1318 printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name); 1319 printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name);
1319 err = -EIO;
1320 goto fail_core; 1320 goto fail_core;
1321 } 1321 }
1322 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev); 1322 dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev);
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 8616fa8193bc..c2e60b4f292d 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -805,11 +805,11 @@ static void ivtv_init_struct2(struct ivtv *itv)
805{ 805{
806 int i; 806 int i;
807 807
808 for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS - 1; i++) 808 for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++)
809 if (itv->card->video_inputs[i].video_type == 0) 809 if (itv->card->video_inputs[i].video_type == 0)
810 break; 810 break;
811 itv->nof_inputs = i; 811 itv->nof_inputs = i;
812 for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS - 1; i++) 812 for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++)
813 if (itv->card->audio_inputs[i].audio_type == 0) 813 if (itv->card->audio_inputs[i].audio_type == 0)
814 break; 814 break;
815 itv->nof_audio_inputs = i; 815 itv->nof_audio_inputs = i;
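
[Editor's note] The ivtv fix is a plain off-by-one: looping with i < MAX - 1 can never inspect the last slot, so a board with every input populated was counted as having MAX - 1 inputs. Scanning the full array restores the "count until the first empty slot" semantics. A minimal sketch with a hypothetical input type:

struct input { int type; };   /* type == 0 marks an unused slot */

/* Count leading populated entries; i < max (not max - 1) also
 * counts a completely full table correctly. */
static int count_inputs(const struct input *tbl, int max)
{
	int i;

	for (i = 0; i < max; i++)
		if (tbl[i].type == 0)
			break;
	return i;
}
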
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 60b2d462f98d..3fdbd81b5580 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -810,7 +810,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
810 "%s(): board vendor 0x%x, revision 0x%x\n", 810 "%s(): board vendor 0x%x, revision 0x%x\n",
811 __func__, board_vendor, board_revision); 811 __func__, board_vendor, board_revision);
812 pci_set_master(pci_dev); 812 pci_set_master(pci_dev);
813 if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { 813 if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
814 dev_err(&pci_dev->dev, 814 dev_err(&pci_dev->dev,
815 "%s(): 32bit PCI DMA is not supported\n", __func__); 815 "%s(): 32bit PCI DMA is not supported\n", __func__);
816 goto pci_detect_err; 816 goto pci_detect_err;
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index e79d63eb774e..f720cea80e28 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -951,9 +951,9 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
951 pci_name(pci_dev), dev->pci_rev, pci_dev->irq, 951 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
952 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); 952 dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
953 pci_set_master(pci_dev); 953 pci_set_master(pci_dev);
954 if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { 954 err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
955 if (err) {
955 pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name); 956 pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
956 err = -EIO;
957 goto fail1; 957 goto fail1;
958 } 958 }
959 959
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 8f36b48ef733..8bbd092fbe1d 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1264,9 +1264,9 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
1264 1264
1265 pci_set_master(pci_dev); 1265 pci_set_master(pci_dev);
1266 /* TODO */ 1266 /* TODO */
1267 if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { 1267 err = pci_set_dma_mask(pci_dev, 0xffffffff);
1268 if (err) {
1268 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); 1269 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1269 err = -EIO;
1270 goto fail_irq; 1270 goto fail_irq;
1271 } 1271 }
1272 1272
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index 8c5655d351d3..4e77618fbb2b 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -257,9 +257,9 @@ static int tw68_initdev(struct pci_dev *pci_dev,
257 dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, 257 dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
258 dev->pci_lat, (u64)pci_resource_start(pci_dev, 0)); 258 dev->pci_lat, (u64)pci_resource_start(pci_dev, 0));
259 pci_set_master(pci_dev); 259 pci_set_master(pci_dev);
260 if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { 260 err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
261 if (err) {
261 pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name); 262 pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name);
262 err = -EIO;
263 goto fail1; 263 goto fail1;
264 } 264 }
265 265
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index fcbb49757614..565a59310747 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -134,7 +134,7 @@ struct airspy {
134 int urbs_submitted; 134 int urbs_submitted;
135 135
136 /* USB control message buffer */ 136 /* USB control message buffer */
137 #define BUF_SIZE 24 137 #define BUF_SIZE 128
138 u8 buf[BUF_SIZE]; 138 u8 buf[BUF_SIZE];
139 139
140 /* Current configuration */ 140 /* Current configuration */
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index e05bfec90f46..0fe5cb2c260c 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -24,6 +24,15 @@
24#include <media/videobuf2-v4l2.h> 24#include <media/videobuf2-v4l2.h>
25#include <media/videobuf2-vmalloc.h> 25#include <media/videobuf2-vmalloc.h>
26 26
27/*
28 * Used Avago MGA-81563 RF amplifier could be destroyed pretty easily with too
29 * strong signal or transmitting to bad antenna.
30 * Set RF gain control to 'grabbed' state by default for sure.
31 */
32static bool hackrf_enable_rf_gain_ctrl;
33module_param_named(enable_rf_gain_ctrl, hackrf_enable_rf_gain_ctrl, bool, 0644);
34MODULE_PARM_DESC(enable_rf_gain_ctrl, "enable RX/TX RF amplifier control (warn: could damage amplifier)");
35
27/* HackRF USB API commands (from HackRF Library) */ 36/* HackRF USB API commands (from HackRF Library) */
28enum { 37enum {
29 CMD_SET_TRANSCEIVER_MODE = 0x01, 38 CMD_SET_TRANSCEIVER_MODE = 0x01,
@@ -1451,6 +1460,7 @@ static int hackrf_probe(struct usb_interface *intf,
1451 dev_err(dev->dev, "Could not initialize controls\n"); 1460 dev_err(dev->dev, "Could not initialize controls\n");
1452 goto err_v4l2_ctrl_handler_free_rx; 1461 goto err_v4l2_ctrl_handler_free_rx;
1453 } 1462 }
1463 v4l2_ctrl_grab(dev->rx_rf_gain, !hackrf_enable_rf_gain_ctrl);
1454 v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler); 1464 v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler);
1455 1465
1456 /* Register controls for transmitter */ 1466 /* Register controls for transmitter */
@@ -1471,6 +1481,7 @@ static int hackrf_probe(struct usb_interface *intf,
1471 dev_err(dev->dev, "Could not initialize controls\n"); 1481 dev_err(dev->dev, "Could not initialize controls\n");
1472 goto err_v4l2_ctrl_handler_free_tx; 1482 goto err_v4l2_ctrl_handler_free_tx;
1473 } 1483 }
1484 v4l2_ctrl_grab(dev->tx_rf_gain, !hackrf_enable_rf_gain_ctrl);
1474 v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler); 1485 v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler);
1475 1486
1476 /* Register the v4l2_device structure */ 1487 /* Register the v4l2_device structure */
@@ -1530,7 +1541,7 @@ err_v4l2_ctrl_handler_free_rx:
1530err_kfree: 1541err_kfree:
1531 kfree(dev); 1542 kfree(dev);
1532err: 1543err:
1533 dev_dbg(dev->dev, "failed=%d\n", ret); 1544 dev_dbg(&intf->dev, "failed=%d\n", ret);
1534 return ret; 1545 return ret;
1535} 1546}
1536 1547
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index d2e75c88f4d2..f40909793490 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -497,6 +497,7 @@ static u64 calculate_sr(struct cxl_context *ctx)
497{ 497{
498 u64 sr = 0; 498 u64 sr = 0;
499 499
500 set_endian(sr);
500 if (ctx->master) 501 if (ctx->master)
501 sr |= CXL_PSL_SR_An_MP; 502 sr |= CXL_PSL_SR_An_MP;
502 if (mfspr(SPRN_LPCR) & LPCR_TC) 503 if (mfspr(SPRN_LPCR) & LPCR_TC)
@@ -506,7 +507,6 @@ static u64 calculate_sr(struct cxl_context *ctx)
506 sr |= CXL_PSL_SR_An_HV; 507 sr |= CXL_PSL_SR_An_HV;
507 } else { 508 } else {
508 sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R; 509 sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
509 set_endian(sr);
510 sr &= ~(CXL_PSL_SR_An_HV); 510 sr &= ~(CXL_PSL_SR_An_HV);
511 if (!test_tsk_thread_flag(current, TIF_32BIT)) 511 if (!test_tsk_thread_flag(current, TIF_32BIT))
512 sr |= CXL_PSL_SR_An_SF; 512 sr |= CXL_PSL_SR_An_SF;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 23b6c8e8701c..d8486168415a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -65,8 +65,7 @@ MODULE_ALIAS("mmc:block");
65#define MMC_SANITIZE_REQ_TIMEOUT 240000 65#define MMC_SANITIZE_REQ_TIMEOUT 240000
66#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) 66#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
67 67
68#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ 68#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
69 (req->cmd_flags & REQ_META)) && \
70 (rq_data_dir(req) == WRITE)) 69 (rq_data_dir(req) == WRITE))
71#define PACKED_CMD_VER 0x01 70#define PACKED_CMD_VER 0x01
72#define PACKED_CMD_WR 0x02 71#define PACKED_CMD_WR 0x02
@@ -1467,13 +1466,9 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
1467 1466
1468 /* 1467 /*
1469 * Reliable writes are used to implement Forced Unit Access and 1468 * Reliable writes are used to implement Forced Unit Access and
1470 * REQ_META accesses, and are supported only on MMCs. 1469 * are supported only on MMCs.
1471 *
1472 * XXX: this really needs a good explanation of why REQ_META
1473 * is treated special.
1474 */ 1470 */
1475 bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || 1471 bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
1476 (req->cmd_flags & REQ_META)) &&
1477 (rq_data_dir(req) == WRITE) && 1472 (rq_data_dir(req) == WRITE) &&
1478 (md->flags & MMC_BLK_REL_WR); 1473 (md->flags & MMC_BLK_REL_WR);
1479 1474
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c793fda27321..3a9a79ec4343 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1040,9 +1040,24 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
1040 return err; 1040 return err;
1041} 1041}
1042 1042
1043/* Caller must hold re-tuning */
1044static int mmc_switch_status(struct mmc_card *card)
1045{
1046 u32 status;
1047 int err;
1048
1049 err = mmc_send_status(card, &status);
1050 if (err)
1051 return err;
1052
1053 return mmc_switch_status_error(card->host, status);
1054}
1055
1043static int mmc_select_hs400(struct mmc_card *card) 1056static int mmc_select_hs400(struct mmc_card *card)
1044{ 1057{
1045 struct mmc_host *host = card->host; 1058 struct mmc_host *host = card->host;
1059 bool send_status = true;
1060 unsigned int max_dtr;
1046 int err = 0; 1061 int err = 0;
1047 u8 val; 1062 u8 val;
1048 1063
@@ -1053,25 +1068,36 @@ static int mmc_select_hs400(struct mmc_card *card)
1053 host->ios.bus_width == MMC_BUS_WIDTH_8)) 1068 host->ios.bus_width == MMC_BUS_WIDTH_8))
1054 return 0; 1069 return 0;
1055 1070
1056 /* 1071 if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
1057 * Before switching to dual data rate operation for HS400, 1072 send_status = false;
1058 * it is required to convert from HS200 mode to HS mode.
1059 */
1060 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1061 mmc_set_bus_speed(card);
1062 1073
1074 /* Reduce frequency to HS frequency */
1075 max_dtr = card->ext_csd.hs_max_dtr;
1076 mmc_set_clock(host, max_dtr);
1077
1078 /* Switch card to HS mode */
1063 val = EXT_CSD_TIMING_HS | 1079 val = EXT_CSD_TIMING_HS |
1064 card->drive_strength << EXT_CSD_DRV_STR_SHIFT; 1080 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1065 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1081 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1066 EXT_CSD_HS_TIMING, val, 1082 EXT_CSD_HS_TIMING, val,
1067 card->ext_csd.generic_cmd6_time, 1083 card->ext_csd.generic_cmd6_time,
1068 true, true, true); 1084 true, send_status, true);
1069 if (err) { 1085 if (err) {
1070 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n", 1086 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
1071 mmc_hostname(host), err); 1087 mmc_hostname(host), err);
1072 return err; 1088 return err;
1073 } 1089 }
1074 1090
1091 /* Set host controller to HS timing */
1092 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1093
1094 if (!send_status) {
1095 err = mmc_switch_status(card);
1096 if (err)
1097 goto out_err;
1098 }
1099
1100 /* Switch card to DDR */
1075 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1101 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1076 EXT_CSD_BUS_WIDTH, 1102 EXT_CSD_BUS_WIDTH,
1077 EXT_CSD_DDR_BUS_WIDTH_8, 1103 EXT_CSD_DDR_BUS_WIDTH_8,
@@ -1082,22 +1108,35 @@ static int mmc_select_hs400(struct mmc_card *card)
1082 return err; 1108 return err;
1083 } 1109 }
1084 1110
1111 /* Switch card to HS400 */
1085 val = EXT_CSD_TIMING_HS400 | 1112 val = EXT_CSD_TIMING_HS400 |
1086 card->drive_strength << EXT_CSD_DRV_STR_SHIFT; 1113 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
1087 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1114 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1088 EXT_CSD_HS_TIMING, val, 1115 EXT_CSD_HS_TIMING, val,
1089 card->ext_csd.generic_cmd6_time, 1116 card->ext_csd.generic_cmd6_time,
1090 true, true, true); 1117 true, send_status, true);
1091 if (err) { 1118 if (err) {
1092 pr_err("%s: switch to hs400 failed, err:%d\n", 1119 pr_err("%s: switch to hs400 failed, err:%d\n",
1093 mmc_hostname(host), err); 1120 mmc_hostname(host), err);
1094 return err; 1121 return err;
1095 } 1122 }
1096 1123
1124 /* Set host controller to HS400 timing and frequency */
1097 mmc_set_timing(host, MMC_TIMING_MMC_HS400); 1125 mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1098 mmc_set_bus_speed(card); 1126 mmc_set_bus_speed(card);
1099 1127
1128 if (!send_status) {
1129 err = mmc_switch_status(card);
1130 if (err)
1131 goto out_err;
1132 }
1133
1100 return 0; 1134 return 0;
1135
1136out_err:
1137 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1138 __func__, err);
1139 return err;
1101} 1140}
1102 1141
1103int mmc_hs200_to_hs400(struct mmc_card *card) 1142int mmc_hs200_to_hs400(struct mmc_card *card)
@@ -1105,19 +1144,6 @@ int mmc_hs200_to_hs400(struct mmc_card *card)
1105 return mmc_select_hs400(card); 1144 return mmc_select_hs400(card);
1106} 1145}
1107 1146
1108/* Caller must hold re-tuning */
1109static int mmc_switch_status(struct mmc_card *card)
1110{
1111 u32 status;
1112 int err;
1113
1114 err = mmc_send_status(card, &status);
1115 if (err)
1116 return err;
1117
1118 return mmc_switch_status_error(card->host, status);
1119}
1120
1121int mmc_hs400_to_hs200(struct mmc_card *card) 1147int mmc_hs400_to_hs200(struct mmc_card *card)
1122{ 1148{
1123 struct mmc_host *host = card->host; 1149 struct mmc_host *host = card->host;
@@ -1219,6 +1245,8 @@ static void mmc_select_driver_type(struct mmc_card *card)
1219static int mmc_select_hs200(struct mmc_card *card) 1245static int mmc_select_hs200(struct mmc_card *card)
1220{ 1246{
1221 struct mmc_host *host = card->host; 1247 struct mmc_host *host = card->host;
1248 bool send_status = true;
1249 unsigned int old_timing;
1222 int err = -EINVAL; 1250 int err = -EINVAL;
1223 u8 val; 1251 u8 val;
1224 1252
@@ -1234,6 +1262,9 @@ static int mmc_select_hs200(struct mmc_card *card)
1234 1262
1235 mmc_select_driver_type(card); 1263 mmc_select_driver_type(card);
1236 1264
1265 if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
1266 send_status = false;
1267
1237 /* 1268 /*
1238 * Set the bus width(4 or 8) with host's support and 1269 * Set the bus width(4 or 8) with host's support and
1239 * switch to HS200 mode if bus width is set successfully. 1270 * switch to HS200 mode if bus width is set successfully.
@@ -1245,11 +1276,25 @@ static int mmc_select_hs200(struct mmc_card *card)
1245 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1276 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1246 EXT_CSD_HS_TIMING, val, 1277 EXT_CSD_HS_TIMING, val,
1247 card->ext_csd.generic_cmd6_time, 1278 card->ext_csd.generic_cmd6_time,
1248 true, true, true); 1279 true, send_status, true);
1249 if (!err) 1280 if (err)
1250 mmc_set_timing(host, MMC_TIMING_MMC_HS200); 1281 goto err;
1282 old_timing = host->ios.timing;
1283 mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1284 if (!send_status) {
1285 err = mmc_switch_status(card);
1286 /*
1287 * mmc_select_timing() assumes timing has not changed if
1288 * it is a switch error.
1289 */
1290 if (err == -EBADMSG)
1291 mmc_set_timing(host, old_timing);
1292 }
1251 } 1293 }
1252err: 1294err:
1295 if (err)
1296 pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
1297 __func__, err);
1253 return err; 1298 return err;
1254} 1299}
1255 1300
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index af71de5fda3b..1dee533634c9 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -473,6 +473,7 @@ config MMC_DAVINCI
473 473
474config MMC_GOLDFISH 474config MMC_GOLDFISH
475 tristate "goldfish qemu Multimedia Card Interface support" 475 tristate "goldfish qemu Multimedia Card Interface support"
476 depends on HAS_DMA
476 depends on GOLDFISH || COMPILE_TEST 477 depends on GOLDFISH || COMPILE_TEST
477 help 478 help
478 This selects the Goldfish Multimedia card Interface emulation 479 This selects the Goldfish Multimedia card Interface emulation
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 39568cc29a2a..33dfd7e72516 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1276,7 +1276,7 @@ static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
1276 int start = 0, len = 0; 1276 int start = 0, len = 0;
1277 int start_final = 0, len_final = 0; 1277 int start_final = 0, len_final = 0;
1278 u8 final_phase = 0xff; 1278 u8 final_phase = 0xff;
1279 struct msdc_delay_phase delay_phase; 1279 struct msdc_delay_phase delay_phase = { 0, };
1280 1280
1281 if (delay == 0) { 1281 if (delay == 0) {
1282 dev_err(host->dev, "phase error: [map:%x]\n", delay); 1282 dev_err(host->dev, "phase error: [map:%x]\n", delay);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 8cadd74e8407..ce08896b9d69 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -805,7 +805,7 @@ static int pxamci_probe(struct platform_device *pdev)
805 goto out; 805 goto out;
806 } else { 806 } else {
807 mmc->caps |= host->pdata->gpio_card_ro_invert ? 807 mmc->caps |= host->pdata->gpio_card_ro_invert ?
808 MMC_CAP2_RO_ACTIVE_HIGH : 0; 808 0 : MMC_CAP2_RO_ACTIVE_HIGH;
809 } 809 }
810 810
811 if (gpio_is_valid(gpio_cd)) 811 if (gpio_is_valid(gpio_cd))
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index dc4e8446f1ff..5a99a93ed025 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/gpio.h> 26#include <linux/gpio.h>
27 27
28#include <asm/mach-jz4740/gpio.h>
28#include <asm/mach-jz4740/jz4740_nand.h> 29#include <asm/mach-jz4740/jz4740_nand.h>
29 30
30#define JZ_REG_NAND_CTRL 0x50 31#define JZ_REG_NAND_CTRL 0x50
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index cc74142938b0..ece544efccc3 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3110,7 +3110,7 @@ static void nand_resume(struct mtd_info *mtd)
3110 */ 3110 */
3111static void nand_shutdown(struct mtd_info *mtd) 3111static void nand_shutdown(struct mtd_info *mtd)
3112{ 3112{
3113 nand_get_device(mtd, FL_SHUTDOWN); 3113 nand_get_device(mtd, FL_PM_SUSPENDED);
3114} 3114}
3115 3115
3116/* Set default functions */ 3116/* Set default functions */
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 669c3452f278..9ed6038e47d2 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -46,10 +46,18 @@ static int parse_ofpart_partitions(struct mtd_info *master,
46 46
47 ofpart_node = of_get_child_by_name(mtd_node, "partitions"); 47 ofpart_node = of_get_child_by_name(mtd_node, "partitions");
48 if (!ofpart_node) { 48 if (!ofpart_node) {
49 pr_warn("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n", 49 /*
50 master->name, mtd_node->full_name); 50 * We might get here even when ofpart isn't used at all (e.g.,
51 * when using another parser), so don't be louder than
52 * KERN_DEBUG
53 */
54 pr_debug("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n",
55 master->name, mtd_node->full_name);
51 ofpart_node = mtd_node; 56 ofpart_node = mtd_node;
52 dedicated = false; 57 dedicated = false;
58 } else if (!of_device_is_compatible(ofpart_node, "fixed-partitions")) {
59 /* The 'partitions' subnode might be used by another parser */
60 return 0;
53 } 61 }
54 62
55 /* First count the subnodes */ 63 /* First count the subnodes */
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 57dadd52b428..1deb8ff90a89 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -501,8 +501,6 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
501 cf->data[2] |= CAN_ERR_PROT_FORM; 501 cf->data[2] |= CAN_ERR_PROT_FORM;
502 else if (status & SER) 502 else if (status & SER)
503 cf->data[2] |= CAN_ERR_PROT_STUFF; 503 cf->data[2] |= CAN_ERR_PROT_STUFF;
504 else
505 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
506 } 504 }
507 505
508 priv->can.state = state; 506 priv->can.state = state;
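
[Editor's note] This and the CAN driver patches that follow all rest on two facts about the uapi error codes: CAN_ERR_PROT_UNSPEC and CAN_ERR_PROT_LOC_UNSPEC are defined as 0, so OR-ing them in was always a no-op, and the data[3] error location is a single enum-like value rather than a bitmask, so it is assigned instead of OR-ed (the *_DEL delimiter locations are dropped in favour of the primary location). In short:

/* include/uapi/linux/can/error.h defines the UNSPEC codes as 0, so
 *     cf->data[2] |= CAN_ERR_PROT_UNSPEC;
 * never changed anything and can simply be deleted.
 * The location byte is a value, not a set of flags: */
cf->data[3] = CAN_ERR_PROT_LOC_ACK;   /* assign, don't OR */
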
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 5d214d135332..f91b094288da 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -962,7 +962,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
962 * type of the last error to occur on the CAN bus 962 * type of the last error to occur on the CAN bus
963 */ 963 */
964 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 964 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
965 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
966 965
967 switch (lec_type) { 966 switch (lec_type) {
968 case LEC_STUFF_ERROR: 967 case LEC_STUFF_ERROR:
@@ -975,8 +974,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
975 break; 974 break;
976 case LEC_ACK_ERROR: 975 case LEC_ACK_ERROR:
977 netdev_dbg(dev, "ack error\n"); 976 netdev_dbg(dev, "ack error\n");
978 cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | 977 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
979 CAN_ERR_PROT_LOC_ACK_DEL);
980 break; 978 break;
981 case LEC_BIT1_ERROR: 979 case LEC_BIT1_ERROR:
982 netdev_dbg(dev, "bit1 error\n"); 980 netdev_dbg(dev, "bit1 error\n");
@@ -988,8 +986,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
988 break; 986 break;
989 case LEC_CRC_ERROR: 987 case LEC_CRC_ERROR:
990 netdev_dbg(dev, "CRC error\n"); 988 netdev_dbg(dev, "CRC error\n");
991 cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | 989 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
992 CAN_ERR_PROT_LOC_CRC_DEL);
993 break; 990 break;
994 default: 991 default:
995 break; 992 break;
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 70a8cbb29e75..1e37313054f3 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -578,7 +578,7 @@ static int cc770_err(struct net_device *dev, u8 status)
578 cf->data[2] |= CAN_ERR_PROT_BIT0; 578 cf->data[2] |= CAN_ERR_PROT_BIT0;
579 break; 579 break;
580 case STAT_LEC_CRC: 580 case STAT_LEC_CRC:
581 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; 581 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
582 break; 582 break;
583 } 583 }
584 } 584 }
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 868fe945e35a..41c0fc9f3b14 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -535,13 +535,13 @@ static void do_bus_err(struct net_device *dev,
535 if (reg_esr & FLEXCAN_ESR_ACK_ERR) { 535 if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
536 netdev_dbg(dev, "ACK_ERR irq\n"); 536 netdev_dbg(dev, "ACK_ERR irq\n");
537 cf->can_id |= CAN_ERR_ACK; 537 cf->can_id |= CAN_ERR_ACK;
538 cf->data[3] |= CAN_ERR_PROT_LOC_ACK; 538 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
539 tx_errors = 1; 539 tx_errors = 1;
540 } 540 }
541 if (reg_esr & FLEXCAN_ESR_CRC_ERR) { 541 if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
542 netdev_dbg(dev, "CRC_ERR irq\n"); 542 netdev_dbg(dev, "CRC_ERR irq\n");
543 cf->data[2] |= CAN_ERR_PROT_BIT; 543 cf->data[2] |= CAN_ERR_PROT_BIT;
544 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; 544 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
545 rx_errors = 1; 545 rx_errors = 1;
546 } 546 }
547 if (reg_esr & FLEXCAN_ESR_FRM_ERR) { 547 if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index c1e85368a198..5d04f5464faf 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1096,7 +1096,6 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
1096 cf->data[2] |= CAN_ERR_PROT_STUFF; 1096 cf->data[2] |= CAN_ERR_PROT_STUFF;
1097 break; 1097 break;
1098 default: 1098 default:
1099 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
1100 cf->data[3] = ecc & ECC_SEG; 1099 cf->data[3] = ecc & ECC_SEG;
1101 break; 1100 break;
1102 } 1101 }
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index ef655177bb5e..39cf911f7a1e 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -487,7 +487,6 @@ static int m_can_handle_lec_err(struct net_device *dev,
487 * type of the last error to occur on the CAN bus 487 * type of the last error to occur on the CAN bus
488 */ 488 */
489 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 489 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
490 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
491 490
492 switch (lec_type) { 491 switch (lec_type) {
493 case LEC_STUFF_ERROR: 492 case LEC_STUFF_ERROR:
@@ -500,8 +499,7 @@ static int m_can_handle_lec_err(struct net_device *dev,
500 break; 499 break;
501 case LEC_ACK_ERROR: 500 case LEC_ACK_ERROR:
502 netdev_dbg(dev, "ack error\n"); 501 netdev_dbg(dev, "ack error\n");
503 cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | 502 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
504 CAN_ERR_PROT_LOC_ACK_DEL);
505 break; 503 break;
506 case LEC_BIT1_ERROR: 504 case LEC_BIT1_ERROR:
507 netdev_dbg(dev, "bit1 error\n"); 505 netdev_dbg(dev, "bit1 error\n");
@@ -513,8 +511,7 @@ static int m_can_handle_lec_err(struct net_device *dev,
513 break; 511 break;
514 case LEC_CRC_ERROR: 512 case LEC_CRC_ERROR:
515 netdev_dbg(dev, "CRC error\n"); 513 netdev_dbg(dev, "CRC error\n");
516 cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | 514 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
517 CAN_ERR_PROT_LOC_CRC_DEL);
518 break; 515 break;
519 default: 516 default:
520 break; 517 break;
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index e187ca783da0..c1317889d3d8 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -559,8 +559,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
559 stats->rx_errors++; 559 stats->rx_errors++;
560 break; 560 break;
561 case PCH_CRC_ERR: 561 case PCH_CRC_ERR:
562 cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | 562 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
563 CAN_ERR_PROT_LOC_CRC_DEL;
564 priv->can.can_stats.bus_error++; 563 priv->can.can_stats.bus_error++;
565 stats->rx_errors++; 564 stats->rx_errors++;
566 break; 565 break;
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 7bd54191f962..bc46be39549d 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -241,17 +241,16 @@ static void rcar_can_error(struct net_device *ndev)
 	u8 ecsr;
 
 	netdev_dbg(priv->ndev, "Bus error interrupt:\n");
-	if (skb) {
+	if (skb)
 		cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
-		cf->data[2] = CAN_ERR_PROT_UNSPEC;
-	}
+
 	ecsr = readb(&priv->regs->ecsr);
 	if (ecsr & RCAR_CAN_ECSR_ADEF) {
 		netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
 		tx_errors++;
 		writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
 		if (skb)
-			cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL;
+			cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL;
 	}
 	if (ecsr & RCAR_CAN_ECSR_BE0F) {
 		netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
@@ -272,7 +271,7 @@ static void rcar_can_error(struct net_device *ndev)
 		rx_errors++;
 		writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
 		if (skb)
-			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
 	}
 	if (ecsr & RCAR_CAN_ECSR_AEF) {
 		netdev_dbg(priv->ndev, "ACK Error\n");
@@ -280,7 +279,7 @@ static void rcar_can_error(struct net_device *ndev)
 		writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
 		if (skb) {
 			cf->can_id |= CAN_ERR_ACK;
-			cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
 		}
 	}
 	if (ecsr & RCAR_CAN_ECSR_FEF) {
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 7b92e911a616..8dda3b703d39 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -218,6 +218,9 @@ static void sja1000_start(struct net_device *dev)
 	priv->write_reg(priv, SJA1000_RXERR, 0x0);
 	priv->read_reg(priv, SJA1000_ECC);
 
+	/* clear interrupt flags */
+	priv->read_reg(priv, SJA1000_IR);
+
 	/* leave reset mode */
 	set_normal_mode(dev);
 }
@@ -446,7 +449,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
 		cf->data[2] |= CAN_ERR_PROT_STUFF;
 		break;
 	default:
-		cf->data[2] |= CAN_ERR_PROT_UNSPEC;
 		cf->data[3] = ecc & ECC_SEG;
 		break;
 	}
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index d9a42c646783..68ef0a4cd821 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -575,7 +575,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
 		cf->data[2] |= CAN_ERR_PROT_STUFF;
 		break;
 	default:
-		cf->data[2] |= CAN_ERR_PROT_UNSPEC;
 		cf->data[3] = (ecc & SUN4I_STA_ERR_SEG_CODE)
 			       >> 16;
 		break;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index cf345cbfe819..680d1ff07a55 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -722,7 +722,6 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
 	if (err_status & HECC_BUS_ERROR) {
 		++priv->can.can_stats.bus_error;
 		cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
-		cf->data[2] |= CAN_ERR_PROT_UNSPEC;
 		if (err_status & HECC_CANES_FE) {
 			hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
 			cf->data[2] |= CAN_ERR_PROT_FORM;
@@ -737,13 +736,11 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
 		}
 		if (err_status & HECC_CANES_CRCE) {
 			hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
-			cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
-					CAN_ERR_PROT_LOC_CRC_DEL;
+			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
 		}
 		if (err_status & HECC_CANES_ACKE) {
 			hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
-			cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
-					CAN_ERR_PROT_LOC_ACK_DEL;
+			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
 		}
 	}
 
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 2d390384ef3b..fc5b75675cd8 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -377,7 +377,6 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
 		cf->data[2] |= CAN_ERR_PROT_STUFF;
 		break;
 	default:
-		cf->data[2] |= CAN_ERR_PROT_UNSPEC;
 		cf->data[3] = ecc & SJA1000_ECC_SEG;
 		break;
 	}
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 0e5a4493ba4f..113e64fcd73b 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -282,7 +282,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
 		cf->data[2] |= CAN_ERR_PROT_STUFF;
 		break;
 	default:
-		cf->data[2] |= CAN_ERR_PROT_UNSPEC;
 		cf->data[3] = ecc & SJA1000_ECC_SEG;
 		break;
 	}
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 8b17a9065b0b..022bfa13ebfa 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -944,10 +944,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 		cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
 
 		if (es->leaf.error_factor & M16C_EF_ACKE)
-			cf->data[3] |= (CAN_ERR_PROT_LOC_ACK);
+			cf->data[3] = CAN_ERR_PROT_LOC_ACK;
 		if (es->leaf.error_factor & M16C_EF_CRCE)
-			cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
-					CAN_ERR_PROT_LOC_CRC_DEL);
+			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
 		if (es->leaf.error_factor & M16C_EF_FORME)
 			cf->data[2] |= CAN_ERR_PROT_FORM;
 		if (es->leaf.error_factor & M16C_EF_STFE)
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index de95b1ccba3e..a731720f1d13 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -401,9 +401,7 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
 		tx_errors = 1;
 		break;
 	case USB_8DEV_STATUSMSG_CRC:
-		cf->data[2] |= CAN_ERR_PROT_UNSPEC;
-		cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
-			       CAN_ERR_PROT_LOC_CRC_DEL;
+		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
 		rx_errors = 1;
 		break;
 	case USB_8DEV_STATUSMSG_BIT0:
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index fc55e8e0351d..51670b322409 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -608,17 +608,15 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
 
 	/* Check for error interrupt */
 	if (isr & XCAN_IXR_ERROR_MASK) {
-		if (skb) {
+		if (skb)
 			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
-			cf->data[2] |= CAN_ERR_PROT_UNSPEC;
-		}
 
 		/* Check for Ack error interrupt */
 		if (err_status & XCAN_ESR_ACKER_MASK) {
 			stats->tx_errors++;
 			if (skb) {
 				cf->can_id |= CAN_ERR_ACK;
-				cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+				cf->data[3] = CAN_ERR_PROT_LOC_ACK;
 			}
 		}
 
@@ -654,8 +652,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
 			stats->rx_errors++;
 			if (skb) {
 				cf->can_id |= CAN_ERR_PROT;
-				cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ |
-						CAN_ERR_PROT_LOC_CRC_DEL;
+				cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
 			}
 		}
 		priv->can.can_stats.bus_error++;
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 9093577755f6..0527f485c3dc 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -15,9 +15,7 @@
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <net/dsa.h>
-
-#define REG_PORT(p)		(8 + (p))
-#define REG_GLOBAL		0x0f
+#include "mv88e6060.h"
 
 static int reg_read(struct dsa_switch *ds, int addr, int reg)
 {
@@ -67,13 +65,14 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
 	if (bus == NULL)
 		return NULL;
 
-	ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
+	ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID);
 	if (ret >= 0) {
-		if (ret == 0x0600)
+		if (ret == PORT_SWITCH_ID_6060)
 			return "Marvell 88E6060 (A0)";
-		if (ret == 0x0601 || ret == 0x0602)
+		if (ret == PORT_SWITCH_ID_6060_R1 ||
+		    ret == PORT_SWITCH_ID_6060_R2)
 			return "Marvell 88E6060 (B0)";
-		if ((ret & 0xfff0) == 0x0600)
+		if ((ret & PORT_SWITCH_ID_6060_MASK) == PORT_SWITCH_ID_6060)
 			return "Marvell 88E6060";
 	}
 
@@ -87,22 +86,26 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
 	unsigned long timeout;
 
 	/* Set all ports to the disabled state. */
-	for (i = 0; i < 6; i++) {
-		ret = REG_READ(REG_PORT(i), 0x04);
-		REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+	for (i = 0; i < MV88E6060_PORTS; i++) {
+		ret = REG_READ(REG_PORT(i), PORT_CONTROL);
+		REG_WRITE(REG_PORT(i), PORT_CONTROL,
+			  ret & ~PORT_CONTROL_STATE_MASK);
 	}
 
 	/* Wait for transmit queues to drain. */
 	usleep_range(2000, 4000);
 
 	/* Reset the switch. */
-	REG_WRITE(REG_GLOBAL, 0x0a, 0xa130);
+	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+		  GLOBAL_ATU_CONTROL_SWRESET |
+		  GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
+		  GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
 
 	/* Wait up to one second for reset to complete. */
 	timeout = jiffies + 1 * HZ;
 	while (time_before(jiffies, timeout)) {
-		ret = REG_READ(REG_GLOBAL, 0x00);
-		if ((ret & 0x8000) == 0x0000)
+		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+		if (ret & GLOBAL_STATUS_INIT_READY)
 			break;
 
 		usleep_range(1000, 2000);
@@ -119,13 +122,15 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
 	 * set the maximum frame size to 1536 bytes, and mask all
 	 * interrupt sources.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x04, 0x0800);
+	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
 
 	/* Enable automatic address learning, set the address
 	 * database size to 1024 entries, and set the default aging
 	 * time to 5 minutes.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x0a, 0x2130);
+	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+		  GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
+		  GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
 
 	return 0;
 }
@@ -139,25 +144,30 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
 	 * state to Forwarding. Additionally, if this is the CPU
 	 * port, enable Ingress and Egress Trailer tagging mode.
 	 */
-	REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003);
+	REG_WRITE(addr, PORT_CONTROL,
+		  dsa_is_cpu_port(ds, p) ?
+			PORT_CONTROL_TRAILER |
+			PORT_CONTROL_INGRESS_MODE |
+			PORT_CONTROL_STATE_FORWARDING :
+			PORT_CONTROL_STATE_FORWARDING);
 
 	/* Port based VLAN map: give each port its own address
 	 * database, allow the CPU port to talk to each of the 'real'
 	 * ports, and allow each of the 'real' ports to only talk to
 	 * the CPU port.
 	 */
-	REG_WRITE(addr, 0x06,
-		  ((p & 0xf) << 12) |
+	REG_WRITE(addr, PORT_VLAN_MAP,
+		  ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
 		  (dsa_is_cpu_port(ds, p) ?
 		   ds->phys_port_mask :
-		   (1 << ds->dst->cpu_port)));
+		   BIT(ds->dst->cpu_port)));
 
 	/* Port Association Vector: when learning source addresses
 	 * of packets, add the address to the address database using
 	 * a port bitmap that has only the bit for this port set and
 	 * the other bits clear.
 	 */
-	REG_WRITE(addr, 0x0b, 1 << p);
+	REG_WRITE(addr, PORT_ASSOC_VECTOR, BIT(p));
 
 	return 0;
 }
@@ -177,7 +187,7 @@ static int mv88e6060_setup(struct dsa_switch *ds)
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < 6; i++) {
+	for (i = 0; i < MV88E6060_PORTS; i++) {
 		ret = mv88e6060_setup_port(ds, i);
 		if (ret < 0)
 			return ret;
@@ -188,16 +198,17 @@ static int mv88e6060_setup(struct dsa_switch *ds)
 
 static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
 {
-	REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
-	REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
-	REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
+	/* Use the same MAC Address as FD Pause frames for all ports */
+	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]);
+	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
+	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
 
 	return 0;
 }
 
 static int mv88e6060_port_to_phy_addr(int port)
 {
-	if (port >= 0 && port <= 5)
+	if (port >= 0 && port < MV88E6060_PORTS)
 		return port;
 	return -1;
 }
@@ -225,54 +236,6 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
 	return reg_write(ds, addr, regnum, val);
 }
 
-static void mv88e6060_poll_link(struct dsa_switch *ds)
-{
-	int i;
-
-	for (i = 0; i < DSA_MAX_PORTS; i++) {
-		struct net_device *dev;
-		int uninitialized_var(port_status);
-		int link;
-		int speed;
-		int duplex;
-		int fc;
-
-		dev = ds->ports[i];
-		if (dev == NULL)
-			continue;
-
-		link = 0;
-		if (dev->flags & IFF_UP) {
-			port_status = reg_read(ds, REG_PORT(i), 0x00);
-			if (port_status < 0)
-				continue;
-
-			link = !!(port_status & 0x1000);
-		}
-
-		if (!link) {
-			if (netif_carrier_ok(dev)) {
-				netdev_info(dev, "link down\n");
-				netif_carrier_off(dev);
-			}
-			continue;
-		}
-
-		speed = (port_status & 0x0100) ? 100 : 10;
-		duplex = (port_status & 0x0200) ? 1 : 0;
-		fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0;
-
-		if (!netif_carrier_ok(dev)) {
-			netdev_info(dev,
-				    "link up, %d Mb/s, %s duplex, flow control %sabled\n",
-				    speed,
-				    duplex ? "full" : "half",
-				    fc ? "en" : "dis");
-			netif_carrier_on(dev);
-		}
-	}
-}
-
 static struct dsa_switch_driver mv88e6060_switch_driver = {
 	.tag_protocol	= DSA_TAG_PROTO_TRAILER,
 	.probe		= mv88e6060_probe,
285 247
286static int __init mv88e6060_init(void) 248static int __init mv88e6060_init(void)
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h
new file mode 100644
index 000000000000..cc9b2ed4aff4
--- /dev/null
+++ b/drivers/net/dsa/mv88e6060.h
@@ -0,0 +1,111 @@
+/*
+ * drivers/net/dsa/mv88e6060.h - Marvell 88e6060 switch chip support
+ * Copyright (c) 2015 Neil Armstrong
+ *
+ * Based on mv88e6xxx.h
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __MV88E6060_H
+#define __MV88E6060_H
+
+#define MV88E6060_PORTS	6
+
+#define REG_PORT(p)		(0x8 + (p))
+#define PORT_STATUS		0x00
+#define PORT_STATUS_PAUSE_EN	BIT(15)
+#define PORT_STATUS_MY_PAUSE	BIT(14)
+#define PORT_STATUS_FC		(PORT_STATUS_MY_PAUSE | PORT_STATUS_PAUSE_EN)
+#define PORT_STATUS_RESOLVED	BIT(13)
+#define PORT_STATUS_LINK	BIT(12)
+#define PORT_STATUS_PORTMODE	BIT(11)
+#define PORT_STATUS_PHYMODE	BIT(10)
+#define PORT_STATUS_DUPLEX	BIT(9)
+#define PORT_STATUS_SPEED	BIT(8)
+#define PORT_SWITCH_ID		0x03
+#define PORT_SWITCH_ID_6060	0x0600
+#define PORT_SWITCH_ID_6060_MASK	0xfff0
+#define PORT_SWITCH_ID_6060_R1	0x0601
+#define PORT_SWITCH_ID_6060_R2	0x0602
+#define PORT_CONTROL		0x04
+#define PORT_CONTROL_FORCE_FLOW_CTRL	BIT(15)
+#define PORT_CONTROL_TRAILER	BIT(14)
+#define PORT_CONTROL_HEADER	BIT(11)
+#define PORT_CONTROL_INGRESS_MODE	BIT(8)
+#define PORT_CONTROL_VLAN_TUNNEL	BIT(7)
+#define PORT_CONTROL_STATE_MASK	0x03
+#define PORT_CONTROL_STATE_DISABLED	0x00
+#define PORT_CONTROL_STATE_BLOCKING	0x01
+#define PORT_CONTROL_STATE_LEARNING	0x02
+#define PORT_CONTROL_STATE_FORWARDING	0x03
+#define PORT_VLAN_MAP		0x06
+#define PORT_VLAN_MAP_DBNUM_SHIFT	12
+#define PORT_VLAN_MAP_TABLE_MASK	0x1f
+#define PORT_ASSOC_VECTOR	0x0b
+#define PORT_ASSOC_VECTOR_MONITOR	BIT(15)
+#define PORT_ASSOC_VECTOR_PAV_MASK	0x1f
+#define PORT_RX_CNTR		0x10
+#define PORT_TX_CNTR		0x11
+
+#define REG_GLOBAL		0x0f
+#define GLOBAL_STATUS		0x00
+#define GLOBAL_STATUS_SW_MODE_MASK	(0x3 << 12)
+#define GLOBAL_STATUS_SW_MODE_0	(0x0 << 12)
+#define GLOBAL_STATUS_SW_MODE_1	(0x1 << 12)
+#define GLOBAL_STATUS_SW_MODE_2	(0x2 << 12)
+#define GLOBAL_STATUS_SW_MODE_3	(0x3 << 12)
+#define GLOBAL_STATUS_INIT_READY	BIT(11)
+#define GLOBAL_STATUS_ATU_FULL	BIT(3)
+#define GLOBAL_STATUS_ATU_DONE	BIT(2)
+#define GLOBAL_STATUS_PHY_INT	BIT(1)
+#define GLOBAL_STATUS_EEINT	BIT(0)
+#define GLOBAL_MAC_01		0x01
+#define GLOBAL_MAC_01_DIFF_ADDR	BIT(8)
+#define GLOBAL_MAC_23		0x02
+#define GLOBAL_MAC_45		0x03
+#define GLOBAL_CONTROL		0x04
+#define GLOBAL_CONTROL_DISCARD_EXCESS	BIT(13)
+#define GLOBAL_CONTROL_MAX_FRAME_1536	BIT(10)
+#define GLOBAL_CONTROL_RELOAD_EEPROM	BIT(9)
+#define GLOBAL_CONTROL_CTRMODE	BIT(8)
+#define GLOBAL_CONTROL_ATU_FULL_EN	BIT(3)
+#define GLOBAL_CONTROL_ATU_DONE_EN	BIT(2)
+#define GLOBAL_CONTROL_PHYINT_EN	BIT(1)
+#define GLOBAL_CONTROL_EEPROM_DONE_EN	BIT(0)
+#define GLOBAL_ATU_CONTROL	0x0a
+#define GLOBAL_ATU_CONTROL_SWRESET	BIT(15)
+#define GLOBAL_ATU_CONTROL_LEARNDIS	BIT(14)
+#define GLOBAL_ATU_CONTROL_ATUSIZE_256	(0x0 << 12)
+#define GLOBAL_ATU_CONTROL_ATUSIZE_512	(0x1 << 12)
+#define GLOBAL_ATU_CONTROL_ATUSIZE_1024	(0x2 << 12)
+#define GLOBAL_ATU_CONTROL_ATE_AGE_SHIFT	4
+#define GLOBAL_ATU_CONTROL_ATE_AGE_MASK	(0xff << 4)
+#define GLOBAL_ATU_CONTROL_ATE_AGE_5MIN	(0x13 << 4)
+#define GLOBAL_ATU_OP		0x0b
+#define GLOBAL_ATU_OP_BUSY	BIT(15)
+#define GLOBAL_ATU_OP_NOP		(0 << 12)
+#define GLOBAL_ATU_OP_FLUSH_ALL		((1 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_UNLOCKED	((2 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_LOAD_DB		((3 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_GET_NEXT_DB	((4 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_DB		((5 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_UNLOCKED_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_DATA		0x0c
+#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK	0x3f0
+#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT	4
+#define GLOBAL_ATU_DATA_STATE_MASK	0x0f
+#define GLOBAL_ATU_DATA_STATE_UNUSED	0x00
+#define GLOBAL_ATU_DATA_STATE_UC_STATIC	0x0e
+#define GLOBAL_ATU_DATA_STATE_UC_LOCKED	0x0f
+#define GLOBAL_ATU_DATA_STATE_MC_STATIC	0x07
+#define GLOBAL_ATU_DATA_STATE_MC_LOCKED	0x0e
+#define GLOBAL_ATU_MAC_01	0x0d
+#define GLOBAL_ATU_MAC_23	0x0e
+#define GLOBAL_ATU_MAC_45	0x0f
+
+#endif
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 05aa7597dab9..31c5e476fd64 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -29,6 +29,7 @@ source "drivers/net/ethernet/apm/Kconfig"
 source "drivers/net/ethernet/apple/Kconfig"
 source "drivers/net/ethernet/arc/Kconfig"
 source "drivers/net/ethernet/atheros/Kconfig"
+source "drivers/net/ethernet/aurora/Kconfig"
 source "drivers/net/ethernet/cadence/Kconfig"
 source "drivers/net/ethernet/adi/Kconfig"
 source "drivers/net/ethernet/broadcom/Kconfig"
@@ -78,7 +79,6 @@ source "drivers/net/ethernet/ibm/Kconfig"
 source "drivers/net/ethernet/intel/Kconfig"
 source "drivers/net/ethernet/i825xx/Kconfig"
 source "drivers/net/ethernet/xscale/Kconfig"
-source "drivers/net/ethernet/icplus/Kconfig"
 
 config JME
 	tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index ddfc808110a1..071f84eb6f3f 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_XGENE) += apm/
 obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
 obj-$(CONFIG_NET_VENDOR_ARC) += arc/
 obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
+obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
 obj-$(CONFIG_NET_CADENCE) += cadence/
 obj-$(CONFIG_NET_BFIN) += adi/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
@@ -41,7 +42,6 @@ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
 obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
 obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
 obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
-obj-$(CONFIG_IP1000) += icplus/
 obj-$(CONFIG_JME) += jme.o
 obj-$(CONFIG_KORINA) += korina.o
 obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index e2afabf3a465..7ccebae9cb48 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1500,10 +1500,11 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return -ENODEV;
 	}
 
-	if (!pci_set_dma_mask(pdev, PCNET32_DMA_MASK)) {
+	err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
+	if (err) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
-		return -ENODEV;
+		return err;
 	}
 	if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 970781a9e677..f6a7161e3b85 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1849,7 +1849,7 @@ static int xgbe_exit(struct xgbe_prv_data *pdata)
 	usleep_range(10, 15);
 
 	/* Poll Until Poll Condition */
-	while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+	while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
 		usleep_range(500, 600);
 
 	if (!count)
@@ -1873,7 +1873,7 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
 	/* Poll Until Poll Condition */
 	for (i = 0; i < pdata->tx_q_count; i++) {
 		count = 2000;
-		while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
+		while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
 							MTL_Q_TQOMR, FTQ))
 			usleep_range(500, 600);
 
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 991412ce6f48..d0ae1a6cc212 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -289,6 +289,7 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
 				       struct sk_buff *skb)
 {
 	struct device *dev = ndev_to_dev(tx_ring->ndev);
+	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
 	struct xgene_enet_raw_desc *raw_desc;
 	__le64 *exp_desc = NULL, *exp_bufs = NULL;
 	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
@@ -419,6 +420,7 @@ out:
 	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
 				   SET_VAL(USERINFO, tx_ring->tail));
 	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
+	pdata->tx_level += count;
 	tx_ring->tail = tail;
 
 	return count;
@@ -429,14 +431,13 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
-	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
-	u32 tx_level, cq_level;
+	u32 tx_level = pdata->tx_level;
 	int count;
 
-	tx_level = pdata->ring_ops->len(tx_ring);
-	cq_level = pdata->ring_ops->len(cp_ring);
-	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
-		     cq_level > pdata->cp_qcnt_hi)) {
+	if (tx_level < pdata->txc_level)
+		tx_level += ((typeof(pdata->tx_level))~0U);
+
+	if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) {
 		netif_stop_queue(ndev);
 		return NETDEV_TX_BUSY;
 	}
@@ -450,12 +451,12 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
-	pdata->ring_ops->wr_cmd(tx_ring, count);
 	skb_tx_timestamp(skb);
 
 	pdata->stats.tx_packets++;
 	pdata->stats.tx_bytes += skb->len;
 
+	pdata->ring_ops->wr_cmd(tx_ring, count);
 	return NETDEV_TX_OK;
 }
 
@@ -539,10 +540,13 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
 	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
 	u16 head = ring->head;
 	u16 slots = ring->slots - 1;
-	int ret, count = 0, processed = 0;
+	int ret, desc_count, count = 0, processed = 0;
+	bool is_completion;
 
 	do {
 		raw_desc = &ring->raw_desc[head];
+		desc_count = 0;
+		is_completion = false;
 		exp_desc = NULL;
 		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
 			break;
@@ -559,18 +563,24 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
 			}
 			dma_rmb();
 			count++;
+			desc_count++;
 		}
-		if (is_rx_desc(raw_desc))
+		if (is_rx_desc(raw_desc)) {
 			ret = xgene_enet_rx_frame(ring, raw_desc);
-		else
+		} else {
 			ret = xgene_enet_tx_completion(ring, raw_desc);
+			is_completion = true;
+		}
 		xgene_enet_mark_desc_slot_empty(raw_desc);
 		if (exp_desc)
 			xgene_enet_mark_desc_slot_empty(exp_desc);
 
 		head = (head + 1) & slots;
 		count++;
+		desc_count++;
 		processed++;
+		if (is_completion)
+			pdata->txc_level += desc_count;
 
 		if (ret)
 			break;
@@ -580,10 +590,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
 		pdata->ring_ops->wr_cmd(ring, -count);
 		ring->head = head;
 
-		if (netif_queue_stopped(ring->ndev)) {
-			if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
-				netif_wake_queue(ring->ndev);
-		}
+		if (netif_queue_stopped(ring->ndev))
+			netif_start_queue(ring->ndev);
 	}
 
 	return processed;
@@ -688,10 +696,10 @@ static int xgene_enet_open(struct net_device *ndev)
 	mac_ops->tx_enable(pdata);
 	mac_ops->rx_enable(pdata);
 
+	xgene_enet_napi_enable(pdata);
 	ret = xgene_enet_register_irq(ndev);
 	if (ret)
 		return ret;
-	xgene_enet_napi_enable(pdata);
 
 	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
 		phy_start(pdata->phy_dev);
@@ -715,13 +723,13 @@ static int xgene_enet_close(struct net_device *ndev)
 	else
 		cancel_delayed_work_sync(&pdata->link_work);
 
-	xgene_enet_napi_disable(pdata);
-	xgene_enet_free_irq(ndev);
-	xgene_enet_process_ring(pdata->rx_ring, -1);
-
 	mac_ops->tx_disable(pdata);
 	mac_ops->rx_disable(pdata);
 
+	xgene_enet_free_irq(ndev);
+	xgene_enet_napi_disable(pdata);
+	xgene_enet_process_ring(pdata->rx_ring, -1);
+
 	return 0;
 }
 
@@ -1033,9 +1041,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 	pdata->tx_ring->cp_ring = cp_ring;
 	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
 
-	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
-	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
-	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;
+	pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128;
 
 	return 0;
 
@@ -1474,15 +1480,15 @@ static int xgene_enet_probe(struct platform_device *pdev)
 	}
 	ndev->hw_features = ndev->features;
 
-	ret = register_netdev(ndev);
+	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
 	if (ret) {
-		netdev_err(ndev, "Failed to register netdev\n");
+		netdev_err(ndev, "No usable DMA configuration\n");
 		goto err;
 	}
 
-	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	ret = register_netdev(ndev);
 	if (ret) {
-		netdev_err(ndev, "No usable DMA configuration\n");
+		netdev_err(ndev, "Failed to register netdev\n");
 		goto err;
 	}
 
@@ -1490,14 +1496,17 @@ static int xgene_enet_probe(struct platform_device *pdev)
 	if (ret)
 		goto err;
 
-	xgene_enet_napi_add(pdata);
 	mac_ops = pdata->mac_ops;
-	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
 		ret = xgene_enet_mdio_config(pdata);
-	else
+		if (ret)
+			goto err;
+	} else {
 		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
+	}
 
-	return ret;
+	xgene_enet_napi_add(pdata);
+	return 0;
 err:
 	unregister_netdev(ndev);
 	free_netdev(ndev);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index a6e56b88c0a0..1aa72c787f8d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -155,11 +155,11 @@ struct xgene_enet_pdata {
 	enum xgene_enet_id enet_id;
 	struct xgene_enet_desc_ring *tx_ring;
 	struct xgene_enet_desc_ring *rx_ring;
+	u16 tx_level;
+	u16 txc_level;
 	char *dev_name;
 	u32 rx_buff_cnt;
 	u32 tx_qcnt_hi;
-	u32 cp_qcnt_hi;
-	u32 cp_qcnt_low;
 	u32 rx_irq;
 	u32 txc_irq;
 	u8 cq_cnt;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c8af3ce3ea38..bd377a6b067d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1534,6 +1534,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
 	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
 	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
 	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
+	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
 	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
 	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
 	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index af006b44b2a6..0959e6824cb6 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -37,6 +37,7 @@
 
 #define ALX_DEV_ID_AR8161		0x1091
 #define ALX_DEV_ID_E2200		0xe091
+#define ALX_DEV_ID_E2400		0xe0a1
 #define ALX_DEV_ID_AR8162		0x1090
 #define ALX_DEV_ID_AR8171		0x10A1
 #define ALX_DEV_ID_AR8172		0x10A0
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 2795d6db10e1..8b5988e210d5 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1016,13 +1016,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
1016 sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 1016 sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
1017 8 * 4; 1017 8 * 4;
1018 1018
1019 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, 1019 ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
1020 &ring_header->dma); 1020 &ring_header->dma, GFP_KERNEL);
1021 if (unlikely(!ring_header->desc)) { 1021 if (unlikely(!ring_header->desc)) {
1022 dev_err(&pdev->dev, "pci_alloc_consistend failed\n"); 1022 dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
1023 goto err_nomem; 1023 goto err_nomem;
1024 } 1024 }
1025 memset(ring_header->desc, 0, ring_header->size);
1026 /* init TPD ring */ 1025 /* init TPD ring */
1027 1026
1028 tpd_ring[0].dma = roundup(ring_header->dma, 8); 1027 tpd_ring[0].dma = roundup(ring_header->dma, 8);
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
new file mode 100644
index 000000000000..8ba7f8ff3434
--- /dev/null
+++ b/drivers/net/ethernet/aurora/Kconfig
@@ -0,0 +1,21 @@
+config NET_VENDOR_AURORA
+	bool "Aurora VLSI devices"
+	help
+	  If you have a network (Ethernet) device belonging to this class,
+	  say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  questions about Aurora devices. If you say Y, you will be asked
+	  for your specific device in the following questions.
+
+if NET_VENDOR_AURORA
+
+config AURORA_NB8800
+	tristate "Aurora AU-NB8800 support"
+	depends on HAS_DMA
+	select PHYLIB
+	help
+	  Support for the AU-NB8800 gigabit Ethernet controller.
+
+endif
diff --git a/drivers/net/ethernet/aurora/Makefile b/drivers/net/ethernet/aurora/Makefile
new file mode 100644
index 000000000000..6cb528a2fc26
--- /dev/null
+++ b/drivers/net/ethernet/aurora/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_AURORA_NB8800) += nb8800.o
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
new file mode 100644
index 000000000000..ecc4a334c507
--- /dev/null
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -0,0 +1,1552 @@
1/*
2 * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
3 *
4 * Mostly rewritten, based on driver from Sigma Designs. Original
5 * copyright notice below.
6 *
7 *
8 * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
9 *
10 * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 */
22
23#include <linux/module.h>
24#include <linux/etherdevice.h>
25#include <linux/delay.h>
26#include <linux/ethtool.h>
27#include <linux/interrupt.h>
28#include <linux/platform_device.h>
29#include <linux/of_device.h>
30#include <linux/of_mdio.h>
31#include <linux/of_net.h>
32#include <linux/dma-mapping.h>
33#include <linux/phy.h>
34#include <linux/cache.h>
35#include <linux/jiffies.h>
36#include <linux/io.h>
37#include <linux/iopoll.h>
38#include <asm/barrier.h>
39
40#include "nb8800.h"
41
42static void nb8800_tx_done(struct net_device *dev);
43static int nb8800_dma_stop(struct net_device *dev);
44
45static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg)
46{
47 return readb_relaxed(priv->base + reg);
48}
49
50static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg)
51{
52 return readl_relaxed(priv->base + reg);
53}
54
55static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val)
56{
57 writeb_relaxed(val, priv->base + reg);
58}
59
60static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val)
61{
62 writew_relaxed(val, priv->base + reg);
63}
64
65static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val)
66{
67 writel_relaxed(val, priv->base + reg);
68}
69
70static inline void nb8800_maskb(struct nb8800_priv *priv, int reg,
71 u32 mask, u32 val)
72{
73 u32 old = nb8800_readb(priv, reg);
74 u32 new = (old & ~mask) | (val & mask);
75
76 if (new != old)
77 nb8800_writeb(priv, reg, new);
78}
79
80static inline void nb8800_maskl(struct nb8800_priv *priv, int reg,
81 u32 mask, u32 val)
82{
83 u32 old = nb8800_readl(priv, reg);
84 u32 new = (old & ~mask) | (val & mask);
85
86 if (new != old)
87 nb8800_writel(priv, reg, new);
88}
89
90static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits,
91 bool set)
92{
93 nb8800_maskb(priv, reg, bits, set ? bits : 0);
94}
95
96static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits)
97{
98 nb8800_maskb(priv, reg, bits, bits);
99}
100
101static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits)
102{
103 nb8800_maskb(priv, reg, bits, 0);
104}
105
106static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits,
107 bool set)
108{
109 nb8800_maskl(priv, reg, bits, set ? bits : 0);
110}
111
112static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits)
113{
114 nb8800_maskl(priv, reg, bits, bits);
115}
116
117static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits)
118{
119 nb8800_maskl(priv, reg, bits, 0);
120}
121
122static int nb8800_mdio_wait(struct mii_bus *bus)
123{
124 struct nb8800_priv *priv = bus->priv;
125 u32 val;
126
127 return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD,
128 val, !(val & MDIO_CMD_GO), 1, 1000);
129}
130
131static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd)
132{
133 struct nb8800_priv *priv = bus->priv;
134 int err;
135
136 err = nb8800_mdio_wait(bus);
137 if (err)
138 return err;
139
140 nb8800_writel(priv, NB8800_MDIO_CMD, cmd);
141 udelay(10);
142 nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO);
143
144 return nb8800_mdio_wait(bus);
145}
146
147static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg)
148{
149 struct nb8800_priv *priv = bus->priv;
150 u32 val;
151 int err;
152
153 err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg));
154 if (err)
155 return err;
156
157 val = nb8800_readl(priv, NB8800_MDIO_STS);
158 if (val & MDIO_STS_ERR)
159 return 0xffff;
160
161 return val & 0xffff;
162}
163
164static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
165{
166 u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) |
167 MDIO_CMD_DATA(val) | MDIO_CMD_WR;
168
169 return nb8800_mdio_cmd(bus, cmd);
170}
171
172static void nb8800_mac_tx(struct net_device *dev, bool enable)
173{
174 struct nb8800_priv *priv = netdev_priv(dev);
175
176 while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN)
177 cpu_relax();
178
179 nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable);
180}
181
182static void nb8800_mac_rx(struct net_device *dev, bool enable)
183{
184 nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable);
185}
186
187static void nb8800_mac_af(struct net_device *dev, bool enable)
188{
189 nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable);
190}
191
192static void nb8800_start_rx(struct net_device *dev)
193{
194 nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN);
195}
196
197static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi)
198{
199 struct nb8800_priv *priv = netdev_priv(dev);
200 struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
201 struct nb8800_rx_buf *rxb = &priv->rx_bufs[i];
202 int size = L1_CACHE_ALIGN(RX_BUF_SIZE);
203 dma_addr_t dma_addr;
204 struct page *page;
205 unsigned long offset;
206 void *data;
207
208 data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size);
209 if (!data)
210 return -ENOMEM;
211
212 page = virt_to_head_page(data);
213 offset = data - page_address(page);
214
215 dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
216 DMA_FROM_DEVICE);
217
218 if (dma_mapping_error(&dev->dev, dma_addr)) {
219 skb_free_frag(data);
220 return -ENOMEM;
221 }
222
223 rxb->page = page;
224 rxb->offset = offset;
225 rxd->desc.s_addr = dma_addr;
226
227 return 0;
228}
229
230static void nb8800_receive(struct net_device *dev, unsigned int i,
231 unsigned int len)
232{
233 struct nb8800_priv *priv = netdev_priv(dev);
234 struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
235 struct page *page = priv->rx_bufs[i].page;
236 int offset = priv->rx_bufs[i].offset;
237 void *data = page_address(page) + offset;
238 dma_addr_t dma = rxd->desc.s_addr;
239 struct sk_buff *skb;
240 unsigned int size;
241 int err;
242
243 size = len <= RX_COPYBREAK ? len : RX_COPYHDR;
244
245 skb = napi_alloc_skb(&priv->napi, size);
246 if (!skb) {
247 netdev_err(dev, "rx skb allocation failed\n");
248 dev->stats.rx_dropped++;
249 return;
250 }
251
252 if (len <= RX_COPYBREAK) {
253 dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
254 memcpy(skb_put(skb, len), data, len);
255 dma_sync_single_for_device(&dev->dev, dma, len,
256 DMA_FROM_DEVICE);
257 } else {
258 err = nb8800_alloc_rx(dev, i, true);
259 if (err) {
260 netdev_err(dev, "rx buffer allocation failed\n");
261 dev->stats.rx_dropped++;
262 return;
263 }
264
265 dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
266 memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR);
267 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
268 offset + RX_COPYHDR, len - RX_COPYHDR,
269 RX_BUF_SIZE);
270 }
271
272 skb->protocol = eth_type_trans(skb, dev);
273 napi_gro_receive(&priv->napi, skb);
274}
275
276static void nb8800_rx_error(struct net_device *dev, u32 report)
277{
278 if (report & RX_LENGTH_ERR)
279 dev->stats.rx_length_errors++;
280
281 if (report & RX_FCS_ERR)
282 dev->stats.rx_crc_errors++;
283
284 if (report & RX_FIFO_OVERRUN)
285 dev->stats.rx_fifo_errors++;
286
287 if (report & RX_ALIGNMENT_ERROR)
288 dev->stats.rx_frame_errors++;
289
290 dev->stats.rx_errors++;
291}
292
293static int nb8800_poll(struct napi_struct *napi, int budget)
294{
295 struct net_device *dev = napi->dev;
296 struct nb8800_priv *priv = netdev_priv(dev);
297 struct nb8800_rx_desc *rxd;
298 unsigned int last = priv->rx_eoc;
299 unsigned int next;
300 int work = 0;
301
302 nb8800_tx_done(dev);
303
304again:
305 while (work < budget) {
306 struct nb8800_rx_buf *rxb;
307 unsigned int len;
308
309 next = (last + 1) % RX_DESC_COUNT;
310
311 rxb = &priv->rx_bufs[next];
312 rxd = &priv->rx_descs[next];
313
314 if (!rxd->report)
315 break;
316
317 len = RX_BYTES_TRANSFERRED(rxd->report);
318
319 if (IS_RX_ERROR(rxd->report))
320 nb8800_rx_error(dev, rxd->report);
321 else
322 nb8800_receive(dev, next, len);
323
324 dev->stats.rx_packets++;
325 dev->stats.rx_bytes += len;
326
327 if (rxd->report & RX_MULTICAST_PKT)
328 dev->stats.multicast++;
329
330 rxd->report = 0;
331 last = next;
332 work++;
333 }
334
335 if (work) {
336 priv->rx_descs[last].desc.config |= DESC_EOC;
337 wmb(); /* ensure new EOC is written before clearing old */
338 priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC;
339 priv->rx_eoc = last;
340 nb8800_start_rx(dev);
341 }
342
343 if (work < budget) {
344 nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
345
346 /* If a packet arrived after we last checked but
347 * before writing RX_ITR, the interrupt will be
348 * delayed, so we retrieve it now.
349 */
350 if (priv->rx_descs[next].report)
351 goto again;
352
353 napi_complete_done(napi, work);
354 }
355
356 return work;
357}
358
359static void __nb8800_tx_dma_start(struct net_device *dev)
360{
361 struct nb8800_priv *priv = netdev_priv(dev);
362 struct nb8800_tx_buf *txb;
363 u32 txc_cr;
364
365 txb = &priv->tx_bufs[priv->tx_queue];
366 if (!txb->ready)
367 return;
368
369 txc_cr = nb8800_readl(priv, NB8800_TXC_CR);
370 if (txc_cr & TCR_EN)
371 return;
372
373 nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
374 wmb(); /* ensure desc addr is written before starting DMA */
375 nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN);
376
377 priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT;
378}
379
380static void nb8800_tx_dma_start(struct net_device *dev)
381{
382 struct nb8800_priv *priv = netdev_priv(dev);
383
384 spin_lock_irq(&priv->tx_lock);
385 __nb8800_tx_dma_start(dev);
386 spin_unlock_irq(&priv->tx_lock);
387}
388
389static void nb8800_tx_dma_start_irq(struct net_device *dev)
390{
391 struct nb8800_priv *priv = netdev_priv(dev);
392
393 spin_lock(&priv->tx_lock);
394 __nb8800_tx_dma_start(dev);
395 spin_unlock(&priv->tx_lock);
396}
397
398static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
399{
400 struct nb8800_priv *priv = netdev_priv(dev);
401 struct nb8800_tx_desc *txd;
402 struct nb8800_tx_buf *txb;
403 struct nb8800_dma_desc *desc;
404 dma_addr_t dma_addr;
405 unsigned int dma_len;
406 unsigned int align;
407 unsigned int next;
408
409 if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
410 netif_stop_queue(dev);
411 return NETDEV_TX_BUSY;
412 }
413
414 align = (8 - (uintptr_t)skb->data) & 7;
415
416 dma_len = skb->len - align;
417 dma_addr = dma_map_single(&dev->dev, skb->data + align,
418 dma_len, DMA_TO_DEVICE);
419
420 if (dma_mapping_error(&dev->dev, dma_addr)) {
421 netdev_err(dev, "tx dma mapping error\n");
422 kfree_skb(skb);
423 dev->stats.tx_dropped++;
424 return NETDEV_TX_OK;
425 }
426
427 if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
428 netif_stop_queue(dev);
429 skb->xmit_more = 0;
430 }
431
432 next = priv->tx_next;
433 txb = &priv->tx_bufs[next];
434 txd = &priv->tx_descs[next];
435 desc = &txd->desc[0];
436
437 next = (next + 1) % TX_DESC_COUNT;
438
439 if (align) {
440 memcpy(txd->buf, skb->data, align);
441
442 desc->s_addr =
443 txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
444 desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]);
445 desc->config = DESC_BTS(2) | DESC_DS | align;
446
447 desc++;
448 }
449
450 desc->s_addr = dma_addr;
451 desc->n_addr = priv->tx_bufs[next].dma_desc;
452 desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;
453
454 if (!skb->xmit_more)
455 desc->config |= DESC_EOC;
456
457 txb->skb = skb;
458 txb->dma_addr = dma_addr;
459 txb->dma_len = dma_len;
460
461 if (!priv->tx_chain) {
462 txb->chain_len = 1;
463 priv->tx_chain = txb;
464 } else {
465 priv->tx_chain->chain_len++;
466 }
467
468 netdev_sent_queue(dev, skb->len);
469
470 priv->tx_next = next;
471
472 if (!skb->xmit_more) {
473 smp_wmb();
474 priv->tx_chain->ready = true;
475 priv->tx_chain = NULL;
476 nb8800_tx_dma_start(dev);
477 }
478
479 return NETDEV_TX_OK;
480}
481
482static void nb8800_tx_error(struct net_device *dev, u32 report)
483{
484 if (report & TX_LATE_COLLISION)
485 dev->stats.collisions++;
486
487 if (report & TX_PACKET_DROPPED)
488 dev->stats.tx_dropped++;
489
490 if (report & TX_FIFO_UNDERRUN)
491 dev->stats.tx_fifo_errors++;
492
493 dev->stats.tx_errors++;
494}
495
496static void nb8800_tx_done(struct net_device *dev)
497{
498 struct nb8800_priv *priv = netdev_priv(dev);
499 unsigned int limit = priv->tx_next;
500 unsigned int done = priv->tx_done;
501 unsigned int packets = 0;
502 unsigned int len = 0;
503
504 while (done != limit) {
505 struct nb8800_tx_desc *txd = &priv->tx_descs[done];
506 struct nb8800_tx_buf *txb = &priv->tx_bufs[done];
507 struct sk_buff *skb;
508
509 if (!txd->report)
510 break;
511
512 skb = txb->skb;
513 len += skb->len;
514
515 dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len,
516 DMA_TO_DEVICE);
517
518 if (IS_TX_ERROR(txd->report)) {
519 nb8800_tx_error(dev, txd->report);
520 kfree_skb(skb);
521 } else {
522 consume_skb(skb);
523 }
524
525 dev->stats.tx_packets++;
526 dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report);
527 dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report);
528
529 txb->skb = NULL;
530 txb->ready = false;
531 txd->report = 0;
532
533 done = (done + 1) % TX_DESC_COUNT;
534 packets++;
535 }
536
537 if (packets) {
538 smp_mb__before_atomic();
539 atomic_add(packets, &priv->tx_free);
540 netdev_completed_queue(dev, packets, len);
541 netif_wake_queue(dev);
542 priv->tx_done = done;
543 }
544}
545
546static irqreturn_t nb8800_irq(int irq, void *dev_id)
547{
548 struct net_device *dev = dev_id;
549 struct nb8800_priv *priv = netdev_priv(dev);
550 irqreturn_t ret = IRQ_NONE;
551 u32 val;
552
553 /* tx interrupt */
554 val = nb8800_readl(priv, NB8800_TXC_SR);
555 if (val) {
556 nb8800_writel(priv, NB8800_TXC_SR, val);
557
558 if (val & TSR_DI)
559 nb8800_tx_dma_start_irq(dev);
560
561 if (val & TSR_TI)
562 napi_schedule_irqoff(&priv->napi);
563
564 if (unlikely(val & TSR_DE))
565 netdev_err(dev, "TX DMA error\n");
566
567 /* should never happen with automatic status retrieval */
568 if (unlikely(val & TSR_TO))
569 netdev_err(dev, "TX Status FIFO overflow\n");
570
571 ret = IRQ_HANDLED;
572 }
573
574 /* rx interrupt */
575 val = nb8800_readl(priv, NB8800_RXC_SR);
576 if (val) {
577 nb8800_writel(priv, NB8800_RXC_SR, val);
578
579 if (likely(val & (RSR_RI | RSR_DI))) {
580 nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll);
581 napi_schedule_irqoff(&priv->napi);
582 }
583
584 if (unlikely(val & RSR_DE))
585 netdev_err(dev, "RX DMA error\n");
586
587 /* should never happen with automatic status retrieval */
588 if (unlikely(val & RSR_RO))
589 netdev_err(dev, "RX Status FIFO overflow\n");
590
591 ret = IRQ_HANDLED;
592 }
593
594 return ret;
595}
596
597static void nb8800_mac_config(struct net_device *dev)
598{
599 struct nb8800_priv *priv = netdev_priv(dev);
600 bool gigabit = priv->speed == SPEED_1000;
601 u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE;
602 u32 mac_mode = 0;
603 u32 slot_time;
604 u32 phy_clk;
605 u32 ict;
606
607 if (!priv->duplex)
608 mac_mode |= HALF_DUPLEX;
609
610 if (gigabit) {
611 if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
612 mac_mode |= RGMII_MODE;
613
614 mac_mode |= GMAC_MODE;
615 phy_clk = 125000000;
616
617 /* Should be 512 but register is only 8 bits */
618 slot_time = 255;
619 } else {
620 phy_clk = 25000000;
621 slot_time = 128;
622 }
623
624 ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk));
625
626 nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict);
627 nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time);
628 nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode);
629}
630
631static void nb8800_pause_config(struct net_device *dev)
632{
633 struct nb8800_priv *priv = netdev_priv(dev);
634 struct phy_device *phydev = priv->phydev;
635 u32 rxcr;
636
637 if (priv->pause_aneg) {
638 if (!phydev || !phydev->link)
639 return;
640
641 priv->pause_rx = phydev->pause;
642 priv->pause_tx = phydev->pause ^ phydev->asym_pause;
643 }
644
645 nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx);
646
647 rxcr = nb8800_readl(priv, NB8800_RXC_CR);
648 if (!!(rxcr & RCR_FL) == priv->pause_tx)
649 return;
650
651 if (netif_running(dev)) {
652 napi_disable(&priv->napi);
653 netif_tx_lock_bh(dev);
654 nb8800_dma_stop(dev);
655 nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
656 nb8800_start_rx(dev);
657 netif_tx_unlock_bh(dev);
658 napi_enable(&priv->napi);
659 } else {
660 nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
661 }
662}
663
664static void nb8800_link_reconfigure(struct net_device *dev)
665{
666 struct nb8800_priv *priv = netdev_priv(dev);
667 struct phy_device *phydev = priv->phydev;
668 int change = 0;
669
670 if (phydev->link) {
671 if (phydev->speed != priv->speed) {
672 priv->speed = phydev->speed;
673 change = 1;
674 }
675
676 if (phydev->duplex != priv->duplex) {
677 priv->duplex = phydev->duplex;
678 change = 1;
679 }
680
681 if (change)
682 nb8800_mac_config(dev);
683
684 nb8800_pause_config(dev);
685 }
686
687 if (phydev->link != priv->link) {
688 priv->link = phydev->link;
689 change = 1;
690 }
691
692 if (change)
693 phy_print_status(priv->phydev);
694}
695
696static void nb8800_update_mac_addr(struct net_device *dev)
697{
698 struct nb8800_priv *priv = netdev_priv(dev);
699 int i;
700
701 for (i = 0; i < ETH_ALEN; i++)
702 nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]);
703
704 for (i = 0; i < ETH_ALEN; i++)
705 nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]);
706}
707
708static int nb8800_set_mac_address(struct net_device *dev, void *addr)
709{
710 struct sockaddr *sock = addr;
711
712 if (netif_running(dev))
713 return -EBUSY;
714
715 ether_addr_copy(dev->dev_addr, sock->sa_data);
716 nb8800_update_mac_addr(dev);
717
718 return 0;
719}
720
721static void nb8800_mc_init(struct net_device *dev, int val)
722{
723 struct nb8800_priv *priv = netdev_priv(dev);
724
725 nb8800_writeb(priv, NB8800_MC_INIT, val);
726 readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val,
727 1, 1000);
728}
729
730static void nb8800_set_rx_mode(struct net_device *dev)
731{
732 struct nb8800_priv *priv = netdev_priv(dev);
733 struct netdev_hw_addr *ha;
734 int i;
735
736 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
737 nb8800_mac_af(dev, false);
738 return;
739 }
740
741 nb8800_mac_af(dev, true);
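	/* MC_INIT looks like a self-clearing command register: the
	 * write of 0 here presumably clears the filter table, and the
	 * 0xff writes below load each address placed in MC_ADDR;
	 * nb8800_mc_init() polls until the hardware clears the value.
	 */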
742 nb8800_mc_init(dev, 0);
743
744 netdev_for_each_mc_addr(ha, dev) {
745 for (i = 0; i < ETH_ALEN; i++)
746 nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]);
747
748 nb8800_mc_init(dev, 0xff);
749 }
750}
751
752#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
753#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))
754
755static void nb8800_dma_free(struct net_device *dev)
756{
757 struct nb8800_priv *priv = netdev_priv(dev);
758 unsigned int i;
759
760 if (priv->rx_bufs) {
761 for (i = 0; i < RX_DESC_COUNT; i++)
762 if (priv->rx_bufs[i].page)
763 put_page(priv->rx_bufs[i].page);
764
765 kfree(priv->rx_bufs);
766 priv->rx_bufs = NULL;
767 }
768
769 if (priv->tx_bufs) {
770 for (i = 0; i < TX_DESC_COUNT; i++)
771 kfree_skb(priv->tx_bufs[i].skb);
772
773 kfree(priv->tx_bufs);
774 priv->tx_bufs = NULL;
775 }
776
777 if (priv->rx_descs) {
778 dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs,
779 priv->rx_desc_dma);
780 priv->rx_descs = NULL;
781 }
782
783 if (priv->tx_descs) {
784 dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs,
785 priv->tx_desc_dma);
786 priv->tx_descs = NULL;
787 }
788}
789
790static void nb8800_dma_reset(struct net_device *dev)
791{
792 struct nb8800_priv *priv = netdev_priv(dev);
793 struct nb8800_rx_desc *rxd;
794 struct nb8800_tx_desc *txd;
795 unsigned int i;
796
797 for (i = 0; i < RX_DESC_COUNT; i++) {
798 dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);
799
800 rxd = &priv->rx_descs[i];
801 rxd->desc.n_addr = rx_dma + sizeof(*rxd);
802 rxd->desc.r_addr =
803 rx_dma + offsetof(struct nb8800_rx_desc, report);
804 rxd->desc.config = priv->rx_dma_config;
805 rxd->report = 0;
806 }
807
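	/* Close the ring: the last descriptor chains back to the
	 * first, and DESC_EOC marks the point where the rx DMA
	 * engine stops.
	 */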
808 rxd->desc.n_addr = priv->rx_desc_dma;
809 rxd->desc.config |= DESC_EOC;
810
811 priv->rx_eoc = RX_DESC_COUNT - 1;
812
813 for (i = 0; i < TX_DESC_COUNT; i++) {
814 struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
815 dma_addr_t r_dma = txb->dma_desc +
816 offsetof(struct nb8800_tx_desc, report);
817
818 txd = &priv->tx_descs[i];
819 txd->desc[0].r_addr = r_dma;
820 txd->desc[1].r_addr = r_dma;
821 txd->report = 0;
822 }
823
824 priv->tx_next = 0;
825 priv->tx_queue = 0;
826 priv->tx_done = 0;
827 atomic_set(&priv->tx_free, TX_DESC_COUNT);
828
829 nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);
830
831 wmb(); /* ensure all setup is written before starting */
832}
833
834static int nb8800_dma_init(struct net_device *dev)
835{
836 struct nb8800_priv *priv = netdev_priv(dev);
837 unsigned int n_rx = RX_DESC_COUNT;
838 unsigned int n_tx = TX_DESC_COUNT;
839 unsigned int i;
840 int err;
841
842 priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE,
843 &priv->rx_desc_dma, GFP_KERNEL);
844 if (!priv->rx_descs)
845 goto err_out;
846
847 priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL);
848 if (!priv->rx_bufs)
849 goto err_out;
850
851 for (i = 0; i < n_rx; i++) {
852 err = nb8800_alloc_rx(dev, i, false);
853 if (err)
854 goto err_out;
855 }
856
857 priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE,
858 &priv->tx_desc_dma, GFP_KERNEL);
859 if (!priv->tx_descs)
860 goto err_out;
861
862 priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL);
863 if (!priv->tx_bufs)
864 goto err_out;
865
866 for (i = 0; i < n_tx; i++)
867 priv->tx_bufs[i].dma_desc =
868 priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc);
869
870 nb8800_dma_reset(dev);
871
872 return 0;
873
874err_out:
875 nb8800_dma_free(dev);
876
877 return -ENOMEM;
878}
879
880static int nb8800_dma_stop(struct net_device *dev)
881{
882 struct nb8800_priv *priv = netdev_priv(dev);
883 struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
884 struct nb8800_tx_desc *txd = &priv->tx_descs[0];
885 int retry = 5;
886 u32 txcr;
887 u32 rxcr;
888 int err;
889 unsigned int i;
890
891 /* wait for tx to finish */
892 err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
893 !(txcr & TCR_EN) &&
894 priv->tx_done == priv->tx_next,
895 1000, 1000000);
896 if (err)
897 return err;
898
899 /* The rx DMA only stops if it reaches the end of chain.
900 * To make this happen, we set the EOC flag on all rx
901 * descriptors, put the device in loopback mode, and send
902 * a few dummy frames. The interrupt handler will ignore
903 * these since NAPI is disabled and no real frames are in
904 * the tx queue.
905 */
906
907 for (i = 0; i < RX_DESC_COUNT; i++)
908 priv->rx_descs[i].desc.config |= DESC_EOC;
909
910 txd->desc[0].s_addr =
911 txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
912 txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
913 memset(txd->buf, 0, sizeof(txd->buf));
914
915 nb8800_mac_af(dev, false);
916 nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
917
918 do {
919 nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
920 wmb();
921 nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);
922
923 err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
924 rxcr, !(rxcr & RCR_EN),
925 1000, 100000);
926 } while (err && --retry);
927
928 nb8800_mac_af(dev, true);
929 nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
930 nb8800_dma_reset(dev);
931
932 return retry ? 0 : -ETIMEDOUT;
933}
934
935static void nb8800_pause_adv(struct net_device *dev)
936{
937 struct nb8800_priv *priv = netdev_priv(dev);
938 u32 adv = 0;
939
940 if (!priv->phydev)
941 return;
942
943 if (priv->pause_rx)
944 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
945 if (priv->pause_tx)
946 adv ^= ADVERTISED_Asym_Pause;
947
948 priv->phydev->supported |= adv;
949 priv->phydev->advertising |= adv;
950}
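
/* The OR-then-XOR above yields the standard ethtool pause
 * advertisement encoding; as a truth table (derived from the code,
 * not from separate documentation):
 *
 *	pause_rx  pause_tx  advertised bits
 *	    0         0     none
 *	    1         1     Pause
 *	    1         0     Pause | Asym_Pause
 *	    0         1     Asym_Pause
 */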
951
952static int nb8800_open(struct net_device *dev)
953{
954 struct nb8800_priv *priv = netdev_priv(dev);
955 int err;
956
957 /* clear any pending interrupts */
958 nb8800_writel(priv, NB8800_RXC_SR, 0xf);
959 nb8800_writel(priv, NB8800_TXC_SR, 0xf);
960
961 err = nb8800_dma_init(dev);
962 if (err)
963 return err;
964
965 err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev);
966 if (err)
967 goto err_free_dma;
968
969 nb8800_mac_rx(dev, true);
970 nb8800_mac_tx(dev, true);
971
972 priv->phydev = of_phy_connect(dev, priv->phy_node,
973 nb8800_link_reconfigure, 0,
974 priv->phy_mode);
975 if (!priv->phydev)
976 goto err_free_irq;
977
978 nb8800_pause_adv(dev);
979
980 netdev_reset_queue(dev);
981 napi_enable(&priv->napi);
982 netif_start_queue(dev);
983
984 nb8800_start_rx(dev);
985 phy_start(priv->phydev);
986
987 return 0;
988
989err_free_irq:
990 free_irq(dev->irq, dev);
991err_free_dma:
992 nb8800_dma_free(dev);
993
994 return err;
995}
996
997static int nb8800_stop(struct net_device *dev)
998{
999 struct nb8800_priv *priv = netdev_priv(dev);
1000
1001 phy_stop(priv->phydev);
1002
1003 netif_stop_queue(dev);
1004 napi_disable(&priv->napi);
1005
1006 nb8800_dma_stop(dev);
1007 nb8800_mac_rx(dev, false);
1008 nb8800_mac_tx(dev, false);
1009
1010 phy_disconnect(priv->phydev);
1011 priv->phydev = NULL;
1012
1013 free_irq(dev->irq, dev);
1014
1015 nb8800_dma_free(dev);
1016
1017 return 0;
1018}
1019
1020static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1021{
1022 struct nb8800_priv *priv = netdev_priv(dev);
1023
1024 return phy_mii_ioctl(priv->phydev, rq, cmd);
1025}
1026
1027static const struct net_device_ops nb8800_netdev_ops = {
1028 .ndo_open = nb8800_open,
1029 .ndo_stop = nb8800_stop,
1030 .ndo_start_xmit = nb8800_xmit,
1031 .ndo_set_mac_address = nb8800_set_mac_address,
1032 .ndo_set_rx_mode = nb8800_set_rx_mode,
1033 .ndo_do_ioctl = nb8800_ioctl,
1034 .ndo_change_mtu = eth_change_mtu,
1035 .ndo_validate_addr = eth_validate_addr,
1036};
1037
1038static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1039{
1040 struct nb8800_priv *priv = netdev_priv(dev);
1041
1042 if (!priv->phydev)
1043 return -ENODEV;
1044
1045 return phy_ethtool_gset(priv->phydev, cmd);
1046}
1047
1048static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1049{
1050 struct nb8800_priv *priv = netdev_priv(dev);
1051
1052 if (!priv->phydev)
1053 return -ENODEV;
1054
1055 return phy_ethtool_sset(priv->phydev, cmd);
1056}
1057
1058static int nb8800_nway_reset(struct net_device *dev)
1059{
1060 struct nb8800_priv *priv = netdev_priv(dev);
1061
1062 if (!priv->phydev)
1063 return -ENODEV;
1064
1065 return genphy_restart_aneg(priv->phydev);
1066}
1067
1068static void nb8800_get_pauseparam(struct net_device *dev,
1069 struct ethtool_pauseparam *pp)
1070{
1071 struct nb8800_priv *priv = netdev_priv(dev);
1072
1073 pp->autoneg = priv->pause_aneg;
1074 pp->rx_pause = priv->pause_rx;
1075 pp->tx_pause = priv->pause_tx;
1076}
1077
1078static int nb8800_set_pauseparam(struct net_device *dev,
1079 struct ethtool_pauseparam *pp)
1080{
1081 struct nb8800_priv *priv = netdev_priv(dev);
1082
1083 priv->pause_aneg = pp->autoneg;
1084 priv->pause_rx = pp->rx_pause;
1085 priv->pause_tx = pp->tx_pause;
1086
1087 nb8800_pause_adv(dev);
1088
1089 if (!priv->pause_aneg)
1090 nb8800_pause_config(dev);
1091 else if (priv->phydev)
1092 phy_start_aneg(priv->phydev);
1093
1094 return 0;
1095}
1096
1097static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
1098 "rx_bytes_ok",
1099 "rx_frames_ok",
1100 "rx_undersize_frames",
1101 "rx_fragment_frames",
1102 "rx_64_byte_frames",
1103 "rx_127_byte_frames",
1104 "rx_255_byte_frames",
1105 "rx_511_byte_frames",
1106 "rx_1023_byte_frames",
1107 "rx_max_size_frames",
1108 "rx_oversize_frames",
1109 "rx_bad_fcs_frames",
1110 "rx_broadcast_frames",
1111 "rx_multicast_frames",
1112 "rx_control_frames",
1113 "rx_pause_frames",
1114 "rx_unsup_control_frames",
1115 "rx_align_error_frames",
1116 "rx_overrun_frames",
1117 "rx_jabber_frames",
1118 "rx_bytes",
1119 "rx_frames",
1120
1121 "tx_bytes_ok",
1122 "tx_frames_ok",
1123 "tx_64_byte_frames",
1124 "tx_127_byte_frames",
1125 "tx_255_byte_frames",
1126 "tx_511_byte_frames",
1127 "tx_1023_byte_frames",
1128 "tx_max_size_frames",
1129 "tx_oversize_frames",
1130 "tx_broadcast_frames",
1131 "tx_multicast_frames",
1132 "tx_control_frames",
1133 "tx_pause_frames",
1134 "tx_underrun_frames",
1135 "tx_single_collision_frames",
1136 "tx_multi_collision_frames",
1137 "tx_deferred_collision_frames",
1138 "tx_late_collision_frames",
1139 "tx_excessive_collision_frames",
1140 "tx_bytes",
1141 "tx_frames",
1142 "tx_collisions",
1143};
1144
1145#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)
1146
1147static int nb8800_get_sset_count(struct net_device *dev, int sset)
1148{
1149 if (sset == ETH_SS_STATS)
1150 return NB8800_NUM_STATS;
1151
1152 return -EOPNOTSUPP;
1153}
1154
1155static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf)
1156{
1157 if (sset == ETH_SS_STATS)
1158 memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names));
1159}
1160
1161static u32 nb8800_read_stat(struct net_device *dev, int index)
1162{
1163 struct nb8800_priv *priv = netdev_priv(dev);
1164
1165 nb8800_writeb(priv, NB8800_STAT_INDEX, index);
1166
1167 return nb8800_readl(priv, NB8800_STAT_DATA);
1168}
1169
1170static void nb8800_get_ethtool_stats(struct net_device *dev,
1171 struct ethtool_stats *estats, u64 *st)
1172{
1173 unsigned int i;
1174 u32 rx, tx;
1175
1176 for (i = 0; i < NB8800_NUM_STATS / 2; i++) {
1177 rx = nb8800_read_stat(dev, i);
1178 tx = nb8800_read_stat(dev, i | 0x80);
1179 st[i] = rx;
1180 st[i + NB8800_NUM_STATS / 2] = tx;
1181 }
1182}
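
/* The split loop relies on the counter index layout: rx counters sit
 * at indices 0..NB8800_NUM_STATS/2 - 1 and the matching tx counters
 * at the same index with bit 7 set.  A hypothetical one-off read to
 * illustrate (not part of this driver):
 *
 *	u64 tx_bytes_ok = nb8800_read_stat(dev, 0 | 0x80);
 */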
1183
1184static const struct ethtool_ops nb8800_ethtool_ops = {
1185 .get_settings = nb8800_get_settings,
1186 .set_settings = nb8800_set_settings,
1187 .nway_reset = nb8800_nway_reset,
1188 .get_link = ethtool_op_get_link,
1189 .get_pauseparam = nb8800_get_pauseparam,
1190 .set_pauseparam = nb8800_set_pauseparam,
1191 .get_sset_count = nb8800_get_sset_count,
1192 .get_strings = nb8800_get_strings,
1193 .get_ethtool_stats = nb8800_get_ethtool_stats,
1194};
1195
1196static int nb8800_hw_init(struct net_device *dev)
1197{
1198 struct nb8800_priv *priv = netdev_priv(dev);
1199 u32 val;
1200
1201 val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
1202 nb8800_writeb(priv, NB8800_TX_CTL1, val);
1203
1204 /* Collision retry count */
1205 nb8800_writeb(priv, NB8800_TX_CTL2, 5);
1206
1207 val = RX_PAD_STRIP | RX_AF_EN;
1208 nb8800_writeb(priv, NB8800_RX_CTL, val);
1209
1210 /* Chosen by fair dice roll */
1211 nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);
1212
1213 /* TX cycles per deferral period */
1214 nb8800_writeb(priv, NB8800_TX_SDP, 12);
1215
1216 /* The following three threshold values have been
1217 * experimentally determined for good results.
1218 */
1219
1220 /* RX/TX FIFO threshold for partial empty (64-bit entries) */
1221 nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);
1222
1223 /* RX/TX FIFO threshold for partial full (64-bit entries) */
1224 nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);
1225
1226 /* Buffer size for transmit (64-bit entries) */
1227 nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);
1228
1229 /* Configure tx DMA */
1230
1231 val = nb8800_readl(priv, NB8800_TXC_CR);
1232 val &= TCR_LE; /* keep endian setting */
1233 val |= TCR_DM; /* DMA descriptor mode */
1234 val |= TCR_RS; /* automatically store tx status */
1235 val |= TCR_DIE; /* interrupt on DMA chain completion */
1236 val |= TCR_TFI(7); /* interrupt after 7 frames transmitted */
1237 val |= TCR_BTS(2); /* 32-byte bus transaction size */
1238 nb8800_writel(priv, NB8800_TXC_CR, val);
1239
1240 /* TX complete interrupt after 10 ms or 7 frames (see above) */
1241 val = clk_get_rate(priv->clk) / 100;
1242 nb8800_writel(priv, NB8800_TX_ITR, val);
1243
1244 /* Configure rx DMA */
1245
1246 val = nb8800_readl(priv, NB8800_RXC_CR);
1247 val &= RCR_LE; /* keep endian setting */
1248 val |= RCR_DM; /* DMA descriptor mode */
1249 val |= RCR_RS; /* automatically store rx status */
1250 val |= RCR_DIE; /* interrupt at end of DMA chain */
1251 val |= RCR_RFI(7); /* interrupt after 7 frames received */
1252 val |= RCR_BTS(2); /* 32-byte bus transaction size */
1253 nb8800_writel(priv, NB8800_RXC_CR, val);
1254
1255 /* The rx interrupt can fire before the DMA has completed
1256 * unless a small delay is added. 50 us is hopefully enough.
1257 */
1258 priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;
1259
1260 /* In NAPI poll mode we want to disable interrupts, but the
1261 * hardware does not permit this. Delay 10 ms instead.
1262 */
1263 priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;
1264
1265 nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
1266
1267 priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;
1268
1269 /* Flow control settings */
1270
1271 /* Pause time of 0.1 ms */
1272 val = 100000 / 512;
1273 nb8800_writeb(priv, NB8800_PQ1, val >> 8);
1274 nb8800_writeb(priv, NB8800_PQ2, val & 0xff);
1275
1276 /* Auto-negotiate by default */
1277 priv->pause_aneg = true;
1278 priv->pause_rx = true;
1279 priv->pause_tx = true;
1280
1281 nb8800_mc_init(dev, 0);
1282
1283 return 0;
1284}
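
/* Worked example for the timer values above, assuming a hypothetical
 * 200 MHz reference clock (the real clk_get_rate() value is
 * board-specific):
 *
 *	TX_ITR      = 200000000 / 100   = 2000000 ticks -> 10 ms
 *	rx_itr_irq  = 200000000 / 20000 = 10000 ticks   -> 50 us
 *	rx_itr_poll = 200000000 / 100   = 2000000 ticks -> 10 ms
 *
 * The pause time write is clock-independent: 100000 / 512 = 195
 * quanta of 512 bit times each, which at 1 Gb/s (1 ns per bit) is
 * the 0.1 ms stated in the comment.
 */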
1285
1286static int nb8800_tangox_init(struct net_device *dev)
1287{
1288 struct nb8800_priv *priv = netdev_priv(dev);
1289 u32 pad_mode = PAD_MODE_MII;
1290
1291 switch (priv->phy_mode) {
1292 case PHY_INTERFACE_MODE_MII:
1293 case PHY_INTERFACE_MODE_GMII:
1294 pad_mode = PAD_MODE_MII;
1295 break;
1296
1297 case PHY_INTERFACE_MODE_RGMII:
1298 pad_mode = PAD_MODE_RGMII;
1299 break;
1300
1301 case PHY_INTERFACE_MODE_RGMII_TXID:
1302 pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
1303 break;
1304
1305 default:
1306 dev_err(dev->dev.parent, "unsupported phy mode %s\n",
1307 phy_modes(priv->phy_mode));
1308 return -EINVAL;
1309 }
1310
1311 nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode);
1312
1313 return 0;
1314}
1315
1316static int nb8800_tangox_reset(struct net_device *dev)
1317{
1318 struct nb8800_priv *priv = netdev_priv(dev);
1319 int clk_div;
1320
1321 nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
1322 usleep_range(1000, 10000);
1323 nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);
1324
1325 wmb(); /* ensure reset is cleared before proceeding */
1326
1327 clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
1328 nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);
1329
1330 return 0;
1331}
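
/* Arithmetic check for the divider above, assuming a hypothetical
 * 216 MHz system clock: clk_div = DIV_ROUND_UP(216000000, 2 * 2500000)
 * = 44, giving MDC = 216 MHz / (2 * 44) ~= 2.45 MHz, just under the
 * MAX_MDC_CLOCK cap of 2.5 MHz defined in nb8800.h.
 */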
1332
1333static const struct nb8800_ops nb8800_tangox_ops = {
1334 .init = nb8800_tangox_init,
1335 .reset = nb8800_tangox_reset,
1336};
1337
1338static int nb8800_tango4_init(struct net_device *dev)
1339{
1340 struct nb8800_priv *priv = netdev_priv(dev);
1341 int err;
1342
1343 err = nb8800_tangox_init(dev);
1344 if (err)
1345 return err;
1346
1347 /* On tango4, interrupt on DMA completion per frame works and gives
1348 * better performance despite generating more rx interrupts.
1349 */
1350
1351 /* Disable unnecessary interrupt on rx completion */
1352 nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));
1353
1354 /* Request interrupt on descriptor DMA completion */
1355 priv->rx_dma_config |= DESC_ID;
1356
1357 return 0;
1358}
1359
1360static const struct nb8800_ops nb8800_tango4_ops = {
1361 .init = nb8800_tango4_init,
1362 .reset = nb8800_tangox_reset,
1363};
1364
1365static const struct of_device_id nb8800_dt_ids[] = {
1366 {
1367 .compatible = "aurora,nb8800",
1368 },
1369 {
1370 .compatible = "sigma,smp8642-ethernet",
1371 .data = &nb8800_tangox_ops,
1372 },
1373 {
1374 .compatible = "sigma,smp8734-ethernet",
1375 .data = &nb8800_tango4_ops,
1376 },
1377 { }
1378};
1379
1380static int nb8800_probe(struct platform_device *pdev)
1381{
1382 const struct of_device_id *match;
1383 const struct nb8800_ops *ops = NULL;
1384 struct nb8800_priv *priv;
1385 struct resource *res;
1386 struct net_device *dev;
1387 struct mii_bus *bus;
1388 const unsigned char *mac;
1389 void __iomem *base;
1390 int irq;
1391 int ret;
1392
1393 match = of_match_device(nb8800_dt_ids, &pdev->dev);
1394 if (match)
1395 ops = match->data;
1396
1397 irq = platform_get_irq(pdev, 0);
1398 if (irq <= 0) {
1399 dev_err(&pdev->dev, "No IRQ\n");
1400 return -EINVAL;
1401 }
1402
1403 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1404 base = devm_ioremap_resource(&pdev->dev, res);
1405 if (IS_ERR(base))
1406 return PTR_ERR(base);
1407
1408 dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start);
1409
1410 dev = alloc_etherdev(sizeof(*priv));
1411 if (!dev)
1412 return -ENOMEM;
1413
1414 platform_set_drvdata(pdev, dev);
1415 SET_NETDEV_DEV(dev, &pdev->dev);
1416
1417 priv = netdev_priv(dev);
1418 priv->base = base;
1419
1420 priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
1421 if (priv->phy_mode < 0)
1422 priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
1423
1424 priv->clk = devm_clk_get(&pdev->dev, NULL);
1425 if (IS_ERR(priv->clk)) {
1426 dev_err(&pdev->dev, "failed to get clock\n");
1427 ret = PTR_ERR(priv->clk);
1428 goto err_free_dev;
1429 }
1430
1431 ret = clk_prepare_enable(priv->clk);
1432 if (ret)
1433 goto err_free_dev;
1434
1435 spin_lock_init(&priv->tx_lock);
1436
1437 if (ops && ops->reset) {
1438 ret = ops->reset(dev);
1439 if (ret)
1440 goto err_free_dev;
1441 }
1442
1443 bus = devm_mdiobus_alloc(&pdev->dev);
1444 if (!bus) {
1445 ret = -ENOMEM;
1446 goto err_disable_clk;
1447 }
1448
1449 bus->name = "nb8800-mii";
1450 bus->read = nb8800_mdio_read;
1451 bus->write = nb8800_mdio_write;
1452 bus->parent = &pdev->dev;
1453 snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii",
1454 (unsigned long)res->start);
1455 bus->priv = priv;
1456
1457 ret = of_mdiobus_register(bus, pdev->dev.of_node);
1458 if (ret) {
1459 dev_err(&pdev->dev, "failed to register MII bus\n");
1460 goto err_disable_clk;
1461 }
1462
1463 priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1464 if (!priv->phy_node) {
1465 dev_err(&pdev->dev, "no PHY specified\n");
1466 ret = -ENODEV;
1467 goto err_free_bus;
1468 }
1469
1470 priv->mii_bus = bus;
1471
1472 ret = nb8800_hw_init(dev);
1473 if (ret)
1474 goto err_free_bus;
1475
1476 if (ops && ops->init) {
1477 ret = ops->init(dev);
1478 if (ret)
1479 goto err_free_bus;
1480 }
1481
1482 dev->netdev_ops = &nb8800_netdev_ops;
1483 dev->ethtool_ops = &nb8800_ethtool_ops;
1484 dev->flags |= IFF_MULTICAST;
1485 dev->irq = irq;
1486
1487 mac = of_get_mac_address(pdev->dev.of_node);
1488 if (mac)
1489 ether_addr_copy(dev->dev_addr, mac);
1490
1491 if (!is_valid_ether_addr(dev->dev_addr))
1492 eth_hw_addr_random(dev);
1493
1494 nb8800_update_mac_addr(dev);
1495
1496 netif_carrier_off(dev);
1497
1498 ret = register_netdev(dev);
1499 if (ret) {
1500 netdev_err(dev, "failed to register netdev\n");
1501 goto err_free_dma;
1502 }
1503
1504 netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT);
1505
1506 netdev_info(dev, "MAC address %pM\n", dev->dev_addr);
1507
1508 return 0;
1509
1510err_free_dma:
1511 nb8800_dma_free(dev);
1512err_free_bus:
1513 mdiobus_unregister(bus);
1514err_disable_clk:
1515 clk_disable_unprepare(priv->clk);
1516err_free_dev:
1517 free_netdev(dev);
1518
1519 return ret;
1520}
1521
1522static int nb8800_remove(struct platform_device *pdev)
1523{
1524 struct net_device *ndev = platform_get_drvdata(pdev);
1525 struct nb8800_priv *priv = netdev_priv(ndev);
1526
1527 unregister_netdev(ndev);
1528
1529 mdiobus_unregister(priv->mii_bus);
1530
1531 clk_disable_unprepare(priv->clk);
1532
1533 nb8800_dma_free(ndev);
1534 free_netdev(ndev);
1535
1536 return 0;
1537}
1538
1539static struct platform_driver nb8800_driver = {
1540 .driver = {
1541 .name = "nb8800",
1542 .of_match_table = nb8800_dt_ids,
1543 },
1544 .probe = nb8800_probe,
1545 .remove = nb8800_remove,
1546};
1547
1548module_platform_driver(nb8800_driver);
1549
1550MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
1551MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
1552MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
new file mode 100644
index 000000000000..e5adbc2aac9f
--- /dev/null
+++ b/drivers/net/ethernet/aurora/nb8800.h
@@ -0,0 +1,316 @@
1#ifndef _NB8800_H_
2#define _NB8800_H_
3
4#include <linux/types.h>
5#include <linux/skbuff.h>
6#include <linux/phy.h>
7#include <linux/clk.h>
8#include <linux/bitops.h>
9
10#define RX_DESC_COUNT 256
11#define TX_DESC_COUNT 256
12
13#define NB8800_DESC_LOW 4
14
15#define RX_BUF_SIZE 1552
16
17#define RX_COPYBREAK 256
18#define RX_COPYHDR 128
19
20#define MAX_MDC_CLOCK 2500000
21
22/* Stargate Solutions SSN8800 core registers */
23#define NB8800_TX_CTL1 0x000
24#define TX_TPD BIT(5)
25#define TX_APPEND_FCS BIT(4)
26#define TX_PAD_EN BIT(3)
27#define TX_RETRY_EN BIT(2)
28#define TX_EN BIT(0)
29
30#define NB8800_TX_CTL2 0x001
31
32#define NB8800_RX_CTL 0x004
33#define RX_BC_DISABLE BIT(7)
34#define RX_RUNT BIT(6)
35#define RX_AF_EN BIT(5)
36#define RX_PAUSE_EN BIT(3)
37#define RX_SEND_CRC BIT(2)
38#define RX_PAD_STRIP BIT(1)
39#define RX_EN BIT(0)
40
41#define NB8800_RANDOM_SEED 0x008
42#define NB8800_TX_SDP 0x14
43#define NB8800_TX_TPDP1 0x18
44#define NB8800_TX_TPDP2 0x19
45#define NB8800_SLOT_TIME 0x1c
46
47#define NB8800_MDIO_CMD 0x020
48#define MDIO_CMD_GO BIT(31)
49#define MDIO_CMD_WR BIT(26)
50#define MDIO_CMD_ADDR(x) ((x) << 21)
51#define MDIO_CMD_REG(x) ((x) << 16)
52#define MDIO_CMD_DATA(x) ((x) << 0)
53
54#define NB8800_MDIO_STS 0x024
55#define MDIO_STS_ERR BIT(31)
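
/* The command/status pair above suggests the MDIO access sequence:
 * write MDIO_CMD_GO | MDIO_CMD_ADDR(phy) | MDIO_CMD_REG(reg) (plus
 * MDIO_CMD_WR and MDIO_CMD_DATA(x) for a write) to NB8800_MDIO_CMD,
 * poll until MDIO_CMD_GO clears, then check MDIO_STS_ERR.  This is
 * inferred from the register layout; the driver's mdio helpers are
 * defined earlier in nb8800.c, outside this excerpt.
 */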
56
57#define NB8800_MC_ADDR(i) (0x028 + (i))
58#define NB8800_MC_INIT 0x02e
59#define NB8800_UC_ADDR(i) (0x03c + (i))
60
61#define NB8800_MAC_MODE 0x044
62#define RGMII_MODE BIT(7)
63#define HALF_DUPLEX BIT(4)
64#define BURST_EN BIT(3)
65#define LOOPBACK_EN BIT(2)
66#define GMAC_MODE BIT(0)
67
68#define NB8800_IC_THRESHOLD 0x050
69#define NB8800_PE_THRESHOLD 0x051
70#define NB8800_PF_THRESHOLD 0x052
71#define NB8800_TX_BUFSIZE 0x054
72#define NB8800_FIFO_CTL 0x056
73#define NB8800_PQ1 0x060
74#define NB8800_PQ2 0x061
75#define NB8800_SRC_ADDR(i) (0x06a + (i))
76#define NB8800_STAT_DATA 0x078
77#define NB8800_STAT_INDEX 0x07c
78#define NB8800_STAT_CLEAR 0x07d
79
80#define NB8800_SLEEP_MODE 0x07e
81#define SLEEP_MODE BIT(0)
82
83#define NB8800_WAKEUP 0x07f
84#define WAKEUP BIT(0)
85
86/* Aurora NB8800 host interface registers */
87#define NB8800_TXC_CR 0x100
88#define TCR_LK BIT(12)
89#define TCR_DS BIT(11)
90#define TCR_BTS(x) (((x) & 0x7) << 8)
91#define TCR_DIE BIT(7)
92#define TCR_TFI(x) (((x) & 0x7) << 4)
93#define TCR_LE BIT(3)
94#define TCR_RS BIT(2)
95#define TCR_DM BIT(1)
96#define TCR_EN BIT(0)
97
98#define NB8800_TXC_SR 0x104
99#define TSR_DE BIT(3)
100#define TSR_DI BIT(2)
101#define TSR_TO BIT(1)
102#define TSR_TI BIT(0)
103
104#define NB8800_TX_SAR 0x108
105#define NB8800_TX_DESC_ADDR 0x10c
106
107#define NB8800_TX_REPORT_ADDR 0x110
108#define TX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xffff)
109#define TX_FIRST_DEFERRAL BIT(7)
110#define TX_EARLY_COLLISIONS(x) (((x) >> 3) & 0xf)
111#define TX_LATE_COLLISION BIT(2)
112#define TX_PACKET_DROPPED BIT(1)
113#define TX_FIFO_UNDERRUN BIT(0)
114#define IS_TX_ERROR(r) ((r) & 0x07)
115
116#define NB8800_TX_FIFO_SR 0x114
117#define NB8800_TX_ITR 0x118
118
119#define NB8800_RXC_CR 0x200
120#define RCR_FL BIT(13)
121#define RCR_LK BIT(12)
122#define RCR_DS BIT(11)
123#define RCR_BTS(x) (((x) & 7) << 8)
124#define RCR_DIE BIT(7)
125#define RCR_RFI(x) (((x) & 7) << 4)
126#define RCR_LE BIT(3)
127#define RCR_RS BIT(2)
128#define RCR_DM BIT(1)
129#define RCR_EN BIT(0)
130
131#define NB8800_RXC_SR 0x204
132#define RSR_DE BIT(3)
133#define RSR_DI BIT(2)
134#define RSR_RO BIT(1)
135#define RSR_RI BIT(0)
136
137#define NB8800_RX_SAR 0x208
138#define NB8800_RX_DESC_ADDR 0x20c
139
140#define NB8800_RX_REPORT_ADDR 0x210
141#define RX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xFFFF)
142#define RX_MULTICAST_PKT BIT(9)
143#define RX_BROADCAST_PKT BIT(8)
144#define RX_LENGTH_ERR BIT(7)
145#define RX_FCS_ERR BIT(6)
146#define RX_RUNT_PKT BIT(5)
147#define RX_FIFO_OVERRUN BIT(4)
148#define RX_LATE_COLLISION BIT(3)
149#define RX_ALIGNMENT_ERROR BIT(2)
150#define RX_ERROR_MASK 0xfc
151#define IS_RX_ERROR(r) ((r) & RX_ERROR_MASK)
152
153#define NB8800_RX_FIFO_SR 0x214
154#define NB8800_RX_ITR 0x218
155
156/* Sigma Designs SMP86xx additional registers */
157#define NB8800_TANGOX_PAD_MODE 0x400
158#define PAD_MODE_MASK 0x7
159#define PAD_MODE_MII 0x0
160#define PAD_MODE_RGMII 0x1
161#define PAD_MODE_GTX_CLK_INV BIT(3)
162#define PAD_MODE_GTX_CLK_DELAY BIT(4)
163
164#define NB8800_TANGOX_MDIO_CLKDIV 0x420
165#define NB8800_TANGOX_RESET 0x424
166
167/* Hardware DMA descriptor */
168struct nb8800_dma_desc {
169 u32 s_addr; /* start address */
170 u32 n_addr; /* next descriptor address */
171 u32 r_addr; /* report address */
172 u32 config;
173} __aligned(8);
174
175#define DESC_ID BIT(23)
176#define DESC_EOC BIT(22)
177#define DESC_EOF BIT(21)
178#define DESC_LK BIT(20)
179#define DESC_DS BIT(19)
180#define DESC_BTS(x) (((x) & 0x7) << 16)
181
182/* DMA descriptor and associated data for rx.
183 * Allocated from coherent memory.
184 */
185struct nb8800_rx_desc {
186 /* DMA descriptor */
187 struct nb8800_dma_desc desc;
188
189 /* Status report filled in by hardware */
190 u32 report;
191};
192
193/* Address of buffer on rx ring */
194struct nb8800_rx_buf {
195 struct page *page;
196 unsigned long offset;
197};
198
199/* DMA descriptors and associated data for tx.
200 * Allocated from coherent memory.
201 */
202struct nb8800_tx_desc {
203 /* DMA descriptor. The second descriptor is used if packet
204 * data is unaligned.
205 */
206 struct nb8800_dma_desc desc[2];
207
208 /* Status report filled in by hardware */
209 u32 report;
210
211 /* Bounce buffer for initial unaligned part of packet */
212 u8 buf[8] __aligned(8);
213};
214
215/* Packet in tx queue */
216struct nb8800_tx_buf {
217 /* Currently queued skb */
218 struct sk_buff *skb;
219
220 /* DMA address of the first descriptor */
221 dma_addr_t dma_desc;
222
223 /* DMA address of packet data */
224 dma_addr_t dma_addr;
225
226 /* Length of DMA mapping, less than skb->len if alignment
227 * buffer is used.
228 */
229 unsigned int dma_len;
230
231 /* Number of packets in chain starting here */
232 unsigned int chain_len;
233
234 /* Packet chain ready to be submitted to hardware */
235 bool ready;
236};
237
238struct nb8800_priv {
239 struct napi_struct napi;
240
241 void __iomem *base;
242
243 /* RX DMA descriptors */
244 struct nb8800_rx_desc *rx_descs;
245
246 /* RX buffers referenced by DMA descriptors */
247 struct nb8800_rx_buf *rx_bufs;
248
249 /* Current end of chain */
250 u32 rx_eoc;
251
252 /* Value for rx interrupt time register in NAPI interrupt mode */
253 u32 rx_itr_irq;
254
255 /* Value for rx interrupt time register in NAPI poll mode */
256 u32 rx_itr_poll;
257
258 /* Value for config field of rx DMA descriptors */
259 u32 rx_dma_config;
260
261 /* TX DMA descriptors */
262 struct nb8800_tx_desc *tx_descs;
263
264 /* TX packet queue */
265 struct nb8800_tx_buf *tx_bufs;
266
267 /* Number of free tx queue entries */
268 atomic_t tx_free;
269
270 /* First free tx queue entry */
271 u32 tx_next;
272
273 /* Next buffer to transmit */
274 u32 tx_queue;
275
276 /* Start of current packet chain */
277 struct nb8800_tx_buf *tx_chain;
278
279 /* Next buffer to reclaim */
280 u32 tx_done;
281
282 /* Lock for DMA activation */
283 spinlock_t tx_lock;
284
285 struct mii_bus *mii_bus;
286 struct device_node *phy_node;
287 struct phy_device *phydev;
288
289 /* PHY connection type from DT */
290 int phy_mode;
291
292 /* Current link status */
293 int speed;
294 int duplex;
295 int link;
296
297 /* Pause settings */
298 bool pause_aneg;
299 bool pause_rx;
300 bool pause_tx;
301
302 /* DMA base address of rx descriptors, see rx_descs above */
303 dma_addr_t rx_desc_dma;
304
305 /* DMA base address of tx descriptors, see tx_descs above */
306 dma_addr_t tx_desc_dma;
307
308 struct clk *clk;
309};
310
311struct nb8800_ops {
312 int (*init)(struct net_device *dev);
313 int (*reset)(struct net_device *dev);
314};
315
316#endif /* _NB8800_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f1d62d5dbaff..2e611dc5f162 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10139,8 +10139,8 @@ static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
 		DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
 		return;
 	}
-	bp->vxlan_dst_port--;
-	if (bp->vxlan_dst_port)
+	bp->vxlan_dst_port_count--;
+	if (bp->vxlan_dst_port_count)
 		return;

 	if (netif_running(bp->dev)) {
@@ -13207,7 +13207,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,

 	/* VF with OLD Hypervisor or old PF do not support filtering */
 	if (IS_PF(bp)) {
-		if (CHIP_IS_E1x(bp))
+		if (chip_is_e1x)
 			bp->accept_any_vlan = true;
 		else
 			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index db15c5ee09c5..07f5f239cb65 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2693,17 +2693,16 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
 	req.ver_upd = DRV_VER_UPD;

 	if (BNXT_PF(bp)) {
-		unsigned long vf_req_snif_bmap[4];
+		DECLARE_BITMAP(vf_req_snif_bmap, 256);
 		u32 *data = (u32 *)vf_req_snif_bmap;

-		memset(vf_req_snif_bmap, 0, 32);
+		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
 		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
 			__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);

-		for (i = 0; i < 8; i++) {
-			req.vf_req_fwd[i] = cpu_to_le32(*data);
-			data++;
-		}
+		for (i = 0; i < 8; i++)
+			req.vf_req_fwd[i] = cpu_to_le32(data[i]);
+
 		req.enables |=
 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
 	}
@@ -3625,6 +3624,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		pf->fw_fid = le16_to_cpu(resp->fid);
 		pf->port_id = le16_to_cpu(resp->port_id);
 		memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
 		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
 		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
 		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@@ -3648,8 +3648,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)

 		vf->fw_fid = le16_to_cpu(resp->fid);
 		memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
-		if (!is_valid_ether_addr(vf->mac_addr))
-			random_ether_addr(vf->mac_addr);
+		if (is_valid_ether_addr(vf->mac_addr))
+			/* overwrite netdev dev_addr with admin VF MAC */
+			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+		else
+			random_ether_addr(bp->dev->dev_addr);

 		vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
 		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -3880,6 +3883,8 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
 #endif
 }

+static int bnxt_cfg_rx_mode(struct bnxt *);
+
 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 {
 	int rc = 0;
@@ -3946,11 +3951,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 		bp->vnic_info[0].rx_mask |=
 				CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

-	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
-	if (rc) {
-		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc);
+	rc = bnxt_cfg_rx_mode(bp);
+	if (rc)
 		goto err_out;
-	}

 	rc = bnxt_hwrm_set_coal(bp);
 	if (rc)
@@ -4599,7 +4602,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 		bp->nge_port_cnt = 1;
 	}

-	bp->state = BNXT_STATE_OPEN;
+	set_bit(BNXT_STATE_OPEN, &bp->state);
 	bnxt_enable_int(bp);
 	/* Enable TX queues */
 	bnxt_tx_enable(bp);
@@ -4675,8 +4678,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 	/* Change device state to avoid TX queue wake up's */
 	bnxt_tx_disable(bp);

-	bp->state = BNXT_STATE_CLOSED;
-	cancel_work_sync(&bp->sp_task);
+	clear_bit(BNXT_STATE_OPEN, &bp->state);
+	smp_mb__after_atomic();
+	while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
+		msleep(20);

 	/* Flush rings before disabling interrupts */
 	bnxt_shutdown_nic(bp, irq_re_init);
@@ -4865,7 +4870,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
 	}
 }

-static void bnxt_cfg_rx_mode(struct bnxt *bp)
+static int bnxt_cfg_rx_mode(struct bnxt *bp)
 {
 	struct net_device *dev = bp->dev;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
@@ -4914,6 +4919,7 @@ static void bnxt_cfg_rx_mode(struct bnxt *bp)
 			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
 				   rc);
 			vnic->uc_filter_count = i;
+			return rc;
 		}
 	}

@@ -4922,6 +4928,8 @@ skip_uc:
 	if (rc)
 		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
 			   rc);
+
+	return rc;
 }

 static netdev_features_t bnxt_fix_features(struct net_device *dev,
@@ -5023,8 +5031,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
 static void bnxt_reset_task(struct bnxt *bp)
 {
 	bnxt_dbg_dump_states(bp);
-	if (netif_running(bp->dev))
-		bnxt_tx_disable(bp); /* prevent tx timout again */
+	if (netif_running(bp->dev)) {
+		bnxt_close_nic(bp, false, false);
+		bnxt_open_nic(bp, false, false);
+	}
 }

 static void bnxt_tx_timeout(struct net_device *dev)
@@ -5074,8 +5084,12 @@ static void bnxt_sp_task(struct work_struct *work)
 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
 	int rc;

-	if (bp->state != BNXT_STATE_OPEN)
+	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+	smp_mb__after_atomic();
+	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 		return;
+	}

 	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
 		bnxt_cfg_rx_mode(bp);
@@ -5099,8 +5113,19 @@ static void bnxt_sp_task(struct work_struct *work)
 			bnxt_hwrm_tunnel_dst_port_free(
 				bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
 	}
-	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
+		/* bnxt_reset_task() calls bnxt_close_nic() which waits
+		 * for BNXT_STATE_IN_SP_TASK to clear.
+		 */
+		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+		rtnl_lock();
 		bnxt_reset_task(bp);
+		set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+		rtnl_unlock();
+	}
+
+	smp_mb__before_atomic();
+	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 }

 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
@@ -5179,7 +5204,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->timer.function = bnxt_timer;
 	bp->current_interval = BNXT_TIMER_INTERVAL;

-	bp->state = BNXT_STATE_CLOSED;
+	clear_bit(BNXT_STATE_OPEN, &bp->state);

 	return 0;

@@ -5212,13 +5237,27 @@ init_err:
 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
 {
 	struct sockaddr *addr = p;
+	struct bnxt *bp = netdev_priv(dev);
+	int rc = 0;

 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;

+#ifdef CONFIG_BNXT_SRIOV
+	if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
+		return -EADDRNOTAVAIL;
+#endif
+
+	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
+		return 0;
+
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	if (netif_running(dev)) {
+		bnxt_close_nic(bp, false, false);
+		rc = bnxt_open_nic(bp, false, false);
+	}

-	return 0;
+	return rc;
 }

 /* rtnl_lock held */
@@ -5686,15 +5725,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	bnxt_set_tpa_flags(bp);
 	bnxt_set_ring_params(bp);
 	dflt_rings = netif_get_num_default_rss_queues();
-	if (BNXT_PF(bp)) {
-		memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+	if (BNXT_PF(bp))
 		bp->pf.max_irqs = max_irqs;
-	} else {
 #if defined(CONFIG_BNXT_SRIOV)
-		memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+	else
 		bp->vf.max_irqs = max_irqs;
 #endif
-	}
 	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
 	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 674bc5159b91..f199f4cc8ffe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -925,9 +925,9 @@ struct bnxt {

 	struct timer_list	timer;

-	int			state;
-#define BNXT_STATE_CLOSED	0
-#define BNXT_STATE_OPEN		1
+	unsigned long		state;
+#define BNXT_STATE_OPEN		0
+#define BNXT_STATE_IN_SP_TASK	1

 	struct bnxt_irq	*irq_tbl;
 	u8			mac_addr[ETH_ALEN];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index f4cf68861069..ea044bbcd384 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -21,7 +21,7 @@
 #ifdef CONFIG_BNXT_SRIOV
 static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
 {
-	if (bp->state != BNXT_STATE_OPEN) {
+	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
 		netdev_err(bp->dev, "vf ndo called though PF is down\n");
 		return -EINVAL;
 	}
@@ -804,10 +804,9 @@ void bnxt_update_vf_mac(struct bnxt *bp)
 	if (!is_valid_ether_addr(resp->perm_mac_address))
 		goto update_vf_mac_exit;

-	if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
-		goto update_vf_mac_exit;
-
-	memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+	if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
+		memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+	/* overwrite netdev dev_addr with admin VF MAC */
 	memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
 update_vf_mac_exit:
 	mutex_unlock(&bp->hwrm_cmd_lock);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 88c1e1a834f8..169059c92f80 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1682,6 +1682,8 @@ static void macb_init_hw(struct macb *bp)
 	macb_set_hwaddr(bp);

 	config = macb_mdc_clk_div(bp);
+	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
+		config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
 	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
 	config |= MACB_BIT(PAE);		/* PAuse Enable */
 	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
@@ -2416,6 +2418,8 @@ static int macb_init(struct platform_device *pdev)
 	/* Set MII management clock divider */
 	val = macb_mdc_clk_div(bp);
 	val |= macb_dbw(bp);
+	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
+		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
 	macb_writel(bp, NCFGR, val);

 	return 0;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 6e1faea00ca8..d83b0db77821 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -215,12 +215,17 @@
 /* GEM specific NCFGR bitfields. */
 #define GEM_GBE_OFFSET		10 /* Gigabit mode enable */
 #define GEM_GBE_SIZE		1
+#define GEM_PCSSEL_OFFSET	11
+#define GEM_PCSSEL_SIZE		1
 #define GEM_CLK_OFFSET		18 /* MDC clock division */
 #define GEM_CLK_SIZE		3
 #define GEM_DBW_OFFSET		21 /* Data bus width */
 #define GEM_DBW_SIZE		2
 #define GEM_RXCOEN_OFFSET	24
 #define GEM_RXCOEN_SIZE		1
+#define GEM_SGMIIEN_OFFSET	27
+#define GEM_SGMIIEN_SIZE	1
+

 /* Constants for data bus width. */
 #define GEM_DBW32		0 /* 32 bit AMBA AHB data bus width */
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index f683d97d7614..b89504405b72 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -560,7 +560,7 @@ static int liquidio_resume(struct pci_dev *pdev)
 #endif

 /* For PCI-E Advanced Error Recovery (AER) Interface */
-static struct pci_error_handlers liquidio_err_handler = {
+static const struct pci_error_handlers liquidio_err_handler = {
 	.error_detected = liquidio_pcie_error_detected,
 	.mmio_enabled = liquidio_pcie_mmio_enabled,
 	.slot_reset = liquidio_pcie_slot_reset,
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index d3950b20feb9..39ca6744a4e6 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -120,10 +120,9 @@
  * Calculated for SCLK of 700Mhz
  * value written should be a 1/16th of what is expected
  *
- * 1 tick per 0.05usec = value of 2.2
- * This 10% would be covered in CQ timer thresh value
+ * 1 tick per 0.025usec
  */
-#define NICPF_CLK_PER_INT_TICK		2
+#define NICPF_CLK_PER_INT_TICK		1

 /* Time to wait before we decide that a SQ is stuck.
  *
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index c561fdcb79a7..5f24d11cb16a 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -615,6 +615,21 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
 	return 0;
 }

+static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
+{
+	int bgx, lmac;
+
+	nic->vf_enabled[vf] = enable;
+
+	if (vf >= nic->num_vf_en)
+		return;
+
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+	bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
+}
+
 /* Interrupt handler to handle mailbox messages from VFs */
 static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 {
@@ -714,14 +729,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 		break;
 	case NIC_MBOX_MSG_CFG_DONE:
 		/* Last message of VF config msg sequence */
-		nic->vf_enabled[vf] = true;
+		nic_enable_vf(nic, vf, true);
 		goto unlock;
 	case NIC_MBOX_MSG_SHUTDOWN:
 		/* First msg in VF teardown sequence */
-		nic->vf_enabled[vf] = false;
 		if (vf >= nic->num_vf_en)
 			nic->sqs_used[vf - nic->num_vf_en] = false;
 		nic->pqs_vf[vf] = 0;
+		nic_enable_vf(nic, vf, false);
 		break;
 	case NIC_MBOX_MSG_ALLOC_SQS:
 		nic_alloc_sqs(nic, &mbx.sqs_alloc);
@@ -1074,8 +1089,7 @@ static void nic_remove(struct pci_dev *pdev)

 	if (nic->check_link) {
 		/* Destroy work Queue */
-		cancel_delayed_work(&nic->dwork);
-		flush_workqueue(nic->check_link);
+		cancel_delayed_work_sync(&nic->dwork);
 		destroy_workqueue(nic->check_link);
 	}

diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index af54c10945c2..a12b2e38cf61 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -112,6 +112,13 @@ static int nicvf_get_settings(struct net_device *netdev,

 	cmd->supported = 0;
 	cmd->transceiver = XCVR_EXTERNAL;
+
+	if (!nic->link_up) {
+		cmd->duplex = DUPLEX_UNKNOWN;
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+		return 0;
+	}
+
 	if (nic->speed <= 1000) {
 		cmd->port = PORT_MII;
 		cmd->autoneg = AUTONEG_ENABLE;
@@ -125,6 +132,13 @@ static int nicvf_get_settings(struct net_device *netdev,
 	return 0;
 }

+static u32 nicvf_get_link(struct net_device *netdev)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	return nic->link_up;
+}
+
 static void nicvf_get_drvinfo(struct net_device *netdev,
 			      struct ethtool_drvinfo *info)
 {
@@ -660,7 +674,7 @@ static int nicvf_set_channels(struct net_device *dev,

 static const struct ethtool_ops nicvf_ethtool_ops = {
 	.get_settings		= nicvf_get_settings,
-	.get_link		= ethtool_op_get_link,
+	.get_link		= nicvf_get_link,
 	.get_drvinfo		= nicvf_get_drvinfo,
 	.get_msglevel		= nicvf_get_msglevel,
 	.set_msglevel		= nicvf_set_msglevel,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a9377727c11c..dde8dc720cd3 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1057,6 +1057,7 @@ int nicvf_stop(struct net_device *netdev)

 	netif_carrier_off(netdev);
 	netif_tx_stop_all_queues(nic->netdev);
+	nic->link_up = false;

 	/* Teardown secondary qsets first */
 	if (!nic->sqs_mode) {
@@ -1211,9 +1212,6 @@ int nicvf_open(struct net_device *netdev)
 	nic->drv_stats.txq_stop = 0;
 	nic->drv_stats.txq_wake = 0;

-	netif_carrier_on(netdev);
-	netif_tx_start_all_queues(netdev);
-
 	return 0;
 cleanup:
 	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
@@ -1583,8 +1581,14 @@ err_disable_device:
 static void nicvf_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct nicvf *nic = netdev_priv(netdev);
-	struct net_device *pnetdev = nic->pnicvf->netdev;
+	struct nicvf *nic;
+	struct net_device *pnetdev;
+
+	if (!netdev)
+		return;
+
+	nic = netdev_priv(netdev);
+	pnetdev = nic->pnicvf->netdev;
 
 	/* Check if this Qset is assigned to different VF.
 	 * If yes, clean primary and all secondary Qsets.
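The nicvf_remove() hunk fetches drvdata first and bails out when no netdev was ever attached, instead of unconditionally dereferencing it through netdev_priv(). The shape of the guard, with hypothetical names:

static void my_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct my_nic *nic;

	if (!netdev)	/* probe failed or never completed: nothing to undo */
		return;

	nic = netdev_priv(netdev);
	/* ... normal teardown using nic ... */
}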
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index e404ea837727..206b6a71a545 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -592,7 +592,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
 	/* Set threshold value for interrupt generation */
 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
-			      qidx, nic->cq_coalesce_usecs);
+			      qidx, CMP_QUEUE_TIMER_THRESH);
 }
 
 /* Configures transmit queue */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index fb4957d09914..033e8306e91c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -76,7 +76,7 @@
 #define CMP_QSIZE		CMP_QUEUE_SIZE2
 #define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
 #define CMP_QUEUE_CQE_THRESH	0
-#define CMP_QUEUE_TIMER_THRESH	220 /* 10usec */
+#define CMP_QUEUE_TIMER_THRESH	80 /* ~2usec */
 
 #define RBDR_SIZE		RBDR_SIZE0
 #define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
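The nicvf_queues.c and nicvf_queues.h hunks go together: as far as this diff shows, nic->cq_coalesce_usecs was written into the CQ timer register without ever being initialized, so the write now uses the compile-time default CMP_QUEUE_TIMER_THRESH, retuned from 220 to 80 ticks. A sketch of the pattern; every name below is hypothetical, not the ThunderX API:

#define CQ_TIMER_THRESH	80	/* ticks before a coalesced IRQ fires (illustrative) */

struct my_nic;					/* hypothetical device struct */
void my_queue_reg_write(struct my_nic *nic,	/* hypothetical MMIO helper */
			unsigned long reg, int qidx, unsigned long val);

static void cq_configure_irq_timer(struct my_nic *nic, int qidx)
{
	/* program the named constant rather than an uninitialized field */
	my_queue_reg_write(nic, 0x420UL /* CFG2 offset, illustrative */,
			   qidx, CQ_TIMER_THRESH);
}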
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 180aa9fabf48..9df26c2263bc 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -186,6 +186,23 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
 }
 EXPORT_SYMBOL(bgx_set_lmac_mac);
 
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
+{
+	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+	u64 cfg;
+
+	if (!bgx)
+		return;
+
+	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+	if (enable)
+		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
+	else
+		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+}
+EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
+
 static void bgx_sgmii_change_link_state(struct lmac *lmac)
 {
 	struct bgx *bgx = lmac->bgx;
@@ -612,6 +629,8 @@ static void bgx_poll_for_link(struct work_struct *work)
 		lmac->last_duplex = 1;
 	} else {
 		lmac->link_up = 0;
+		lmac->last_speed = SPEED_UNKNOWN;
+		lmac->last_duplex = DUPLEX_UNKNOWN;
 	}
 
 	if (lmac->last_link != lmac->link_up) {
@@ -654,8 +673,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 	}
 
 	/* Enable lmac */
-	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
-		       CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
 
 	/* Restore default cfg, incase low level firmware changed it */
 	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
@@ -695,8 +713,7 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
 	lmac = &bgx->lmac[lmacid];
 	if (lmac->check_link) {
 		/* Destroy work queue */
-		cancel_delayed_work(&lmac->dwork);
-		flush_workqueue(lmac->check_link);
+		cancel_delayed_work_sync(&lmac->dwork);
 		destroy_workqueue(lmac->check_link);
 	}
 
@@ -1009,6 +1026,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct bgx *bgx = NULL;
 	u8 lmac;
 
+	/* Load octeon mdio driver */
+	octeon_mdiobus_force_mod_depencency();
+
 	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
 	if (!bgx)
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 07b7ec66c60d..149e179363a1 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -182,6 +182,8 @@ enum MCAST_MODE {
 #define BCAST_ACCEPT	1
 #define CAM_ACCEPT	1
 
+void octeon_mdiobus_force_mod_depencency(void);
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
 void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
 unsigned bgx_get_map(int node);
 int bgx_get_lmac_count(int node, int bgx);
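The new exported bgx_lmac_rx_tx_enable() moves the packet RX/TX enable bits out of one-time LMAC init (see the bgx_lmac_enable() hunk, which now sets only CMR_EN) and into a helper that can be flipped at link-change time. It is the classic read-modify-write pattern on an MMIO config register; a generic sketch using the kernel's 64-bit accessors, with illustrative bit positions:

#include <linux/io.h>
#include <linux/bitops.h>

#define PKT_RX_EN	BIT_ULL(0)	/* illustrative, not the BGX layout */
#define PKT_TX_EN	BIT_ULL(1)

static void mac_rx_tx_enable(void __iomem *cfg_reg, bool enable)
{
	u64 cfg = readq(cfg_reg);	/* read current config */

	if (enable)
		cfg |= PKT_RX_EN | PKT_TX_EN;
	else
		cfg &= ~(PKT_RX_EN | PKT_TX_EN);

	writeq(cfg, cfg_reg);		/* write back the modified value */
}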
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index ed41559bae77..b553409e04ad 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -98,8 +98,7 @@ static int csr0 = 0x01A00000 | 0x4800;
 #elif defined(__mips__)
 static int csr0 = 0x00200000 | 0x4000;
 #else
-#warning Processor architecture undefined!
-static int csr0 = 0x00A00000 | 0x4800;
+static int csr0;
 #endif
 
 /* Operational parameters that usually are not changed. */
@@ -1982,6 +1981,12 @@ static int __init tulip_init (void)
 	pr_info("%s", version);
 #endif
 
+	if (!csr0) {
+		pr_warn("tulip: unknown CPU architecture, using default csr0\n");
+		/* default to 8 longword cache line alignment */
+		csr0 = 0x00A00000 | 0x4800;
+	}
+
 	/* copy module parms into globals */
 	tulip_rx_copybreak = rx_copybreak;
 	tulip_max_interrupt_work = max_interrupt_work;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 9beb3d34d4ba..3c0e4d5c5fef 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -907,7 +907,7 @@ static void init_registers(struct net_device *dev)
 #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
 	i |= 0x4800;
 #else
-#warning Processor architecture undefined
+	dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
 	i |= 0x4800;
 #endif
 	iowrite32(i, ioaddr + PCIBusCfg);
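Both tulip changes above drop a build-time "#warning Processor architecture undefined", which breaks builds run with -Werror on unlisted architectures, in favor of a runtime fallback: the tuning value is left at 0 (or the generic default) and patched in with a one-line warning at init time. The pattern in miniature, values copied from the diff:

#include <linux/init.h>
#include <linux/printk.h>

#if defined(__i386__)
static int csr0 = 0x01A00000 | 0x8000;	/* arch-specific tuning */
#else
static int csr0;			/* 0 means "not tuned for this arch" */
#endif

static int __init my_init(void)
{
	if (!csr0) {
		pr_warn("unknown CPU architecture, using default csr0\n");
		csr0 = 0x00A00000 | 0x4800;	/* conservative default */
	}
	return 0;
}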
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index f6e858d0b9d4..ebdc83247bb6 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -17,15 +17,16 @@ config NET_VENDOR_DLINK
 if NET_VENDOR_DLINK
 
 config DL2K
-	tristate "DL2000/TC902x-based Gigabit Ethernet support"
+	tristate "DL2000/TC902x/IP1000A-based Gigabit Ethernet support"
 	depends on PCI
 	select CRC32
 	---help---
-	  This driver supports DL2000/TC902x-based Gigabit ethernet cards,
+	  This driver supports DL2000/TC902x/IP1000A-based Gigabit ethernet cards,
 	  which includes
 	  D-Link DGE-550T Gigabit Ethernet Adapter.
 	  D-Link DL2000-based Gigabit Ethernet Adapter.
 	  Sundance/Tamarack TC902x Gigabit Ethernet Adapter.
+	  ICPlus IP1000A-based cards
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called dl2k.
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index cf0a5fcdaaaf..ccca4799c27b 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -253,6 +253,19 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_out_unmap_rx;
 
+	if (np->chip_id == CHIP_IP1000A &&
+	    (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
+		/* PHY magic taken from ipg driver, undocumented registers */
+		mii_write(dev, np->phy_addr, 31, 0x0001);
+		mii_write(dev, np->phy_addr, 27, 0x01e0);
+		mii_write(dev, np->phy_addr, 31, 0x0002);
+		mii_write(dev, np->phy_addr, 27, 0xeb8e);
+		mii_write(dev, np->phy_addr, 31, 0x0000);
+		mii_write(dev, np->phy_addr, 30, 0x005e);
+		/* advertise 1000BASE-T half & full duplex, prefer MASTER */
+		mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
+	}
+
 	/* Fiber device? */
 	np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
 	np->link_status = 0;
@@ -361,6 +374,11 @@ parse_eeprom (struct net_device *dev)
 	for (i = 0; i < 6; i++)
 		dev->dev_addr[i] = psrom->mac_addr[i];
 
+	if (np->chip_id == CHIP_IP1000A) {
+		np->led_mode = psrom->led_mode;
+		return 0;
+	}
+
 	if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
 		return 0;
 	}
@@ -406,6 +424,28 @@ parse_eeprom (struct net_device *dev)
 	return 0;
 }
 
+static void rio_set_led_mode(struct net_device *dev)
+{
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
+	u32 mode;
+
+	if (np->chip_id != CHIP_IP1000A)
+		return;
+
+	mode = dr32(ASICCtrl);
+	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
+
+	if (np->led_mode & 0x01)
+		mode |= IPG_AC_LED_MODE;
+	if (np->led_mode & 0x02)
+		mode |= IPG_AC_LED_MODE_BIT_1;
+	if (np->led_mode & 0x08)
+		mode |= IPG_AC_LED_SPEED;
+
+	dw32(ASICCtrl, mode);
+}
+
 static int
 rio_open (struct net_device *dev)
 {
@@ -424,6 +464,8 @@ rio_open (struct net_device *dev)
 		 GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
 	mdelay(10);
 
+	rio_set_led_mode(dev);
+
 	/* DebugCtrl bit 4, 5, 9 must set */
 	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
 
@@ -433,9 +475,13 @@ rio_open (struct net_device *dev)
 
 	alloc_list (dev);
 
-	/* Get station address */
-	for (i = 0; i < 6; i++)
-		dw8(StationAddr0 + i, dev->dev_addr[i]);
+	/* Set station address */
+	/* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
+	 * too. However, it doesn't work on IP1000A so we use 16-bit access.
+	 */
+	for (i = 0; i < 3; i++)
+		dw16(StationAddr0 + 2 * i,
+		     cpu_to_le16(((u16 *)dev->dev_addr)[i]));
 
 	set_multicast (dev);
 	if (np->coalesce) {
@@ -780,6 +826,7 @@ tx_error (struct net_device *dev, int tx_status)
 			break;
 		mdelay (1);
 	}
+	rio_set_led_mode(dev);
 	rio_free_tx (dev, 1);
 	/* Reset TFDListPtr */
 	dw32(TFDListPtr0, np->tx_ring_dma +
@@ -799,6 +846,7 @@ tx_error (struct net_device *dev, int tx_status)
 			break;
 		mdelay (1);
 	}
+	rio_set_led_mode(dev);
 	/* Let TxStartThresh stay default value */
 	}
 	/* Maximum Collisions */
@@ -965,6 +1013,7 @@ rio_error (struct net_device *dev, int int_status)
 			dev->name, int_status);
 		dw16(ASICCtrl + 2, GlobalReset | HostReset);
 		mdelay (500);
+		rio_set_led_mode(dev);
 	}
 }
 
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 23c07b007069..8f4f61262d5c 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -211,6 +211,10 @@ enum ASICCtrl_HiWord_bits {
 	ResetBusy = 0x0400,
 };
 
+#define IPG_AC_LED_MODE		BIT(14)
+#define IPG_AC_LED_SPEED	BIT(27)
+#define IPG_AC_LED_MODE_BIT_1	BIT(29)
+
 /* Transmit Frame Control bits */
 enum TFC_bits {
 	DwordAlign = 0x00000000,
@@ -332,7 +336,10 @@ typedef struct t_SROM {
 	u16 asic_ctrl;			/* 0x02 */
 	u16 sub_vendor_id;		/* 0x04 */
 	u16 sub_system_id;		/* 0x06 */
-	u16 reserved1[12];		/* 0x08-0x1f */
+	u16 pci_base_1;			/* 0x08 (IP1000A only) */
+	u16 pci_base_2;			/* 0x0a (IP1000A only) */
+	u16 led_mode;			/* 0x0c (IP1000A only) */
+	u16 reserved1[9];		/* 0x0e-0x1f */
 	u8 mac_addr[6];			/* 0x20-0x25 */
 	u8 reserved2[10];		/* 0x26-0x2f */
 	u8 sib[204];			/* 0x30-0xfb */
@@ -397,6 +404,7 @@ struct netdev_private {
 	u16 advertising;	/* NWay media advertisement */
 	u16 negotiate;		/* Negotiated media */
 	int phy_addr;		/* PHY addresses. */
+	u16 led_mode;		/* LED mode read from EEPROM (IP1000A only) */
 };
401 409
402/* The station address location in the EEPROM. */ 410/* The station address location in the EEPROM. */
@@ -407,10 +415,15 @@ struct netdev_private {
407 class_mask of the class are honored during the comparison. 415 class_mask of the class are honored during the comparison.
408 driver_data Data private to the driver. 416 driver_data Data private to the driver.
409*/ 417*/
418#define CHIP_IP1000A 1
410 419
411static const struct pci_device_id rio_pci_tbl[] = { 420static const struct pci_device_id rio_pci_tbl[] = {
412 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, }, 421 {0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
413 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, }, 422 {0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
423 { PCI_VDEVICE(SUNDANCE, 0x1023), CHIP_IP1000A },
424 { PCI_VDEVICE(SUNDANCE, 0x2021), CHIP_IP1000A },
425 { PCI_VDEVICE(DLINK, 0x9021), CHIP_IP1000A },
426 { PCI_VDEVICE(DLINK, 0x4020), CHIP_IP1000A },
414 { } 427 { }
415}; 428};
416MODULE_DEVICE_TABLE (pci, rio_pci_tbl); 429MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
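The dl2k changes above take over the four IP1000A PCI IDs previously claimed by the ipg driver (whose removal appears further down, under drivers/net/ethernet/icplus) and tag them with CHIP_IP1000A in driver_data so probe-time code can branch on chip type. A sketch of the mechanism, with hypothetical names:

#include <linux/module.h>
#include <linux/pci.h>

#define CHIP_IP1000A	1

static const struct pci_device_id my_pci_tbl[] = {
	{ 0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },	/* driver_data = 0 */
	{ PCI_VDEVICE(SUNDANCE, 0x1023), CHIP_IP1000A },
	{ }
};
MODULE_DEVICE_TABLE(pci, my_pci_tbl);

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	if (ent->driver_data == CHIP_IP1000A) {
		/* chip-specific setup, e.g. the PHY writes in the hunk above */
	}
	return 0;
}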
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f4cb8e425853..734f655c99c1 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1062,9 +1062,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 static int be_set_rss_hash_opts(struct be_adapter *adapter,
 				struct ethtool_rxnfc *cmd)
 {
-	struct be_rx_obj *rxo;
-	int status = 0, i, j;
-	u8 rsstable[128];
+	int status;
 	u32 rss_flags = adapter->rss_info.rss_flags;
 
 	if (cmd->data != L3_RSS_FLAGS &&
@@ -1113,20 +1111,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
 	}
 
 	if (rss_flags == adapter->rss_info.rss_flags)
-		return status;
-
-	if (be_multi_rxq(adapter)) {
-		for (j = 0; j < 128; j += adapter->num_rss_qs) {
-			for_all_rss_queues(adapter, rxo, i) {
-				if ((j + i) >= 128)
-					break;
-				rsstable[j + i] = rxo->rss_id;
-			}
-		}
-	}
+		return 0;
 
 	status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
-				   rss_flags, 128, adapter->rss_info.rss_hkey);
+				   rss_flags, RSS_INDIR_TABLE_LEN,
+				   adapter->rss_info.rss_hkey);
 	if (!status)
 		adapter->rss_info.rss_flags = rss_flags;
 
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index eb48a977f8da..b6ad02909d6b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3518,7 +3518,7 @@ static int be_rx_qs_create(struct be_adapter *adapter)
 
 	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
 	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
-			       128, rss_key);
+			       RSS_INDIR_TABLE_LEN, rss_key);
 	if (rc) {
 		rss->rss_flags = RSS_ENABLE_NONE;
 		return rc;
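Both benet hunks replace the magic number 128 with the named RSS_INDIR_TABLE_LEN when programming the RSS indirection table, and be_set_rss_hash_opts() stops rebuilding a local copy of the table since the adapter already caches it in rss_info.rsstable. For context, a generic sketch of how such an indirection table is typically filled (illustrative, not the benet code):

#define RSS_INDIR_TABLE_LEN	128	/* table size fixed by the hardware */

/* spread the table slots round-robin across the active RX queues */
static void fill_rss_indir(unsigned char *table, unsigned int num_rx_queues)
{
	unsigned int i;

	for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
		table[i] = i % num_rx_queues;
}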
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 63c2bcf8031a..b1026689b78f 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -48,21 +48,15 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev,
 		*reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
 	else { /* !dst_is_aligned */
 		for (i = 0; i < len; i++, reg++) {
-			u32 buf =
-				nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
-
-			/* to accommodate word-unaligned address of "reg"
-			 * we have to do memcpy_toio() instead of simple "="
-			 */
-			memcpy_toio((void __iomem *)reg, &buf, sizeof(buf));
+			u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+			put_unaligned(buf, reg);
 		}
 	}
 
 	/* copy last bytes (if any) */
 	if (last) {
 		u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
-
-		memcpy_toio((void __iomem *)reg, &buf, last);
+		memcpy((u8*)reg, &buf, last);
 	}
 }
 
@@ -367,7 +361,7 @@ static void nps_enet_send_frame(struct net_device *ndev,
 	struct nps_enet_tx_ctl tx_ctrl;
 	short length = skb->len;
 	u32 i, len = DIV_ROUND_UP(length, sizeof(u32));
-	u32 *src = (u32 *)virt_to_phys(skb->data);
+	u32 *src = (void *)skb->data;
 	bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32));
 
 	tx_ctrl.value = 0;
@@ -375,17 +369,11 @@ static void nps_enet_send_frame(struct net_device *ndev,
 	if (src_is_aligned)
 		for (i = 0; i < len; i++, src++)
 			nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src);
-	else { /* !src_is_aligned */
-		for (i = 0; i < len; i++, src++) {
-			u32 buf;
-
-			/* to accommodate word-unaligned address of "src"
-			 * we have to do memcpy_fromio() instead of simple "="
-			 */
-			memcpy_fromio(&buf, (void __iomem *)src, sizeof(buf));
-			nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, buf);
-		}
-	}
+	else /* !src_is_aligned */
+		for (i = 0; i < len; i++, src++)
+			nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF,
+					 get_unaligned(src));
+
 	/* Write the length of the Frame */
 	tx_ctrl.nt = length;
 
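The nps_enet hunks fix two misuses at once: the skb buffers live in normal kernel memory, not MMIO space, so memcpy_toio()/memcpy_fromio() (and the bogus virt_to_phys() cast on the TX side) were the wrong tools; get_unaligned()/put_unaligned() are the intended helpers for word-sized access at a possibly unaligned address. A small sketch:

#include <asm/unaligned.h>

/* copy a 32-bit word to/from a pointer that may not be 4-byte aligned */
static void store_word(u32 val, void *dst)
{
	put_unaligned(val, (u32 *)dst);		/* safe even if dst % 4 != 0 */
}

static u32 load_word(const void *src)
{
	return get_unaligned((const u32 *)src);
}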
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index ff76d4e9dc1b..bee32a9d9876 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE
 	default y
 	depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
 		   M523x || M527x || M5272 || M528x || M520x || M532x || \
-		   ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
+		   ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
+		   ARCH_LAYERSCAPE
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
 
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 08f5b911d96b..52e0091b4fb2 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev)
 	cbd_t __iomem *prev_bd;
 	cbd_t __iomem *last_tx_bd;
 
-	last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t));
+	last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t));
 
 	/* get the current bd held in TBPTR and scan back from this point */
 	recheck_bd = curr_tbptr = (cbd_t __iomem *)
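The mac-fcc fix above is an off-by-one: in a ring of tx_ring descriptors, the last valid descriptor starts at offset (tx_ring - 1) * sizeof(cbd_t); the old expression pointed one element past the end of the ring. In plain terms:

#include <stddef.h>

/* a ring of n fixed-size descriptors occupies [base, base + n * size) */
static inline void *ring_last(void *base, size_t nelems, size_t elem_size)
{
	/* the last element starts at index nelems - 1, not nelems */
	return (char *)base + (nelems - 1) * elem_size;
}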
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 55c36230e176..40071dad1c57 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -464,7 +464,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 	 * address).  Print error message but continue anyway.
 	 */
 	if ((void *)tbipa > priv->map + resource_size(&res) - 4)
-		dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n",
+		dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
 			((void *)tbipa - priv->map) + 4);
 
 	iowrite32be(be32_to_cpup(prop), tbipa);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3e6b9b437497..3e233d924cce 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -647,9 +647,9 @@ static int gfar_parse_group(struct device_node *np,
 	if (model && strcasecmp(model, "FEC")) {
 		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
 		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
-		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
-		    gfar_irq(grp, RX)->irq == NO_IRQ ||
-		    gfar_irq(grp, ER)->irq == NO_IRQ)
+		if (!gfar_irq(grp, TX)->irq ||
+		    !gfar_irq(grp, RX)->irq ||
+		    !gfar_irq(grp, ER)->irq)
 			return -EINVAL;
 	}
 
@@ -894,7 +894,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 		     FSL_GIANFAR_DEV_HAS_VLAN |
 		     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
 		     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
-		     FSL_GIANFAR_DEV_HAS_TIMER;
+		     FSL_GIANFAR_DEV_HAS_TIMER |
+		     FSL_GIANFAR_DEV_HAS_RX_FILER;
 
 	err = of_property_read_string(np, "phy-connection-type", &ctype);
 
@@ -1396,8 +1397,9 @@ static int gfar_probe(struct platform_device *ofdev)
 		priv->rx_queue[i]->rxic = DEFAULT_RXIC;
 	}
 
-	/* always enable rx filer */
-	priv->rx_filer_enable = 1;
+	/* Always enable rx filer if available */
+	priv->rx_filer_enable =
+	    (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
 	/* Enable most messages by default */
 	priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
 	/* use pritority h/w tx queue scheduling for single queue devices */
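gianfar introduces a FSL_GIANFAR_DEV_HAS_RX_FILER capability bit (defined in the gianfar.h hunk below) and enables the RX filer only when the probed device advertises it, instead of unconditionally. The capability-gating pattern, with hypothetical names:

#define DEV_HAS_RX_FILER	0x00002000	/* per-device capability bit */

struct my_priv {
	unsigned int device_flags;	/* capabilities set at probe time */
	unsigned int rx_filer_enable:1;
};

static void apply_caps(struct my_priv *priv)
{
	/* turn the feature on only where the silicon supports it */
	priv->rx_filer_enable =
		(priv->device_flags & DEV_HAS_RX_FILER) ? 1 : 0;
}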
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index f266b20f9ef5..cb77667971a7 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -923,6 +923,7 @@ struct gfar {
 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING	0x00000400
 #define FSL_GIANFAR_DEV_HAS_TIMER		0x00000800
 #define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER	0x00001000
+#define FSL_GIANFAR_DEV_HAS_RX_FILER		0x00002000
 
 #if (MAXGROUPS == 2)
 #define DEFAULT_MAPPING 	0xAA
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 664d0c261269..b40fba929d65 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -467,7 +467,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
 
 	etsects->irq = platform_get_irq(dev, 0);
 
-	if (etsects->irq == NO_IRQ) {
+	if (etsects->irq < 0) {
 		pr_err("irq not in device tree\n");
 		goto no_node;
 	}
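platform_get_irq() returns a negative errno on failure, never the legacy NO_IRQ constant (which is not even defined on all architectures), so `< 0` is the correct failure test. The gianfar.c change above follows the same reasoning for irq_of_parse_and_map(), which returns 0 on failure, hence the `!irq` test there. Sketch:

#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)	/* negative errno, e.g. -ENXIO: no IRQ described */
		return irq;

	/* ... request_irq(irq, ...) ... */
	return 0;
}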
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 2a98eba660c0..b674414a4d72 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1259,12 +1259,8 @@ int hns_dsaf_set_mac_uc_entry(
 	if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
 	    MAC_IS_BROADCAST(mac_entry->addr) ||
 	    MAC_IS_MULTICAST(mac_entry->addr)) {
-		dev_err(dsaf_dev->dev,
-			"set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
-			dsaf_dev->ae_dev.name, mac_entry->addr[0],
-			mac_entry->addr[1], mac_entry->addr[2],
-			mac_entry->addr[3], mac_entry->addr[4],
-			mac_entry->addr[5]);
+		dev_err(dsaf_dev->dev, "set_uc %s Mac %pM err!\n",
+			dsaf_dev->ae_dev.name, mac_entry->addr);
 		return -EINVAL;
 	}
 
@@ -1331,12 +1327,8 @@ int hns_dsaf_set_mac_mc_entry(
 
 	/* mac addr check */
 	if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
-		dev_err(dsaf_dev->dev,
-			"set uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
-			dsaf_dev->ae_dev.name, mac_entry->addr[0],
-			mac_entry->addr[1], mac_entry->addr[2],
-			mac_entry->addr[3],
-			mac_entry->addr[4], mac_entry->addr[5]);
+		dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n",
+			dsaf_dev->ae_dev.name, mac_entry->addr);
 		return -EINVAL;
 	}
 
@@ -1410,11 +1402,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
 
 	/*chechk mac addr */
 	if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
-		dev_err(dsaf_dev->dev,
-			"set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
-			mac_entry->addr[0], mac_entry->addr[1],
-			mac_entry->addr[2], mac_entry->addr[3],
-			mac_entry->addr[4], mac_entry->addr[5]);
+		dev_err(dsaf_dev->dev, "set_entry failed,addr %pM!\n",
+			mac_entry->addr);
 		return -EINVAL;
 	}
 
@@ -1497,9 +1486,8 @@ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
 
 	/*check mac addr */
 	if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) {
-		dev_err(dsaf_dev->dev,
-			"del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
-			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+		dev_err(dsaf_dev->dev, "del_entry failed,addr %pM!\n",
+			addr);
 		return -EINVAL;
 	}
 
@@ -1563,11 +1551,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
 
 	/*check mac addr */
 	if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
-		dev_err(dsaf_dev->dev,
-			"del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
-			mac_entry->addr[0], mac_entry->addr[1],
-			mac_entry->addr[2], mac_entry->addr[3],
-			mac_entry->addr[4], mac_entry->addr[5]);
+		dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n",
+			mac_entry->addr);
 		return -EINVAL;
 	}
 
@@ -1644,11 +1629,8 @@ int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
 	/* check macaddr */
 	if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
 	    MAC_IS_BROADCAST(mac_entry->addr)) {
-		dev_err(dsaf_dev->dev,
-			"get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
-			mac_entry->addr[0], mac_entry->addr[1],
-			mac_entry->addr[2], mac_entry->addr[3],
-			mac_entry->addr[4], mac_entry->addr[5]);
+		dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
+			mac_entry->addr);
 		return -EINVAL;
 	}
 
@@ -1695,11 +1677,8 @@ int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
 	/*check mac addr */
 	if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
 	    MAC_IS_BROADCAST(mac_entry->addr)) {
-		dev_err(dsaf_dev->dev,
-			"get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
-			mac_entry->addr[0], mac_entry->addr[1],
-			mac_entry->addr[2], mac_entry->addr[3],
-			mac_entry->addr[4], mac_entry->addr[5]);
+		dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
+			mac_entry->addr);
 		return -EINVAL;
 	}
 
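All seven hns hunks above collapse hand-rolled "%02x:%02x:..." MAC formatting into the kernel's %pM printk extension, which prints a 6-byte MAC address from a pointer. Sketch:

#include <linux/printk.h>

static void report_bad_mac(const unsigned char *addr)
{
	/* %pM expands a 6-byte buffer as aa:bb:cc:dd:ee:ff */
	pr_err("invalid MAC address %pM\n", addr);
}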
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index b475e1bf2e6f..bdbd80423b17 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -898,7 +898,7 @@
 #define XGMAC_PAUSE_CTL_RSP_MODE_B	2
 #define XGMAC_PAUSE_CTL_TX_XOFF_B	3
 
-static inline void dsaf_write_reg(void *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 {
 	u8 __iomem *reg_addr = ACCESS_ONCE(base);
 
@@ -908,7 +908,7 @@ static inline void dsaf_write_reg(void *base, u32 reg, u32 value)
 #define dsaf_write_dev(a, reg, value) \
 	dsaf_write_reg((a)->io_base, (reg), (value))
 
-static inline u32 dsaf_read_reg(u8 *base, u32 reg)
+static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
 {
 	u8 __iomem *reg_addr = ACCESS_ONCE(base);
 
@@ -927,8 +927,8 @@ static inline u32 dsaf_read_reg(u8 *base, u32 reg)
 #define dsaf_set_bit(origin, shift, val) \
 	dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
 
-static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
-				      u32 val)
+static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+				      u32 shift, u32 val)
 {
 	u32 origin = dsaf_read_reg(base, reg);
 
@@ -947,7 +947,8 @@ static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
 #define dsaf_get_bit(origin, shift) \
 	dsaf_get_field((origin), (1ull << (shift)), (shift))
 
-static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+				     u32 shift)
 {
 	u32 origin;
 
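The hns_dsaf_reg.h hunks annotate the accessor parameters with __iomem, the sparse address-space attribute for MMIO pointers; the function bodies already used `u8 __iomem *`, so the old prototypes forced implicit address-space casts that `make C=1` (sparse) flags. A minimal example of a correctly annotated accessor:

#include <linux/io.h>

/* sparse warns when a plain pointer and an __iomem pointer are mixed */
static inline void reg_write(void __iomem *base, u32 reg, u32 value)
{
	writel(value, base + reg);	/* MMIO accessors take __iomem */
}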
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig
deleted file mode 100644
index 14a66e9d2e26..000000000000
--- a/drivers/net/ethernet/icplus/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# IC Plus device configuration
-#
-
-config IP1000
-	tristate "IP1000 Gigabit Ethernet support"
-	depends on PCI
-	select MII
-	---help---
-	  This driver supports IP1000 gigabit Ethernet cards.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called ipg.  This is recommended.
diff --git a/drivers/net/ethernet/icplus/Makefile b/drivers/net/ethernet/icplus/Makefile
deleted file mode 100644
index 5bc87c1f36aa..000000000000
--- a/drivers/net/ethernet/icplus/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the IC Plus device drivers
-#
-
-obj-$(CONFIG_IP1000) += ipg.o
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
deleted file mode 100644
index c3b6af83f070..000000000000
--- a/drivers/net/ethernet/icplus/ipg.c
+++ /dev/null
@@ -1,2300 +0,0 @@
1/*
2 * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
3 *
4 * Copyright (C) 2003, 2007 IC Plus Corp
5 *
6 * Original Author:
7 *
8 * Craig Rich
9 * Sundance Technology, Inc.
10 * www.sundanceti.com
11 * craig_rich@sundanceti.com
12 *
13 * Current Maintainer:
14 *
15 * Sorbica Shieh.
16 * http://www.icplus.com.tw
17 * sorbica@icplus.com.tw
18 *
19 * Jesse Huang
20 * http://www.icplus.com.tw
21 * jesse@icplus.com.tw
22 */
23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26#include <linux/crc32.h>
27#include <linux/ethtool.h>
28#include <linux/interrupt.h>
29#include <linux/gfp.h>
30#include <linux/mii.h>
31#include <linux/mutex.h>
32
33#include <asm/div64.h>
34
35#define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
36#define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
37#define IPG_RESET_MASK \
38 (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
39 IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
40 IPG_AC_AUTO_INIT)
41
42#define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg))
43#define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg))
44#define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg))
45
46#define ipg_r32(reg) ioread32(ioaddr + (reg))
47#define ipg_r16(reg) ioread16(ioaddr + (reg))
48#define ipg_r8(reg) ioread8(ioaddr + (reg))
49
50enum {
51 netdev_io_size = 128
52};
53
54#include "ipg.h"
55#define DRV_NAME "ipg"
56
57MODULE_AUTHOR("IC Plus Corp. 2003");
58MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
59MODULE_LICENSE("GPL");
60
61/*
62 * Defaults
63 */
64#define IPG_MAX_RXFRAME_SIZE 0x0600
65#define IPG_RXFRAG_SIZE 0x0600
66#define IPG_RXSUPPORT_SIZE 0x0600
67#define IPG_IS_JUMBO false
68
69/*
70 * Variable record -- index by leading revision/length
71 * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
72 */
73static const unsigned short DefaultPhyParam[] = {
74 /* 11/12/03 IP1000A v1-3 rev=0x40 */
75 /*--------------------------------------------------------------------------
76 (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
77 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
78 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
79 --------------------------------------------------------------------------*/
80 /* 12/17/03 IP1000A v1-4 rev=0x40 */
81 (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
82 0x0000,
83 30, 0x005e, 9, 0x0700,
84 /* 01/09/04 IP1000A v1-5 rev=0x41 */
85 (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
86 0x0000,
87 30, 0x005e, 9, 0x0700,
88 0x0000
89};
90
91static const char * const ipg_brand_name[] = {
92 "IC PLUS IP1000 1000/100/10 based NIC",
93 "Sundance Technology ST2021 based NIC",
94 "Tamarack Microelectronics TC9020/9021 based NIC",
95 "D-Link NIC IP1000A"
96};
97
98static const struct pci_device_id ipg_pci_tbl[] = {
99 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
100 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
101 { PCI_VDEVICE(DLINK, 0x9021), 2 },
102 { PCI_VDEVICE(DLINK, 0x4020), 3 },
103 { 0, }
104};
105
106MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);
107
108static inline void __iomem *ipg_ioaddr(struct net_device *dev)
109{
110 struct ipg_nic_private *sp = netdev_priv(dev);
111 return sp->ioaddr;
112}
113
114#ifdef IPG_DEBUG
115static void ipg_dump_rfdlist(struct net_device *dev)
116{
117 struct ipg_nic_private *sp = netdev_priv(dev);
118 void __iomem *ioaddr = sp->ioaddr;
119 unsigned int i;
120 u32 offset;
121
122 IPG_DEBUG_MSG("_dump_rfdlist\n");
123
124 netdev_info(dev, "rx_current = %02x\n", sp->rx_current);
125 netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty);
126 netdev_info(dev, "RFDList start address = %016lx\n",
127 (unsigned long)sp->rxd_map);
128 netdev_info(dev, "RFDListPtr register = %08x%08x\n",
129 ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));
130
131 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
132 offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
133 netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n",
134 i, offset, (unsigned long)sp->rxd[i].next_desc);
135 offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
136 netdev_info(dev, "%02x %04x RFS = %016lx\n",
137 i, offset, (unsigned long)sp->rxd[i].rfs);
138 offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
139 netdev_info(dev, "%02x %04x frag_info = %016lx\n",
140 i, offset, (unsigned long)sp->rxd[i].frag_info);
141 }
142}
143
144static void ipg_dump_tfdlist(struct net_device *dev)
145{
146 struct ipg_nic_private *sp = netdev_priv(dev);
147 void __iomem *ioaddr = sp->ioaddr;
148 unsigned int i;
149 u32 offset;
150
151 IPG_DEBUG_MSG("_dump_tfdlist\n");
152
153 netdev_info(dev, "tx_current = %02x\n", sp->tx_current);
154 netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty);
155 netdev_info(dev, "TFDList start address = %016lx\n",
156 (unsigned long) sp->txd_map);
157 netdev_info(dev, "TFDListPtr register = %08x%08x\n",
158 ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));
159
160 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
161 offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
162 netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n",
163 i, offset, (unsigned long)sp->txd[i].next_desc);
164
165 offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
166 netdev_info(dev, "%02x %04x TFC = %016lx\n",
167 i, offset, (unsigned long) sp->txd[i].tfc);
168 offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
169 netdev_info(dev, "%02x %04x frag_info = %016lx\n",
170 i, offset, (unsigned long) sp->txd[i].frag_info);
171 }
172}
173#endif
174
175static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
176{
177 ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
178 ndelay(IPG_PC_PHYCTRLWAIT_NS);
179}
180
181static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
182{
183 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
184 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
185}
186
187static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
188{
189 phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;
190
191 ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
192}
193
194static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
195{
196 ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
197 phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
198}
199
200static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
201{
202 u16 bit_data;
203
204 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);
205
206 bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;
207
208 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);
209
210 return bit_data;
211}
212
213/*
214 * Read a register from the Physical Layer device located
215 * on the IPG NIC, using the IPG PHYCTRL register.
216 */
217static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
218{
219 void __iomem *ioaddr = ipg_ioaddr(dev);
220 /*
221 * The GMII mangement frame structure for a read is as follows:
222 *
223 * |Preamble|st|op|phyad|regad|ta| data |idle|
224 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
225 *
226 * <32 1s> = 32 consecutive logic 1 values
227 * A = bit of Physical Layer device address (MSB first)
228 * R = bit of register address (MSB first)
229 * z = High impedance state
230 * D = bit of read data (MSB first)
231 *
232 * Transmission order is 'Preamble' field first, bits transmitted
233 * left to right (first to last).
234 */
235 struct {
236 u32 field;
237 unsigned int len;
238 } p[] = {
239 { GMII_PREAMBLE, 32 }, /* Preamble */
240 { GMII_ST, 2 }, /* ST */
241 { GMII_READ, 2 }, /* OP */
242 { phy_id, 5 }, /* PHYAD */
243 { phy_reg, 5 }, /* REGAD */
244 { 0x0000, 2 }, /* TA */
245 { 0x0000, 16 }, /* DATA */
246 { 0x0000, 1 } /* IDLE */
247 };
248 unsigned int i, j;
249 u8 polarity, data;
250
251 polarity = ipg_r8(PHY_CTRL);
252 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
253
254 /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
255 for (j = 0; j < 5; j++) {
256 for (i = 0; i < p[j].len; i++) {
257 /* For each variable length field, the MSB must be
258 * transmitted first. Rotate through the field bits,
259 * starting with the MSB, and move each bit into the
260 * the 1st (2^1) bit position (this is the bit position
261 * corresponding to the MgmtData bit of the PhyCtrl
262 * register for the IPG).
263 *
264 * Example: ST = 01;
265 *
266 * First write a '0' to bit 1 of the PhyCtrl
267 * register, then write a '1' to bit 1 of the
268 * PhyCtrl register.
269 *
270 * To do this, right shift the MSB of ST by the value:
271 * [field length - 1 - #ST bits already written]
272 * then left shift this result by 1.
273 */
274 data = (p[j].field >> (p[j].len - 1 - i)) << 1;
275 data &= IPG_PC_MGMTDATA;
276 data |= polarity | IPG_PC_MGMTDIR;
277
278 ipg_drive_phy_ctl_low_high(ioaddr, data);
279 }
280 }
281
282 send_three_state(ioaddr, polarity);
283
284 read_phy_bit(ioaddr, polarity);
285
286 /*
287 * For a read cycle, the bits for the next two fields (TA and
288 * DATA) are driven by the PHY (the IPG reads these bits).
289 */
290 for (i = 0; i < p[6].len; i++) {
291 p[6].field |=
292 (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
293 }
294
295 send_three_state(ioaddr, polarity);
296 send_three_state(ioaddr, polarity);
297 send_three_state(ioaddr, polarity);
298 send_end(ioaddr, polarity);
299
300 /* Return the value of the DATA field. */
301 return p[6].field;
302}
303
304/*
305 * Write to a register from the Physical Layer device located
306 * on the IPG NIC, using the IPG PHYCTRL register.
307 */
308static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
309{
310 void __iomem *ioaddr = ipg_ioaddr(dev);
311 /*
312 * The GMII mangement frame structure for a read is as follows:
313 *
314 * |Preamble|st|op|phyad|regad|ta| data |idle|
315 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
316 *
317 * <32 1s> = 32 consecutive logic 1 values
318 * A = bit of Physical Layer device address (MSB first)
319 * R = bit of register address (MSB first)
320 * z = High impedance state
321 * D = bit of write data (MSB first)
322 *
323 * Transmission order is 'Preamble' field first, bits transmitted
324 * left to right (first to last).
325 */
326 struct {
327 u32 field;
328 unsigned int len;
329 } p[] = {
330 { GMII_PREAMBLE, 32 }, /* Preamble */
331 { GMII_ST, 2 }, /* ST */
332 { GMII_WRITE, 2 }, /* OP */
333 { phy_id, 5 }, /* PHYAD */
334 { phy_reg, 5 }, /* REGAD */
335 { 0x0002, 2 }, /* TA */
336 { val & 0xffff, 16 }, /* DATA */
337 { 0x0000, 1 } /* IDLE */
338 };
339 unsigned int i, j;
340 u8 polarity, data;
341
342 polarity = ipg_r8(PHY_CTRL);
343 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
344
345 /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
346 for (j = 0; j < 7; j++) {
347 for (i = 0; i < p[j].len; i++) {
348 /* For each variable length field, the MSB must be
349 * transmitted first. Rotate through the field bits,
350 * starting with the MSB, and move each bit into the
351 * the 1st (2^1) bit position (this is the bit position
352 * corresponding to the MgmtData bit of the PhyCtrl
353 * register for the IPG).
354 *
355 * Example: ST = 01;
356 *
357 * First write a '0' to bit 1 of the PhyCtrl
358 * register, then write a '1' to bit 1 of the
359 * PhyCtrl register.
360 *
361 * To do this, right shift the MSB of ST by the value:
362 * [field length - 1 - #ST bits already written]
363 * then left shift this result by 1.
364 */
365 data = (p[j].field >> (p[j].len - 1 - i)) << 1;
366 data &= IPG_PC_MGMTDATA;
367 data |= polarity | IPG_PC_MGMTDIR;
368
369 ipg_drive_phy_ctl_low_high(ioaddr, data);
370 }
371 }
372
373 /* The last cycle is a tri-state, so read from the PHY. */
374 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
375 ipg_r8(PHY_CTRL);
376 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
377}
378
379static void ipg_set_led_mode(struct net_device *dev)
380{
381 struct ipg_nic_private *sp = netdev_priv(dev);
382 void __iomem *ioaddr = sp->ioaddr;
383 u32 mode;
384
385 mode = ipg_r32(ASIC_CTRL);
386 mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
387
388 if ((sp->led_mode & 0x03) > 1)
389 mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */
390
391 if ((sp->led_mode & 0x01) == 1)
392 mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */
393
394 if ((sp->led_mode & 0x08) == 8)
395 mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */
396
397 ipg_w32(mode, ASIC_CTRL);
398}
399
400static void ipg_set_phy_set(struct net_device *dev)
401{
402 struct ipg_nic_private *sp = netdev_priv(dev);
403 void __iomem *ioaddr = sp->ioaddr;
404 int physet;
405
406 physet = ipg_r8(PHY_SET);
407 physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
408 physet |= ((sp->led_mode & 0x70) >> 4);
409 ipg_w8(physet, PHY_SET);
410}
411
412static int ipg_reset(struct net_device *dev, u32 resetflags)
413{
414 /* Assert functional resets via the IPG AsicCtrl
415 * register as specified by the 'resetflags' input
416 * parameter.
417 */
418 void __iomem *ioaddr = ipg_ioaddr(dev);
419 unsigned int timeout_count = 0;
420
421 IPG_DEBUG_MSG("_reset\n");
422
423 ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);
424
425 /* Delay added to account for problem with 10Mbps reset. */
426 mdelay(IPG_AC_RESETWAIT);
427
428 while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
429 mdelay(IPG_AC_RESETWAIT);
430 if (++timeout_count > IPG_AC_RESET_TIMEOUT)
431 return -ETIME;
432 }
433 /* Set LED Mode in Asic Control */
434 ipg_set_led_mode(dev);
435
436 /* Set PHYSet Register Value */
437 ipg_set_phy_set(dev);
438 return 0;
439}
440
441/* Find the GMII PHY address. */
442static int ipg_find_phyaddr(struct net_device *dev)
443{
444 unsigned int phyaddr, i;
445
446 for (i = 0; i < 32; i++) {
447 u32 status;
448
449 /* Search for the correct PHY address among 32 possible. */
450 phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;
451
452 /* 10/22/03 Grace change verify from GMII_PHY_STATUS to
453 GMII_PHY_ID1
454 */
455
456 status = mdio_read(dev, phyaddr, MII_BMSR);
457
458 if ((status != 0xFFFF) && (status != 0))
459 return phyaddr;
460 }
461
462 return 0x1f;
463}
464
465/*
466 * Configure IPG based on result of IEEE 802.3 PHY
467 * auto-negotiation.
468 */
469static int ipg_config_autoneg(struct net_device *dev)
470{
471 struct ipg_nic_private *sp = netdev_priv(dev);
472 void __iomem *ioaddr = sp->ioaddr;
473 unsigned int txflowcontrol;
474 unsigned int rxflowcontrol;
475 unsigned int fullduplex;
476 u32 mac_ctrl_val;
477 u32 asicctrl;
478 u8 phyctrl;
479 const char *speed;
480 const char *duplex;
481 const char *tx_desc;
482 const char *rx_desc;
483
484 IPG_DEBUG_MSG("_config_autoneg\n");
485
486 asicctrl = ipg_r32(ASIC_CTRL);
487 phyctrl = ipg_r8(PHY_CTRL);
488 mac_ctrl_val = ipg_r32(MAC_CTRL);
489
490 /* Set flags for use in resolving auto-negotiation, assuming
491 * non-1000Mbps, half duplex, no flow control.
492 */
493 fullduplex = 0;
494 txflowcontrol = 0;
495 rxflowcontrol = 0;
496
497 /* To accommodate a problem in 10Mbps operation,
498 * set a global flag if PHY running in 10Mbps mode.
499 */
500 sp->tenmbpsmode = 0;
501
502 /* Determine actual speed of operation. */
503 switch (phyctrl & IPG_PC_LINK_SPEED) {
504 case IPG_PC_LINK_SPEED_10MBPS:
505 speed = "10Mbps";
506 sp->tenmbpsmode = 1;
507 break;
508 case IPG_PC_LINK_SPEED_100MBPS:
509 speed = "100Mbps";
510 break;
511 case IPG_PC_LINK_SPEED_1000MBPS:
512 speed = "1000Mbps";
513 break;
514 default:
515 speed = "undefined!";
516 return 0;
517 }
518
519 netdev_info(dev, "Link speed = %s\n", speed);
520 if (sp->tenmbpsmode == 1)
521 netdev_info(dev, "10Mbps operational mode enabled\n");
522
523 if (phyctrl & IPG_PC_DUPLEX_STATUS) {
524 fullduplex = 1;
525 txflowcontrol = 1;
526 rxflowcontrol = 1;
527 }
528
529 /* Configure full duplex, and flow control. */
530 if (fullduplex == 1) {
531
532 /* Configure IPG for full duplex operation. */
533
534 duplex = "full";
535
536 mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;
537
538 if (txflowcontrol == 1) {
539 tx_desc = "";
540 mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
541 } else {
542 tx_desc = "no ";
543 mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
544 }
545
546 if (rxflowcontrol == 1) {
547 rx_desc = "";
548 mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
549 } else {
550 rx_desc = "no ";
551 mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
552 }
553 } else {
554 duplex = "half";
555 tx_desc = "no ";
556 rx_desc = "no ";
557 mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD &
558 ~IPG_MC_TX_FLOW_CONTROL_ENABLE &
559 ~IPG_MC_RX_FLOW_CONTROL_ENABLE);
560 }
561
562 netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n",
563 duplex, tx_desc, rx_desc);
564 ipg_w32(mac_ctrl_val, MAC_CTRL);
565
566 return 0;
567}
568
569/* Determine and configure multicast operation and set
570 * receive mode for IPG.
571 */
572static void ipg_nic_set_multicast_list(struct net_device *dev)
573{
574 void __iomem *ioaddr = ipg_ioaddr(dev);
575 struct netdev_hw_addr *ha;
576 unsigned int hashindex;
577 u32 hashtable[2];
578 u8 receivemode;
579
580 IPG_DEBUG_MSG("_nic_set_multicast_list\n");
581
582 receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;
583
584 if (dev->flags & IFF_PROMISC) {
585 /* NIC to be configured in promiscuous mode. */
586 receivemode = IPG_RM_RECEIVEALLFRAMES;
587 } else if ((dev->flags & IFF_ALLMULTI) ||
588 ((dev->flags & IFF_MULTICAST) &&
589 (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
590 /* NIC to be configured to receive all multicast
591 * frames. */
592 receivemode |= IPG_RM_RECEIVEMULTICAST;
593 } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
594 /* NIC to be configured to receive selected
595 * multicast addresses. */
596 receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
597 }
598
599 /* Calculate the bits to set for the 64 bit, IPG HASHTABLE.
600 * The IPG applies a cyclic-redundancy-check (the same CRC
601 * used to calculate the frame data FCS) to the destination
602 * address all incoming multicast frames whose destination
603 * address has the multicast bit set. The least significant
604 * 6 bits of the CRC result are used as an addressing index
605 * into the hash table. If the value of the bit addressed by
606 * this index is a 1, the frame is passed to the host system.
607 */
608
609 /* Clear hashtable. */
610 hashtable[0] = 0x00000000;
611 hashtable[1] = 0x00000000;
612
613 /* Cycle through all multicast addresses to filter. */
614 netdev_for_each_mc_addr(ha, dev) {
615 /* Calculate CRC result for each multicast address. */
616 hashindex = crc32_le(0xffffffff, ha->addr,
617 ETH_ALEN);
618
619 /* Use only the least significant 6 bits. */
620 hashindex = hashindex & 0x3F;
621
622 /* Within "hashtable", set bit number "hashindex"
623 * to a logic 1.
624 */
625 set_bit(hashindex, (void *)hashtable);
626 }
627
628 /* Write the value of the hashtable, to the 4, 16 bit
629 * HASHTABLE IPG registers.
630 */
631 ipg_w32(hashtable[0], HASHTABLE_0);
632 ipg_w32(hashtable[1], HASHTABLE_1);
633
634 ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);
635
636 IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
637}
638
639static int ipg_io_config(struct net_device *dev)
640{
641 struct ipg_nic_private *sp = netdev_priv(dev);
642 void __iomem *ioaddr = ipg_ioaddr(dev);
643 u32 origmacctrl;
644 u32 restoremacctrl;
645
646 IPG_DEBUG_MSG("_io_config\n");
647
648 origmacctrl = ipg_r32(MAC_CTRL);
649
650 restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;
651
652 /* Based on compilation option, determine if FCS is to be
653 * stripped on receive frames by IPG.
654 */
655 if (!IPG_STRIP_FCS_ON_RX)
656 restoremacctrl |= IPG_MC_RCV_FCS;
657
658 /* Determine if transmitter and/or receiver are
659 * enabled so we may restore MACCTRL correctly.
660 */
661 if (origmacctrl & IPG_MC_TX_ENABLED)
662 restoremacctrl |= IPG_MC_TX_ENABLE;
663
664 if (origmacctrl & IPG_MC_RX_ENABLED)
665 restoremacctrl |= IPG_MC_RX_ENABLE;
666
667 /* Transmitter and receiver must be disabled before setting
668 * IFSSelect.
669 */
670 ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
671 IPG_MC_RSVD_MASK, MAC_CTRL);
672
673 /* Now that transmitter and receiver are disabled, write
674 * to IFSSelect.
675 */
676 ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);
677
678 /* Set RECEIVEMODE register. */
679 ipg_nic_set_multicast_list(dev);
680
681 ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);
682
683 ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
684 ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
685 ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
686 ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
687 ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
688 ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
689 ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
690 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
691 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
692 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
693 ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
694 ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);
695
696 /* IPG multi-frag frame bug workaround.
 697	 * Per the silicon revision B3 errata.
698 */
699 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);
700
701 /* IPG TX poll now bug workaround.
 702	 * Per the silicon revision B3 errata.
703 */
704 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);
705
706 /* IPG RX poll now bug workaround.
 707	 * Per the silicon revision B3 errata.
708 */
709 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);
710
711 /* Now restore MACCTRL to original setting. */
712 ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);
713
714 /* Disable unused RMON statistics. */
715 ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);
716
717 /* Disable unused MIB statistics. */
718 ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
719 IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
720 IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
721 IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
722 IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
723 IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);
724
725 return 0;
726}
727
728/*
729 * Create a receive buffer within system memory and update
730 * NIC private structure appropriately.
731 */
732static int ipg_get_rxbuff(struct net_device *dev, int entry)
733{
734 struct ipg_nic_private *sp = netdev_priv(dev);
735 struct ipg_rx *rxfd = sp->rxd + entry;
736 struct sk_buff *skb;
737 u64 rxfragsize;
738
739 IPG_DEBUG_MSG("_get_rxbuff\n");
740
741 skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
742 if (!skb) {
743 sp->rx_buff[entry] = NULL;
744 return -ENOMEM;
745 }
746
747 /* Save the address of the sk_buff structure. */
748 sp->rx_buff[entry] = skb;
749
750 rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
751 sp->rx_buf_sz, PCI_DMA_FROMDEVICE));
752
753 /* Set the RFD fragment length. */
754 rxfragsize = sp->rxfrag_size;
755 rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);
756
757 return 0;
758}
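
/* Illustrative note, not driver code: frag_info carries the DMA address
 * in its low bits and the fragment length in bits 48-63. For example,
 * with rxfrag_size = 4088 (0x0FF8):
 *
 *	(0x0FF8ULL << 48) & IPG_RFI_FRAGLEN == 0x0FF8000000000000
 *
 * so ORing the shifted length into frag_info leaves the address bits
 * written above untouched.
 */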
759
760static int init_rfdlist(struct net_device *dev)
761{
762 struct ipg_nic_private *sp = netdev_priv(dev);
763 void __iomem *ioaddr = sp->ioaddr;
764 unsigned int i;
765
766 IPG_DEBUG_MSG("_init_rfdlist\n");
767
768 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
769 struct ipg_rx *rxfd = sp->rxd + i;
770
771 if (sp->rx_buff[i]) {
772 pci_unmap_single(sp->pdev,
773 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
774 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
775 dev_kfree_skb_irq(sp->rx_buff[i]);
776 sp->rx_buff[i] = NULL;
777 }
778
779 /* Clear out the RFS field. */
780 rxfd->rfs = 0x0000000000000000;
781
782 if (ipg_get_rxbuff(dev, i) < 0) {
 783			/*
 784			 * A receive buffer was not ready; break the
 785			 * RFD list here.
 786			 */
787 IPG_DEBUG_MSG("Cannot allocate Rx buffer\n");
788
789 /* Just in case we cannot allocate a single RFD.
790 * Should not occur.
791 */
792 if (i == 0) {
793 netdev_err(dev, "No memory available for RFD list\n");
794 return -ENOMEM;
795 }
796 }
797
798 rxfd->next_desc = cpu_to_le64(sp->rxd_map +
799 sizeof(struct ipg_rx)*(i + 1));
800 }
801 sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);
802
803 sp->rx_current = 0;
804 sp->rx_dirty = 0;
805
806 /* Write the location of the RFDList to the IPG. */
807 ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
808 ipg_w32(0x00000000, RFD_LIST_PTR_1);
809
810 return 0;
811}
812
813static void init_tfdlist(struct net_device *dev)
814{
815 struct ipg_nic_private *sp = netdev_priv(dev);
816 void __iomem *ioaddr = sp->ioaddr;
817 unsigned int i;
818
819 IPG_DEBUG_MSG("_init_tfdlist\n");
820
821 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
822 struct ipg_tx *txfd = sp->txd + i;
823
824 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
825
826 if (sp->tx_buff[i]) {
827 dev_kfree_skb_irq(sp->tx_buff[i]);
828 sp->tx_buff[i] = NULL;
829 }
830
831 txfd->next_desc = cpu_to_le64(sp->txd_map +
832 sizeof(struct ipg_tx)*(i + 1));
833 }
834 sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);
835
836 sp->tx_current = 0;
837 sp->tx_dirty = 0;
838
839 /* Write the location of the TFDList to the IPG. */
840 IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n",
841 (u32) sp->txd_map);
842 ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
843 ipg_w32(0x00000000, TFD_LIST_PTR_1);
844
845 sp->reset_current_tfd = 1;
846}
847
848/*
849 * Free all transmit buffers which have already been transferred
850 * via DMA to the IPG.
851 */
852static void ipg_nic_txfree(struct net_device *dev)
853{
854 struct ipg_nic_private *sp = netdev_priv(dev);
855 unsigned int released, pending, dirty;
856
857 IPG_DEBUG_MSG("_nic_txfree\n");
858
859 pending = sp->tx_current - sp->tx_dirty;
860 dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;
861
862 for (released = 0; released < pending; released++) {
863 struct sk_buff *skb = sp->tx_buff[dirty];
864 struct ipg_tx *txfd = sp->txd + dirty;
865
866 IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc);
867
868 /* Look at each TFD's TFC field beginning
869 * at the last freed TFD up to the current TFD.
870 * If the TFDDone bit is set, free the associated
871 * buffer.
872 */
873 if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
874 break;
875
876 /* Free the transmit buffer. */
877 if (skb) {
878 pci_unmap_single(sp->pdev,
879 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
880 skb->len, PCI_DMA_TODEVICE);
881
882 dev_kfree_skb_irq(skb);
883
884 sp->tx_buff[dirty] = NULL;
885 }
886 dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
887 }
888
889 sp->tx_dirty += released;
890
891 if (netif_queue_stopped(dev) &&
892 (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
893 netif_wake_queue(dev);
894 }
895}
896
897static void ipg_tx_timeout(struct net_device *dev)
898{
899 struct ipg_nic_private *sp = netdev_priv(dev);
900 void __iomem *ioaddr = sp->ioaddr;
901
902 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
903 IPG_AC_FIFO);
904
905 spin_lock_irq(&sp->lock);
906
907 /* Re-configure after DMA reset. */
908 if (ipg_io_config(dev) < 0)
909 netdev_info(dev, "Error during re-configuration\n");
910
911 init_tfdlist(dev);
912
913 spin_unlock_irq(&sp->lock);
914
915 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
916 MAC_CTRL);
917}
918
919/*
920 * For TxComplete interrupts, free all transmit
921 * buffers which have already been transferred via DMA
922 * to the IPG.
923 */
924static void ipg_nic_txcleanup(struct net_device *dev)
925{
926 struct ipg_nic_private *sp = netdev_priv(dev);
927 void __iomem *ioaddr = sp->ioaddr;
928 unsigned int i;
929
930 IPG_DEBUG_MSG("_nic_txcleanup\n");
931
932 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
933 /* Reading the TXSTATUS register clears the
934 * TX_COMPLETE interrupt.
935 */
936 u32 txstatusdword = ipg_r32(TX_STATUS);
937
938 IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword);
939
940 /* Check for Transmit errors. Error bits only valid if
941 * TX_COMPLETE bit in the TXSTATUS register is a 1.
942 */
943 if (!(txstatusdword & IPG_TS_TX_COMPLETE))
944 break;
945
946 /* If in 10Mbps mode, indicate transmit is ready. */
947 if (sp->tenmbpsmode) {
948 netif_wake_queue(dev);
949 }
950
951 /* Transmit error, increment stat counters. */
952 if (txstatusdword & IPG_TS_TX_ERROR) {
953 IPG_DEBUG_MSG("Transmit error\n");
954 sp->stats.tx_errors++;
955 }
956
957 /* Late collision, re-enable transmitter. */
958 if (txstatusdword & IPG_TS_LATE_COLLISION) {
959 IPG_DEBUG_MSG("Late collision on transmit\n");
960 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
961 IPG_MC_RSVD_MASK, MAC_CTRL);
962 }
963
964 /* Maximum collisions, re-enable transmitter. */
965 if (txstatusdword & IPG_TS_TX_MAX_COLL) {
966 IPG_DEBUG_MSG("Maximum collisions on transmit\n");
967 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
968 IPG_MC_RSVD_MASK, MAC_CTRL);
969 }
970
971 /* Transmit underrun, reset and re-enable
972 * transmitter.
973 */
974 if (txstatusdword & IPG_TS_TX_UNDERRUN) {
975 IPG_DEBUG_MSG("Transmitter underrun\n");
976 sp->stats.tx_fifo_errors++;
977 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
978 IPG_AC_NETWORK | IPG_AC_FIFO);
979
980 /* Re-configure after DMA reset. */
981 if (ipg_io_config(dev) < 0) {
982 netdev_info(dev, "Error during re-configuration\n");
983 }
984 init_tfdlist(dev);
985
986 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
987 IPG_MC_RSVD_MASK, MAC_CTRL);
988 }
989 }
990
991 ipg_nic_txfree(dev);
992}
993
994/* Provides statistical information about the IPG NIC. */
995static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
996{
997 struct ipg_nic_private *sp = netdev_priv(dev);
998 void __iomem *ioaddr = sp->ioaddr;
999 u16 temp1;
1000 u16 temp2;
1001
1002 IPG_DEBUG_MSG("_nic_get_stats\n");
1003
1004	/* Check to see if the NIC has been initialized via nic_open
1005	 * before trying to read statistic registers.
1006	 */
1007 if (!netif_running(dev))
1008 return &sp->stats;
1009
1010 sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
1011 sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
1012 sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
1013 sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
1014 temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
1015 sp->stats.rx_errors += temp1;
1016 sp->stats.rx_missed_errors += temp1;
1017 temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
1018 ipg_r32(IPG_LATECOLLISIONS);
1019 temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
1020 sp->stats.collisions += temp1;
1021 sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
1022 sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
1023 ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
1024 sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);
1025
1026 /* detailed tx_errors */
1027 sp->stats.tx_carrier_errors += temp2;
1028
1029 /* detailed rx_errors */
1030 sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
1031 ipg_r16(IPG_FRAMETOOLONGERRORS);
1032 sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);
1033
1034 /* Unutilized IPG statistic registers. */
1035 ipg_r32(IPG_MCSTFRAMESRCVDOK);
1036
1037 return &sp->stats;
1038}
1039
1040/* Restore used receive buffers. */
1041static int ipg_nic_rxrestore(struct net_device *dev)
1042{
1043 struct ipg_nic_private *sp = netdev_priv(dev);
1044 const unsigned int curr = sp->rx_current;
1045 unsigned int dirty = sp->rx_dirty;
1046
1047 IPG_DEBUG_MSG("_nic_rxrestore\n");
1048
1049 for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
1050 unsigned int entry = dirty % IPG_RFDLIST_LENGTH;
1051
1052 /* rx_copybreak may poke hole here and there. */
1053 if (sp->rx_buff[entry])
1054 continue;
1055
1056 /* Generate a new receive buffer to replace the
1057 * current buffer (which will be released by the
1058 * Linux system).
1059 */
1060 if (ipg_get_rxbuff(dev, entry) < 0) {
1061 IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n");
1062
1063 break;
1064 }
1065
1066 /* Reset the RFS field. */
1067 sp->rxd[entry].rfs = 0x0000000000000000;
1068 }
1069 sp->rx_dirty = dirty;
1070
1071 return 0;
1072}
1073
1074/* jumboindex and jumbosize track the jumbo frame reassembly state.
1075 * The initial state is jumboindex = -1 and jumbosize = 0.
1076 * 1. jumboindex = -1 and jumbosize = 0 : the previous jumbo frame is complete.
1077 * 2. jumboindex != -1 and jumbosize != 0 : a jumbo frame is being received and is not oversized.
1078 * 3. jumboindex = -1 and jumbosize != 0 : the jumbo frame is oversized; the data received so far
1079 *    was dropped and the remainder of the frame must be dropped as well.
1080 */
1081enum {
1082 NORMAL_PACKET,
1083 ERROR_PACKET
1084};
1085
1086enum {
1087 FRAME_NO_START_NO_END = 0,
1088 FRAME_WITH_START = 1,
1089 FRAME_WITH_END = 10,
1090 FRAME_WITH_START_WITH_END = 11
1091};
1092
1093static void ipg_nic_rx_free_skb(struct net_device *dev)
1094{
1095 struct ipg_nic_private *sp = netdev_priv(dev);
1096 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1097
1098 if (sp->rx_buff[entry]) {
1099 struct ipg_rx *rxfd = sp->rxd + entry;
1100
1101 pci_unmap_single(sp->pdev,
1102 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1103 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1104 dev_kfree_skb_irq(sp->rx_buff[entry]);
1105 sp->rx_buff[entry] = NULL;
1106 }
1107}
1108
1109static int ipg_nic_rx_check_frame_type(struct net_device *dev)
1110{
1111 struct ipg_nic_private *sp = netdev_priv(dev);
1112 struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
1113 int type = FRAME_NO_START_NO_END;
1114
1115 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
1116 type += FRAME_WITH_START;
1117 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
1118 type += FRAME_WITH_END;
1119 return type;
1120}
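
/* Illustrative note: the enum values above are chosen so the two flag
 * tests sum to a distinct code: no flags = 0, start only = 1, end only
 * = 10, and start + end = 11 (FRAME_WITH_START_WITH_END), which
 * ipg_nic_rx_jumbo() below switches on.
 */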
1121
1122static int ipg_nic_rx_check_error(struct net_device *dev)
1123{
1124 struct ipg_nic_private *sp = netdev_priv(dev);
1125 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1126 struct ipg_rx *rxfd = sp->rxd + entry;
1127
1128 if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1129 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1130 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1131 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
1132 IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
1133 (unsigned long) rxfd->rfs);
1134
1135 /* Increment general receive error statistic. */
1136 sp->stats.rx_errors++;
1137
1138 /* Increment detailed receive error statistics. */
1139 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1140 IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
1141
1142 sp->stats.rx_fifo_errors++;
1143 }
1144
1145 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1146 IPG_DEBUG_MSG("RX runt occurred\n");
1147 sp->stats.rx_length_errors++;
1148 }
1149
1150		/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME; the error
1151		 * count is handled by an IPG statistic register.
1152		 */
1153
1154 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1155 IPG_DEBUG_MSG("RX alignment error occurred\n");
1156 sp->stats.rx_frame_errors++;
1157 }
1158
1159		/* Do nothing for IPG_RFS_RXFCSERROR; the error count
1160		 * is handled by an IPG statistic register.
1161		 */
1162
1163 /* Free the memory associated with the RX
1164 * buffer since it is erroneous and we will
1165 * not pass it to higher layer processes.
1166 */
1167 if (sp->rx_buff[entry]) {
1168 pci_unmap_single(sp->pdev,
1169 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1170 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1171
1172 dev_kfree_skb_irq(sp->rx_buff[entry]);
1173 sp->rx_buff[entry] = NULL;
1174 }
1175 return ERROR_PACKET;
1176 }
1177 return NORMAL_PACKET;
1178}
1179
1180static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
1181 struct ipg_nic_private *sp,
1182 struct ipg_rx *rxfd, unsigned entry)
1183{
1184 struct ipg_jumbo *jumbo = &sp->jumbo;
1185 struct sk_buff *skb;
1186 int framelen;
1187
1188 if (jumbo->found_start) {
1189 dev_kfree_skb_irq(jumbo->skb);
1190 jumbo->found_start = 0;
1191 jumbo->current_size = 0;
1192 jumbo->skb = NULL;
1193 }
1194
1195	/* Drop the frame if the hardware reported a receive error. */
1196 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1197 return;
1198
1199 skb = sp->rx_buff[entry];
1200 if (!skb)
1201 return;
1202
1203 /* accept this frame and send to upper layer */
1204 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1205 if (framelen > sp->rxfrag_size)
1206 framelen = sp->rxfrag_size;
1207
1208 skb_put(skb, framelen);
1209 skb->protocol = eth_type_trans(skb, dev);
1210 skb_checksum_none_assert(skb);
1211 netif_rx(skb);
1212 sp->rx_buff[entry] = NULL;
1213}
1214
1215static void ipg_nic_rx_with_start(struct net_device *dev,
1216 struct ipg_nic_private *sp,
1217 struct ipg_rx *rxfd, unsigned entry)
1218{
1219 struct ipg_jumbo *jumbo = &sp->jumbo;
1220 struct pci_dev *pdev = sp->pdev;
1221 struct sk_buff *skb;
1222
1223	/* Drop the frame if the hardware reported a receive error. */
1224 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1225 return;
1226
1227 /* accept this frame and send to upper layer */
1228 skb = sp->rx_buff[entry];
1229 if (!skb)
1230 return;
1231
1232 if (jumbo->found_start)
1233 dev_kfree_skb_irq(jumbo->skb);
1234
1235 pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1236 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1237
1238 skb_put(skb, sp->rxfrag_size);
1239
1240 jumbo->found_start = 1;
1241 jumbo->current_size = sp->rxfrag_size;
1242 jumbo->skb = skb;
1243
1244 sp->rx_buff[entry] = NULL;
1245}
1246
1247static void ipg_nic_rx_with_end(struct net_device *dev,
1248 struct ipg_nic_private *sp,
1249 struct ipg_rx *rxfd, unsigned entry)
1250{
1251 struct ipg_jumbo *jumbo = &sp->jumbo;
1252
1253	/* Process the frame only if no receive error was reported. */
1254 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1255 struct sk_buff *skb = sp->rx_buff[entry];
1256
1257 if (!skb)
1258 return;
1259
1260 if (jumbo->found_start) {
1261 int framelen, endframelen;
1262
1263 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1264
1265 endframelen = framelen - jumbo->current_size;
1266 if (framelen > sp->rxsupport_size)
1267 dev_kfree_skb_irq(jumbo->skb);
1268 else {
1269 memcpy(skb_put(jumbo->skb, endframelen),
1270 skb->data, endframelen);
1271
1272 jumbo->skb->protocol =
1273 eth_type_trans(jumbo->skb, dev);
1274
1275 skb_checksum_none_assert(jumbo->skb);
1276 netif_rx(jumbo->skb);
1277 }
1278 }
1279
1280 jumbo->found_start = 0;
1281 jumbo->current_size = 0;
1282 jumbo->skb = NULL;
1283
1284 ipg_nic_rx_free_skb(dev);
1285 } else {
1286 dev_kfree_skb_irq(jumbo->skb);
1287 jumbo->found_start = 0;
1288 jumbo->current_size = 0;
1289 jumbo->skb = NULL;
1290 }
1291}
1292
1293static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
1294 struct ipg_nic_private *sp,
1295 struct ipg_rx *rxfd, unsigned entry)
1296{
1297 struct ipg_jumbo *jumbo = &sp->jumbo;
1298
1299	/* Process the frame only if no receive error was reported. */
1300 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1301 struct sk_buff *skb = sp->rx_buff[entry];
1302
1303 if (skb) {
1304 if (jumbo->found_start) {
1305 jumbo->current_size += sp->rxfrag_size;
1306 if (jumbo->current_size <= sp->rxsupport_size) {
1307 memcpy(skb_put(jumbo->skb,
1308 sp->rxfrag_size),
1309 skb->data, sp->rxfrag_size);
1310 }
1311 }
1312 ipg_nic_rx_free_skb(dev);
1313 }
1314 } else {
1315 dev_kfree_skb_irq(jumbo->skb);
1316 jumbo->found_start = 0;
1317 jumbo->current_size = 0;
1318 jumbo->skb = NULL;
1319 }
1320}
1321
1322static int ipg_nic_rx_jumbo(struct net_device *dev)
1323{
1324 struct ipg_nic_private *sp = netdev_priv(dev);
1325 unsigned int curr = sp->rx_current;
1326 void __iomem *ioaddr = sp->ioaddr;
1327 unsigned int i;
1328
1329 IPG_DEBUG_MSG("_nic_rx\n");
1330
1331 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1332 unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1333 struct ipg_rx *rxfd = sp->rxd + entry;
1334
1335 if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
1336 break;
1337
1338 switch (ipg_nic_rx_check_frame_type(dev)) {
1339 case FRAME_WITH_START_WITH_END:
1340 ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
1341 break;
1342 case FRAME_WITH_START:
1343 ipg_nic_rx_with_start(dev, sp, rxfd, entry);
1344 break;
1345 case FRAME_WITH_END:
1346 ipg_nic_rx_with_end(dev, sp, rxfd, entry);
1347 break;
1348 case FRAME_NO_START_NO_END:
1349 ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
1350 break;
1351 }
1352 }
1353
1354 sp->rx_current = curr;
1355
1356 if (i == IPG_MAXRFDPROCESS_COUNT) {
1357 /* There are more RFDs to process, however the
1358 * allocated amount of RFD processing time has
1359 * expired. Assert Interrupt Requested to make
1360 * sure we come back to process the remaining RFDs.
1361 */
1362 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
1363 }
1364
1365 ipg_nic_rxrestore(dev);
1366
1367 return 0;
1368}
1369
1370static int ipg_nic_rx(struct net_device *dev)
1371{
1372 /* Transfer received Ethernet frames to higher network layers. */
1373 struct ipg_nic_private *sp = netdev_priv(dev);
1374 unsigned int curr = sp->rx_current;
1375 void __iomem *ioaddr = sp->ioaddr;
1376 struct ipg_rx *rxfd;
1377 unsigned int i;
1378
1379 IPG_DEBUG_MSG("_nic_rx\n");
1380
1381#define __RFS_MASK \
1382 cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)
1383
1384 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1385 unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1386 struct sk_buff *skb = sp->rx_buff[entry];
1387 unsigned int framelen;
1388
1389 rxfd = sp->rxd + entry;
1390
1391 if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
1392 break;
1393
1394 /* Get received frame length. */
1395 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1396
1397 /* Check for jumbo frame arrival with too small
1398 * RXFRAG_SIZE.
1399 */
1400 if (framelen > sp->rxfrag_size) {
1401 IPG_DEBUG_MSG
1402 ("RFS FrameLen > allocated fragment size\n");
1403
1404 framelen = sp->rxfrag_size;
1405 }
1406
1407 if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1408 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1409 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1410 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {
1411
1412 IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
1413 (unsigned long int) rxfd->rfs);
1414
1415 /* Increment general receive error statistic. */
1416 sp->stats.rx_errors++;
1417
1418 /* Increment detailed receive error statistics. */
1419 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1420 IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
1421 sp->stats.rx_fifo_errors++;
1422 }
1423
1424 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1425 IPG_DEBUG_MSG("RX runt occurred\n");
1426 sp->stats.rx_length_errors++;
1427 }
1428
1429			/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME; the
1430			 * error count is handled by an IPG statistic
1431			 * register.
1432			 */
1433
1434 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1435 IPG_DEBUG_MSG("RX alignment error occurred\n");
1436 sp->stats.rx_frame_errors++;
1437 }
1438
1439			/* Do nothing for IPG_RFS_RXFCSERROR; the
1440			 * error count is handled by an IPG statistic
1441			 * register.
1442			 */
1443
1444 /* Free the memory associated with the RX
1445 * buffer since it is erroneous and we will
1446 * not pass it to higher layer processes.
1447 */
1448 if (skb) {
1449 __le64 info = rxfd->frag_info;
1450
1451 pci_unmap_single(sp->pdev,
1452 le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
1453 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1454
1455 dev_kfree_skb_irq(skb);
1456 }
1457 } else {
1458
1459 /* Adjust the new buffer length to accommodate the size
1460 * of the received frame.
1461 */
1462 skb_put(skb, framelen);
1463
1464 /* Set the buffer's protocol field to Ethernet. */
1465 skb->protocol = eth_type_trans(skb, dev);
1466
1467 /* The IPG encountered an error with (or
1468 * there were no) IP/TCP/UDP checksums.
1469 * This may or may not indicate an invalid
1470 * IP/TCP/UDP frame was received. Let the
1471 * upper layer decide.
1472 */
1473 skb_checksum_none_assert(skb);
1474
1475 /* Hand off frame for higher layer processing.
1476 * The function netif_rx() releases the sk_buff
1477 * when processing completes.
1478 */
1479 netif_rx(skb);
1480 }
1481
1482 /* Assure RX buffer is not reused by IPG. */
1483 sp->rx_buff[entry] = NULL;
1484 }
1485
1486 /*
1487 * If there are more RFDs to process and the allocated amount of RFD
1488 * processing time has expired, assert Interrupt Requested to make
1489 * sure we come back to process the remaining RFDs.
1490 */
1491 if (i == IPG_MAXRFDPROCESS_COUNT)
1492 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
1493
1494#ifdef IPG_DEBUG
1495 /* Check if the RFD list contained no receive frame data. */
1496 if (!i)
1497 sp->EmptyRFDListCount++;
1498#endif
1499 while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
1500 !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
1501 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
1502 unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;
1503
1504 rxfd = sp->rxd + entry;
1505
1506 IPG_DEBUG_MSG("Frame requires multiple RFDs\n");
1507
1508		/* An unexpected event; additional code is needed to
1509		 * handle it properly. So for the time being, just
1510		 * disregard the frame.
1511		 */
1512
1513 /* Free the memory associated with the RX
1514 * buffer since it is erroneous and we will
1515 * not pass it to higher layer processes.
1516 */
1517 if (sp->rx_buff[entry]) {
1518 pci_unmap_single(sp->pdev,
1519 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1520 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1521 dev_kfree_skb_irq(sp->rx_buff[entry]);
1522 }
1523
1524 /* Assure RX buffer is not reused by IPG. */
1525 sp->rx_buff[entry] = NULL;
1526 }
1527
1528 sp->rx_current = curr;
1529
1530	/* Check that a minimum number of RFDs have been used before
1531	 * restoring any (this should improve performance).
1532	 */
1533 if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
1534 ipg_nic_rxrestore(dev);
1535
1536 return 0;
1537}
1538
1539static void ipg_reset_after_host_error(struct work_struct *work)
1540{
1541 struct ipg_nic_private *sp =
1542 container_of(work, struct ipg_nic_private, task.work);
1543 struct net_device *dev = sp->dev;
1544
1545 /*
1546 * Acknowledge HostError interrupt by resetting
1547 * IPG DMA and HOST.
1548 */
1549 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1550
1551 init_rfdlist(dev);
1552 init_tfdlist(dev);
1553
1554 if (ipg_io_config(dev) < 0) {
1555 netdev_info(dev, "Cannot recover from PCI error\n");
1556 schedule_delayed_work(&sp->task, HZ);
1557 }
1558}
1559
1560static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
1561{
1562 struct net_device *dev = dev_inst;
1563 struct ipg_nic_private *sp = netdev_priv(dev);
1564 void __iomem *ioaddr = sp->ioaddr;
1565 unsigned int handled = 0;
1566 u16 status;
1567
1568 IPG_DEBUG_MSG("_interrupt_handler\n");
1569
1570 if (sp->is_jumbo)
1571 ipg_nic_rxrestore(dev);
1572
1573 spin_lock(&sp->lock);
1574
1575 /* Get interrupt source information, and acknowledge
1576 * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
1577 * IntRequested, MacControlFrame, LinkEvent) interrupts
1578 * if issued. Also, all IPG interrupts are disabled by
1579 * reading IntStatusAck.
1580 */
1581 status = ipg_r16(INT_STATUS_ACK);
1582
1583 IPG_DEBUG_MSG("IntStatusAck = %04x\n", status);
1584
1585	/* Not our interrupt: shared IRQ, or device remove event. */
1586 if (!(status & IPG_IS_RSVD_MASK))
1587 goto out_enable;
1588
1589 handled = 1;
1590
1591 if (unlikely(!netif_running(dev)))
1592 goto out_unlock;
1593
1594 /* If RFDListEnd interrupt, restore all used RFDs. */
1595 if (status & IPG_IS_RFD_LIST_END) {
1596 IPG_DEBUG_MSG("RFDListEnd Interrupt\n");
1597
1598 /* The RFD list end indicates an RFD was encountered
1599 * with a 0 NextPtr, or with an RFDDone bit set to 1
1600		 * (indicating the RFD is not ready for use by the
1601 * IPG.) Try to restore all RFDs.
1602 */
1603 ipg_nic_rxrestore(dev);
1604
1605#ifdef IPG_DEBUG
1606 /* Increment the RFDlistendCount counter. */
1607 sp->RFDlistendCount++;
1608#endif
1609 }
1610
1611 /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
1612 * IntRequested interrupt, process received frames. */
1613 if ((status & IPG_IS_RX_DMA_PRIORITY) ||
1614 (status & IPG_IS_RFD_LIST_END) ||
1615 (status & IPG_IS_RX_DMA_COMPLETE) ||
1616 (status & IPG_IS_INT_REQUESTED)) {
1617#ifdef IPG_DEBUG
1618 /* Increment the RFD list checked counter if interrupted
1619 * only to check the RFD list. */
1620 if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
1621 IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
1622 (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
1623 IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
1624 IPG_IS_UPDATE_STATS)))
1625 sp->RFDListCheckedCount++;
1626#endif
1627
1628 if (sp->is_jumbo)
1629 ipg_nic_rx_jumbo(dev);
1630 else
1631 ipg_nic_rx(dev);
1632 }
1633
1634 /* If TxDMAComplete interrupt, free used TFDs. */
1635 if (status & IPG_IS_TX_DMA_COMPLETE)
1636 ipg_nic_txfree(dev);
1637
1638 /* TxComplete interrupts indicate one of numerous actions.
1639 * Determine what action to take based on TXSTATUS register.
1640 */
1641 if (status & IPG_IS_TX_COMPLETE)
1642 ipg_nic_txcleanup(dev);
1643
1644 /* If UpdateStats interrupt, update Linux Ethernet statistics */
1645 if (status & IPG_IS_UPDATE_STATS)
1646 ipg_nic_get_stats(dev);
1647
1648 /* If HostError interrupt, reset IPG. */
1649 if (status & IPG_IS_HOST_ERROR) {
1650 IPG_DDEBUG_MSG("HostError Interrupt\n");
1651
1652 schedule_delayed_work(&sp->task, 0);
1653 }
1654
1655 /* If LinkEvent interrupt, resolve autonegotiation. */
1656 if (status & IPG_IS_LINK_EVENT) {
1657 if (ipg_config_autoneg(dev) < 0)
1658 netdev_info(dev, "Auto-negotiation error\n");
1659 }
1660
1661 /* If MACCtrlFrame interrupt, do nothing. */
1662 if (status & IPG_IS_MAC_CTRL_FRAME)
1663 IPG_DEBUG_MSG("MACCtrlFrame interrupt\n");
1664
1665 /* If RxComplete interrupt, do nothing. */
1666 if (status & IPG_IS_RX_COMPLETE)
1667 IPG_DEBUG_MSG("RxComplete interrupt\n");
1668
1669 /* If RxEarly interrupt, do nothing. */
1670 if (status & IPG_IS_RX_EARLY)
1671 IPG_DEBUG_MSG("RxEarly interrupt\n");
1672
1673out_enable:
1674 /* Re-enable IPG interrupts. */
1675 ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
1676 IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
1677 IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
1678out_unlock:
1679 spin_unlock(&sp->lock);
1680
1681 return IRQ_RETVAL(handled);
1682}
1683
1684static void ipg_rx_clear(struct ipg_nic_private *sp)
1685{
1686 unsigned int i;
1687
1688 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
1689 if (sp->rx_buff[i]) {
1690 struct ipg_rx *rxfd = sp->rxd + i;
1691
1692 dev_kfree_skb_irq(sp->rx_buff[i]);
1693 sp->rx_buff[i] = NULL;
1694 pci_unmap_single(sp->pdev,
1695 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1696 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1697 }
1698 }
1699}
1700
1701static void ipg_tx_clear(struct ipg_nic_private *sp)
1702{
1703 unsigned int i;
1704
1705 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
1706 if (sp->tx_buff[i]) {
1707 struct ipg_tx *txfd = sp->txd + i;
1708
1709 pci_unmap_single(sp->pdev,
1710 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
1711 sp->tx_buff[i]->len, PCI_DMA_TODEVICE);
1712
1713 dev_kfree_skb_irq(sp->tx_buff[i]);
1714
1715 sp->tx_buff[i] = NULL;
1716 }
1717 }
1718}
1719
1720static int ipg_nic_open(struct net_device *dev)
1721{
1722 struct ipg_nic_private *sp = netdev_priv(dev);
1723 void __iomem *ioaddr = sp->ioaddr;
1724 struct pci_dev *pdev = sp->pdev;
1725 int rc;
1726
1727 IPG_DEBUG_MSG("_nic_open\n");
1728
1729 sp->rx_buf_sz = sp->rxsupport_size;
1730
1731 /* Check for interrupt line conflicts, and request interrupt
1732 * line for IPG.
1733 *
1734 * IMPORTANT: Disable IPG interrupts prior to registering
1735 * IRQ.
1736 */
1737 ipg_w16(0x0000, INT_ENABLE);
1738
1739 /* Register the interrupt line to be used by the IPG within
1740 * the Linux system.
1741 */
1742 rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
1743 dev->name, dev);
1744 if (rc < 0) {
1745 netdev_info(dev, "Error when requesting interrupt\n");
1746 goto out;
1747 }
1748
1749 dev->irq = pdev->irq;
1750
1751 rc = -ENOMEM;
1752
1753 sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
1754 &sp->rxd_map, GFP_KERNEL);
1755 if (!sp->rxd)
1756 goto err_free_irq_0;
1757
1758 sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
1759 &sp->txd_map, GFP_KERNEL);
1760 if (!sp->txd)
1761 goto err_free_rx_1;
1762
1763 rc = init_rfdlist(dev);
1764 if (rc < 0) {
1765 netdev_info(dev, "Error during configuration\n");
1766 goto err_free_tx_2;
1767 }
1768
1769 init_tfdlist(dev);
1770
1771 rc = ipg_io_config(dev);
1772 if (rc < 0) {
1773 netdev_info(dev, "Error during configuration\n");
1774 goto err_release_tfdlist_3;
1775 }
1776
1777 /* Resolve autonegotiation. */
1778 if (ipg_config_autoneg(dev) < 0)
1779 netdev_info(dev, "Auto-negotiation error\n");
1780
1781 /* initialize JUMBO Frame control variable */
1782 sp->jumbo.found_start = 0;
1783 sp->jumbo.current_size = 0;
1784 sp->jumbo.skb = NULL;
1785
1786 /* Enable transmit and receive operation of the IPG. */
1787 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
1788 IPG_MC_RSVD_MASK, MAC_CTRL);
1789
1790 netif_start_queue(dev);
1791out:
1792 return rc;
1793
1794err_release_tfdlist_3:
1795 ipg_tx_clear(sp);
1796 ipg_rx_clear(sp);
1797err_free_tx_2:
1798 dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1799err_free_rx_1:
1800 dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1801err_free_irq_0:
1802 free_irq(pdev->irq, dev);
1803 goto out;
1804}
1805
1806static int ipg_nic_stop(struct net_device *dev)
1807{
1808 struct ipg_nic_private *sp = netdev_priv(dev);
1809 void __iomem *ioaddr = sp->ioaddr;
1810 struct pci_dev *pdev = sp->pdev;
1811
1812 IPG_DEBUG_MSG("_nic_stop\n");
1813
1814 netif_stop_queue(dev);
1815
1816 IPG_DUMPTFDLIST(dev);
1817
1818 do {
1819 (void) ipg_r16(INT_STATUS_ACK);
1820
1821 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1822
1823 synchronize_irq(pdev->irq);
1824 } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);
1825
1826 ipg_rx_clear(sp);
1827
1828 ipg_tx_clear(sp);
1829
1830 pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1831 pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1832
1833 free_irq(pdev->irq, dev);
1834
1835 return 0;
1836}
1837
1838static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
1839 struct net_device *dev)
1840{
1841 struct ipg_nic_private *sp = netdev_priv(dev);
1842 void __iomem *ioaddr = sp->ioaddr;
1843 unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
1844 unsigned long flags;
1845 struct ipg_tx *txfd;
1846
1847 IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");
1848
1849 /* If in 10Mbps mode, stop the transmit queue so
1850 * no more transmit frames are accepted.
1851 */
1852 if (sp->tenmbpsmode)
1853 netif_stop_queue(dev);
1854
1855 if (sp->reset_current_tfd) {
1856 sp->reset_current_tfd = 0;
1857 entry = 0;
1858 }
1859
1860 txfd = sp->txd + entry;
1861
1862 sp->tx_buff[entry] = skb;
1863
1864 /* Clear all TFC fields, except TFDDONE. */
1865 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
1866
1867 /* Specify the TFC field within the TFD. */
1868 txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
1869 (IPG_TFC_FRAMEID & sp->tx_current) |
1870 (IPG_TFC_FRAGCOUNT & (1 << 24)));
1871 /*
1872 * 16--17 (WordAlign) <- 3 (disable),
1873 * 0--15 (FrameId) <- sp->tx_current,
1874 * 24--27 (FragCount) <- 1
1875 */
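	/* Illustrative example, not driver code: with sp->tx_current =
	 * 0x1234 the three fields above OR together as
	 *
	 *	0x0000000000030000  WordAlign disabled
	 *	0x0000000000001234  FrameId
	 *	0x0000000001000000  FragCount = 1
	 *	------------------
	 *	0x0000000001031234
	 */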
1876
1877	/* Request TxComplete interrupts at an interval defined
1878	 * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
1879	 * Request a TxComplete interrupt for every frame
1880	 * when in 10Mbps mode, to work around a problem with
1881	 * 10Mbps processing.
1882	 */
1883 if (sp->tenmbpsmode)
1884 txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
1885 txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
1886 /* Based on compilation option, determine if FCS is to be
1887 * appended to transmit frame by IPG.
1888 */
1889 if (!(IPG_APPEND_FCS_ON_TX))
1890 txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);
1891
1892 /* Based on compilation option, determine if IP, TCP and/or
1893 * UDP checksums are to be added to transmit frame by IPG.
1894 */
1895 if (IPG_ADD_IPCHECKSUM_ON_TX)
1896 txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);
1897
1898 if (IPG_ADD_TCPCHECKSUM_ON_TX)
1899 txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);
1900
1901 if (IPG_ADD_UDPCHECKSUM_ON_TX)
1902 txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);
1903
1904 /* Based on compilation option, determine if VLAN tag info is to be
1905 * inserted into transmit frame by IPG.
1906 */
1907 if (IPG_INSERT_MANUAL_VLAN_TAG) {
1908 txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
1909 ((u64) IPG_MANUAL_VLAN_VID << 32) |
1910 ((u64) IPG_MANUAL_VLAN_CFI << 44) |
1911 ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
1912 }
1913
1914	/* The fragment start location within system memory is defined
1915	 * by the sk_buff structure's data field. The DMA bus address
1916	 * of this location is obtained by mapping the buffer with
1917	 * pci_map_single().
1918	 */
1919 txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
1920 skb->len, PCI_DMA_TODEVICE));
1921
1922 /* The length of the fragment within system memory is defined by
1923 * the sk_buff structure's len field.
1924 */
1925 txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
1926 ((u64) (skb->len & 0xffff) << 48));
1927
1928 /* Clear the TFDDone bit last to indicate the TFD is ready
1929 * for transfer to the IPG.
1930 */
1931 txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);
1932
1933 spin_lock_irqsave(&sp->lock, flags);
1934
1935 sp->tx_current++;
1936
1937 mmiowb();
1938
1939 ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);
1940
1941 if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
1942 netif_stop_queue(dev);
1943
1944 spin_unlock_irqrestore(&sp->lock, flags);
1945
1946 return NETDEV_TX_OK;
1947}
1948
1949static void ipg_set_phy_default_param(unsigned char rev,
1950 struct net_device *dev, int phy_address)
1951{
1952 unsigned short length;
1953 unsigned char revision;
1954 const unsigned short *phy_param;
1955 unsigned short address, value;
1956
1957 phy_param = &DefaultPhyParam[0];
1958 length = *phy_param & 0x00FF;
1959 revision = (unsigned char)((*phy_param) >> 8);
1960 phy_param++;
1961 while (length != 0) {
1962 if (rev == revision) {
1963 while (length > 1) {
1964 address = *phy_param;
1965 value = *(phy_param + 1);
1966 phy_param += 2;
1967 mdio_write(dev, phy_address, address, value);
1968 length -= 4;
1969 }
1970 break;
1971 } else {
1972 phy_param += length / 2;
1973 length = *phy_param & 0x00FF;
1974 revision = (unsigned char)((*phy_param) >> 8);
1975 phy_param++;
1976 }
1977 }
1978}
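
/* Illustrative sketch, not part of the driver: the DefaultPhyParam
 * table walked above is a sequence of records, each beginning with a
 * word whose high byte is the chip revision and whose low byte is the
 * record length in bytes, followed by (register, value) pairs and
 * terminated by a zero-length record. The values below are
 * hypothetical, purely to show the shape:
 */
#if 0
static const unsigned short example_phy_param[] = {
	(0x40 << 8) | 0x08,	/* revision 0x40, 8 bytes of pairs */
	0x001f, 0x0001,		/* hypothetical register/value pair */
	0x001e, 0x5540,		/* hypothetical register/value pair */
	0x0000,			/* terminator: length 0 */
};
#endif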
1979
1980static int read_eeprom(struct net_device *dev, int eep_addr)
1981{
1982 void __iomem *ioaddr = ipg_ioaddr(dev);
1983 unsigned int i;
1984 int ret = 0;
1985 u16 value;
1986
1987 value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
1988 ipg_w16(value, EEPROM_CTRL);
1989
1990 for (i = 0; i < 1000; i++) {
1991 u16 data;
1992
1993 mdelay(10);
1994 data = ipg_r16(EEPROM_CTRL);
1995 if (!(data & IPG_EC_EEPROM_BUSY)) {
1996 ret = ipg_r16(EEPROM_DATA);
1997 break;
1998 }
1999 }
2000 return ret;
2001}
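
/* Illustrative note: each read_eeprom() call issues one 16-bit read by
 * writing the read opcode plus word address to EEPROM_CTRL, then polls
 * the busy bit (up to 1000 x 10 ms) before fetching EEPROM_DATA.
 * ipg_hw_init() below uses it to read the LED mode (offset 6) and the
 * three station address words (offsets 16-18).
 */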
2002
2003static void ipg_init_mii(struct net_device *dev)
2004{
2005 struct ipg_nic_private *sp = netdev_priv(dev);
2006 struct mii_if_info *mii_if = &sp->mii_if;
2007 int phyaddr;
2008
2009 mii_if->dev = dev;
2010 mii_if->mdio_read = mdio_read;
2011 mii_if->mdio_write = mdio_write;
2012 mii_if->phy_id_mask = 0x1f;
2013 mii_if->reg_num_mask = 0x1f;
2014
2015 mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);
2016
2017 if (phyaddr != 0x1f) {
2018 u16 mii_phyctrl, mii_1000cr;
2019
2020 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
2021 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
2022 GMII_PHY_1000BASETCONTROL_PreferMaster;
2023 mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);
2024
2025 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
2026
2027 /* Set default phyparam */
2028 ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
2029
2030 /* Reset PHY */
2031 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
2032 mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
2033
2034 }
2035}
2036
2037static int ipg_hw_init(struct net_device *dev)
2038{
2039 struct ipg_nic_private *sp = netdev_priv(dev);
2040 void __iomem *ioaddr = sp->ioaddr;
2041 unsigned int i;
2042 int rc;
2043
2044 /* Read/Write and Reset EEPROM Value */
2045 /* Read LED Mode Configuration from EEPROM */
2046 sp->led_mode = read_eeprom(dev, 6);
2047
2048	/* Reset all functions within the IPG. Do not assert
2049	 * RST_OUT, as it is not compatible with some PHYs.
2050	 */
2051 rc = ipg_reset(dev, IPG_RESET_MASK);
2052 if (rc < 0)
2053 goto out;
2054
2055 ipg_init_mii(dev);
2056
2057 /* Read MAC Address from EEPROM */
2058 for (i = 0; i < 3; i++)
2059 sp->station_addr[i] = read_eeprom(dev, 16 + i);
2060
2061 for (i = 0; i < 3; i++)
2062 ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);
2063
2064 /* Set station address in ethernet_device structure. */
2065 dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff;
2066 dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
2067 dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff;
2068 dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
2069 dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff;
2070 dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
2071out:
2072 return rc;
2073}
2074
2075static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2076{
2077 struct ipg_nic_private *sp = netdev_priv(dev);
2078 int rc;
2079
2080 mutex_lock(&sp->mii_mutex);
2081 rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
2082 mutex_unlock(&sp->mii_mutex);
2083
2084 return rc;
2085}
2086
2087static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
2088{
2089 struct ipg_nic_private *sp = netdev_priv(dev);
2090 int err;
2091
2092	/* Function to accommodate changes to the Maximum Transmission
2093	 * Unit (MTU) of the IPG NIC. The default handler cannot be
2094	 * used since it does not allow an MTU larger than 1500 bytes.
2095	 */
2096
2097 IPG_DEBUG_MSG("_nic_change_mtu\n");
2098
2099	/*
2100	 * Check that the new MTU value is between 68 (the minimum MTU an
2101	 * IPv4 link must support, per RFC 791) and 10 KB, the largest supported MTU.
2102	 */
2103 if (new_mtu < 68 || new_mtu > 10240)
2104 return -EINVAL;
2105
2106 err = ipg_nic_stop(dev);
2107 if (err)
2108 return err;
2109
2110 dev->mtu = new_mtu;
2111
2112 sp->max_rxframe_size = new_mtu;
2113
2114 sp->rxfrag_size = new_mtu;
2115 if (sp->rxfrag_size > 4088)
2116 sp->rxfrag_size = 4088;
2117
2118 sp->rxsupport_size = sp->max_rxframe_size;
2119
2120 if (new_mtu > 0x0600)
2121 sp->is_jumbo = true;
2122 else
2123 sp->is_jumbo = false;
2124
2125 return ipg_nic_open(dev);
2126}
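
/* Illustrative note: e.g. new_mtu = 9000 yields max_rxframe_size =
 * rxsupport_size = 9000, rxfrag_size capped at 4088, and is_jumbo =
 * true (9000 > 0x0600), so the receive path reassembles frames from
 * multiple RFDs via ipg_nic_rx_jumbo().
 */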
2127
2128static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2129{
2130 struct ipg_nic_private *sp = netdev_priv(dev);
2131 int rc;
2132
2133 mutex_lock(&sp->mii_mutex);
2134 rc = mii_ethtool_gset(&sp->mii_if, cmd);
2135 mutex_unlock(&sp->mii_mutex);
2136
2137 return rc;
2138}
2139
2140static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2141{
2142 struct ipg_nic_private *sp = netdev_priv(dev);
2143 int rc;
2144
2145 mutex_lock(&sp->mii_mutex);
2146 rc = mii_ethtool_sset(&sp->mii_if, cmd);
2147 mutex_unlock(&sp->mii_mutex);
2148
2149 return rc;
2150}
2151
2152static int ipg_nway_reset(struct net_device *dev)
2153{
2154 struct ipg_nic_private *sp = netdev_priv(dev);
2155 int rc;
2156
2157 mutex_lock(&sp->mii_mutex);
2158 rc = mii_nway_restart(&sp->mii_if);
2159 mutex_unlock(&sp->mii_mutex);
2160
2161 return rc;
2162}
2163
2164static const struct ethtool_ops ipg_ethtool_ops = {
2165 .get_settings = ipg_get_settings,
2166 .set_settings = ipg_set_settings,
2167 .nway_reset = ipg_nway_reset,
2168};
2169
2170static void ipg_remove(struct pci_dev *pdev)
2171{
2172 struct net_device *dev = pci_get_drvdata(pdev);
2173 struct ipg_nic_private *sp = netdev_priv(dev);
2174
2175 IPG_DEBUG_MSG("_remove\n");
2176
2177 /* Un-register Ethernet device. */
2178 unregister_netdev(dev);
2179
2180 pci_iounmap(pdev, sp->ioaddr);
2181
2182 pci_release_regions(pdev);
2183
2184 free_netdev(dev);
2185 pci_disable_device(pdev);
2186}
2187
2188static const struct net_device_ops ipg_netdev_ops = {
2189 .ndo_open = ipg_nic_open,
2190 .ndo_stop = ipg_nic_stop,
2191 .ndo_start_xmit = ipg_nic_hard_start_xmit,
2192 .ndo_get_stats = ipg_nic_get_stats,
2193 .ndo_set_rx_mode = ipg_nic_set_multicast_list,
2194 .ndo_do_ioctl = ipg_ioctl,
2195 .ndo_tx_timeout = ipg_tx_timeout,
2196 .ndo_change_mtu = ipg_nic_change_mtu,
2197 .ndo_set_mac_address = eth_mac_addr,
2198 .ndo_validate_addr = eth_validate_addr,
2199};
2200
2201static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2202{
2203 unsigned int i = id->driver_data;
2204 struct ipg_nic_private *sp;
2205 struct net_device *dev;
2206 void __iomem *ioaddr;
2207 int rc;
2208
2209 rc = pci_enable_device(pdev);
2210 if (rc < 0)
2211 goto out;
2212
2213 pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]);
2214
2215 pci_set_master(pdev);
2216
2217 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2218 if (rc < 0) {
2219 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2220 if (rc < 0) {
2221 pr_err("%s: DMA config failed\n", pci_name(pdev));
2222 goto err_disable_0;
2223 }
2224 }
2225
2226 /*
2227 * Initialize net device.
2228 */
2229 dev = alloc_etherdev(sizeof(struct ipg_nic_private));
2230 if (!dev) {
2231 rc = -ENOMEM;
2232 goto err_disable_0;
2233 }
2234
2235 sp = netdev_priv(dev);
2236 spin_lock_init(&sp->lock);
2237 mutex_init(&sp->mii_mutex);
2238
2239 sp->is_jumbo = IPG_IS_JUMBO;
2240 sp->rxfrag_size = IPG_RXFRAG_SIZE;
2241 sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
2242 sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;
2243
2244 /* Declare IPG NIC functions for Ethernet device methods.
2245 */
2246 dev->netdev_ops = &ipg_netdev_ops;
2247 SET_NETDEV_DEV(dev, &pdev->dev);
2248 dev->ethtool_ops = &ipg_ethtool_ops;
2249
2250 rc = pci_request_regions(pdev, DRV_NAME);
2251 if (rc)
2252 goto err_free_dev_1;
2253
2254 ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
2255 if (!ioaddr) {
2256 pr_err("%s: cannot map MMIO\n", pci_name(pdev));
2257 rc = -EIO;
2258 goto err_release_regions_2;
2259 }
2260
2261 /* Save the pointer to the PCI device information. */
2262 sp->ioaddr = ioaddr;
2263 sp->pdev = pdev;
2264 sp->dev = dev;
2265
2266 INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);
2267
2268 pci_set_drvdata(pdev, dev);
2269
2270 rc = ipg_hw_init(dev);
2271 if (rc < 0)
2272 goto err_unmap_3;
2273
2274 rc = register_netdev(dev);
2275 if (rc < 0)
2276 goto err_unmap_3;
2277
2278 netdev_info(dev, "Ethernet device registered\n");
2279out:
2280 return rc;
2281
2282err_unmap_3:
2283 pci_iounmap(pdev, ioaddr);
2284err_release_regions_2:
2285 pci_release_regions(pdev);
2286err_free_dev_1:
2287 free_netdev(dev);
2288err_disable_0:
2289 pci_disable_device(pdev);
2290 goto out;
2291}
2292
2293static struct pci_driver ipg_pci_driver = {
2294 .name = IPG_DRIVER_NAME,
2295 .id_table = ipg_pci_tbl,
2296 .probe = ipg_probe,
2297 .remove = ipg_remove,
2298};
2299
2300module_pci_driver(ipg_pci_driver);
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h
deleted file mode 100644
index de606281f97b..000000000000
--- a/drivers/net/ethernet/icplus/ipg.h
+++ /dev/null
@@ -1,748 +0,0 @@
1/*
2 * Include file for Gigabit Ethernet device driver for Network
3 * Interface Cards (NICs) utilizing the Tamarack Microelectronics
4 * Inc. IPG Gigabit or Triple Speed Ethernet Media Access
5 * Controller.
6 */
7#ifndef __LINUX_IPG_H
8#define __LINUX_IPG_H
9
10#include <linux/module.h>
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/ioport.h>
15#include <linux/errno.h>
16#include <asm/io.h>
17#include <linux/delay.h>
18#include <linux/types.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/skbuff.h>
22#include <asm/bitops.h>
23
24/*
25 * Constants
26 */
27
28/* GMII based PHY IDs */
29#define NS 0x2000
30#define MARVELL 0x0141
31#define ICPLUS_PHY 0x243
32
33/* NIC Physical Layer Device MII register fields. */
34#define MII_PHY_SELECTOR_IEEE8023 0x0001
35#define MII_PHY_TECHABILITYFIELD 0x1FE0
36
37/* GMII_PHY_1000BASETCONTROL needs to be set to prefer master */
38#define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400
39
40/* NIC Physical Layer Device GMII constants. */
41#define GMII_PREAMBLE 0xFFFFFFFF
42#define GMII_ST 0x1
43#define GMII_READ 0x2
44#define GMII_WRITE 0x1
45#define GMII_TA_READ_MASK 0x1
46#define GMII_TA_WRITE 0x2
47
48/* I/O register offsets. */
49enum ipg_regs {
50 DMA_CTRL = 0x00,
51 RX_DMA_STATUS = 0x08, /* Unused + reserved */
52 TFD_LIST_PTR_0 = 0x10,
53 TFD_LIST_PTR_1 = 0x14,
54 TX_DMA_BURST_THRESH = 0x18,
55 TX_DMA_URGENT_THRESH = 0x19,
56 TX_DMA_POLL_PERIOD = 0x1a,
57 RFD_LIST_PTR_0 = 0x1c,
58 RFD_LIST_PTR_1 = 0x20,
59 RX_DMA_BURST_THRESH = 0x24,
60 RX_DMA_URGENT_THRESH = 0x25,
61 RX_DMA_POLL_PERIOD = 0x26,
62 DEBUG_CTRL = 0x2c,
63 ASIC_CTRL = 0x30,
64 FIFO_CTRL = 0x38, /* Unused */
65 FLOW_OFF_THRESH = 0x3c,
66 FLOW_ON_THRESH = 0x3e,
67 EEPROM_DATA = 0x48,
68 EEPROM_CTRL = 0x4a,
69 EXPROM_ADDR = 0x4c, /* Unused */
70 EXPROM_DATA = 0x50, /* Unused */
71 WAKE_EVENT = 0x51, /* Unused */
72 COUNTDOWN = 0x54, /* Unused */
73 INT_STATUS_ACK = 0x5a,
74 INT_ENABLE = 0x5c,
75 INT_STATUS = 0x5e, /* Unused */
76 TX_STATUS = 0x60,
77 MAC_CTRL = 0x6c,
78 VLAN_TAG = 0x70, /* Unused */
79 PHY_SET = 0x75,
80 PHY_CTRL = 0x76,
81 STATION_ADDRESS_0 = 0x78,
82 STATION_ADDRESS_1 = 0x7a,
83 STATION_ADDRESS_2 = 0x7c,
84 MAX_FRAME_SIZE = 0x86,
85 RECEIVE_MODE = 0x88,
86 HASHTABLE_0 = 0x8c,
87 HASHTABLE_1 = 0x90,
88 RMON_STATISTICS_MASK = 0x98,
89 STATISTICS_MASK = 0x9c,
90 RX_JUMBO_FRAMES = 0xbc, /* Unused */
91 TCP_CHECKSUM_ERRORS = 0xc0, /* Unused */
92 IP_CHECKSUM_ERRORS = 0xc2, /* Unused */
93 UDP_CHECKSUM_ERRORS = 0xc4, /* Unused */
94 TX_JUMBO_FRAMES = 0xf4 /* Unused */
95};
96
97/* Ethernet MIB statistic register offsets. */
98#define IPG_OCTETRCVOK 0xA8
99#define IPG_MCSTOCTETRCVDOK 0xAC
100#define IPG_BCSTOCTETRCVOK 0xB0
101#define IPG_FRAMESRCVDOK 0xB4
102#define IPG_MCSTFRAMESRCVDOK 0xB8
103#define IPG_BCSTFRAMESRCVDOK 0xBE
104#define IPG_MACCONTROLFRAMESRCVD 0xC6
105#define IPG_FRAMETOOLONGERRORS 0xC8
106#define IPG_INRANGELENGTHERRORS 0xCA
107#define IPG_FRAMECHECKSEQERRORS 0xCC
108#define IPG_FRAMESLOSTRXERRORS 0xCE
109#define IPG_OCTETXMTOK 0xD0
110#define IPG_MCSTOCTETXMTOK 0xD4
111#define IPG_BCSTOCTETXMTOK 0xD8
112#define IPG_FRAMESXMTDOK 0xDC
113#define IPG_MCSTFRAMESXMTDOK 0xE0
114#define IPG_FRAMESWDEFERREDXMT 0xE4
115#define IPG_LATECOLLISIONS 0xE8
116#define IPG_MULTICOLFRAMES 0xEC
117#define IPG_SINGLECOLFRAMES 0xF0
118#define IPG_BCSTFRAMESXMTDOK 0xF6
119#define IPG_CARRIERSENSEERRORS 0xF8
120#define IPG_MACCONTROLFRAMESXMTDOK 0xFA
121#define IPG_FRAMESABORTXSCOLLS 0xFC
122#define IPG_FRAMESWEXDEFERRAL 0xFE
123
124/* RMON statistic register offsets. */
125#define IPG_ETHERSTATSCOLLISIONS 0x100
126#define IPG_ETHERSTATSOCTETSTRANSMIT 0x104
127#define IPG_ETHERSTATSPKTSTRANSMIT 0x108
128#define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C
129#define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110
130#define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114
131#define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118
132#define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C
133#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120
134#define IPG_ETHERSTATSCRCALIGNERRORS 0x124
135#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128
136#define IPG_ETHERSTATSFRAGMENTS 0x12C
137#define IPG_ETHERSTATSJABBERS 0x130
138#define IPG_ETHERSTATSOCTETS 0x134
139#define IPG_ETHERSTATSPKTS 0x138
140#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C
141#define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140
142#define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144
143#define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148
144#define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C
145#define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150
146
147/* RMON statistic register equivalents. */
148#define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0
149#define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6
150#define IPG_ETHERSTATSMULTICASTPKTS 0xB8
151#define IPG_ETHERSTATSBROADCASTPKTS 0xBE
152#define IPG_ETHERSTATSOVERSIZEPKTS 0xC8
153#define IPG_ETHERSTATSDROPEVENTS 0xCE
154
155/* Serial EEPROM offsets */
156#define IPG_EEPROM_CONFIGPARAM 0x00
157#define IPG_EEPROM_ASICCTRL 0x01
158#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02
159#define IPG_EEPROM_SUBSYSTEMID 0x03
160#define IPG_EEPROM_STATIONADDRESS0 0x10
161#define IPG_EEPROM_STATIONADDRESS1 0x11
162#define IPG_EEPROM_STATIONADDRESS2 0x12
163
164/* Register & data structure bit masks */
165
166/* PCI register masks. */
167
168/* IOBaseAddress */
169#define IPG_PIB_RSVD_MASK 0xFFFFFE01
170#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
171#define IPG_PIB_IOBASEADDRIND 0x00000001
172
173/* MemBaseAddress */
174#define IPG_PMB_RSVD_MASK 0xFFFFFE07
175#define IPG_PMB_MEMBASEADDRIND 0x00000001
176#define IPG_PMB_MEMMAPTYPE 0x00000006
177#define IPG_PMB_MEMMAPTYPE0 0x00000002
178#define IPG_PMB_MEMMAPTYPE1 0x00000004
179#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
180
181/* ConfigStatus */
182#define IPG_CS_RSVD_MASK 0xFFB0
183#define IPG_CS_CAPABILITIES 0x0010
184#define IPG_CS_66MHZCAPABLE 0x0020
185#define IPG_CS_FASTBACK2BACK 0x0080
186#define IPG_CS_DATAPARITYREPORTED 0x0100
187#define IPG_CS_DEVSELTIMING 0x0600
188#define IPG_CS_SIGNALEDTARGETABORT 0x0800
189#define IPG_CS_RECEIVEDTARGETABORT 0x1000
190#define IPG_CS_RECEIVEDMASTERABORT 0x2000
191#define IPG_CS_SIGNALEDSYSTEMERROR 0x4000
192#define IPG_CS_DETECTEDPARITYERROR 0x8000
193
194/* TFD data structure masks. */
195
196/* TFDList, TFC */
197#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL
198#define IPG_TFC_FRAMEID 0x000000000000FFFFULL
199#define IPG_TFC_WORDALIGN 0x0000000000030000ULL
200#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL
201#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL
202#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL
203#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL
204#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL
205#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL
206#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL
207#define IPG_TFC_TXINDICATE 0x0000000000400000ULL
208#define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL
209#define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL
210#define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL
211#define IPG_TFC_TFDDONE 0x0000000080000000ULL
212#define IPG_TFC_VID 0x00000FFF00000000ULL
213#define IPG_TFC_CFI 0x0000100000000000ULL
214#define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL
215
216/* TFDList, FragInfo */
217#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
218#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL
219#define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL
220
221/* RFD data structure masks. */
222
223/* RFDList, RFS */
224#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL
225#define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL
226#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL
227#define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL
228#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL
229#define IPG_RFS_RXFCSERROR 0x0000000000080000ULL
230#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL
231#define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL
232#define IPG_RFS_VLANDETECTED 0x0000000000400000ULL
233#define IPG_RFS_TCPDETECTED 0x0000000000800000ULL
234#define IPG_RFS_TCPERROR 0x0000000001000000ULL
235#define IPG_RFS_UDPDETECTED 0x0000000002000000ULL
236#define IPG_RFS_UDPERROR 0x0000000004000000ULL
237#define IPG_RFS_IPDETECTED 0x0000000008000000ULL
238#define IPG_RFS_IPERROR 0x0000000010000000ULL
239#define IPG_RFS_FRAMESTART 0x0000000020000000ULL
240#define IPG_RFS_FRAMEEND 0x0000000040000000ULL
241#define IPG_RFS_RFDDONE 0x0000000080000000ULL
242#define IPG_RFS_TCI 0x0000FFFF00000000ULL
243
244/* RFDList, FragInfo */
245#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
246#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL
247#define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL
248
249/* I/O Register masks. */
250
251/* RMON Statistics Mask */
252#define IPG_RZ_ALL 0x0FFFFFFF
253
254/* Statistics Mask */
255#define IPG_SM_ALL 0x0FFFFFFF
256#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
257#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
258#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
259#define IPG_SM_RXJUMBOFRAMES 0x00000008
260#define IPG_SM_TCPCHECKSUMERRORS 0x00000010
261#define IPG_SM_IPCHECKSUMERRORS 0x00000020
262#define IPG_SM_UDPCHECKSUMERRORS 0x00000040
263#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080
264#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100
265#define IPG_SM_INRANGELENGTHERRORS 0x00000200
266#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400
267#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800
268#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
269#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
270#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
271#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000
272#define IPG_SM_LATECOLLISIONS 0x00010000
273#define IPG_SM_MULTICOLFRAMES 0x00020000
274#define IPG_SM_SINGLECOLFRAMES 0x00040000
275#define IPG_SM_TXJUMBOFRAMES 0x00080000
276#define IPG_SM_CARRIERSENSEERRORS 0x00100000
277#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000
278#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000
279#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
280
281/* Countdown */
282#define IPG_CD_RSVD_MASK 0x0700FFFF
283#define IPG_CD_COUNT 0x0000FFFF
284#define IPG_CD_COUNTDOWNSPEED 0x01000000
285#define IPG_CD_COUNTDOWNMODE 0x02000000
286#define IPG_CD_COUNTINTENABLED 0x04000000
287
288/* TxDMABurstThresh */
289#define IPG_TB_RSVD_MASK 0xFF
290
291/* TxDMAUrgentThresh */
292#define IPG_TU_RSVD_MASK 0xFF
293
294/* TxDMAPollPeriod */
295#define IPG_TP_RSVD_MASK 0xFF
296
297/* RxDMAUrgentThresh */
298#define IPG_RU_RSVD_MASK 0xFF
299
300/* RxDMAPollPeriod */
301#define IPG_RP_RSVD_MASK 0xFF
302
303/* ReceiveMode */
304#define IPG_RM_RSVD_MASK 0x3F
305#define IPG_RM_RECEIVEUNICAST 0x01
306#define IPG_RM_RECEIVEMULTICAST 0x02
307#define IPG_RM_RECEIVEBROADCAST 0x04
308#define IPG_RM_RECEIVEALLFRAMES 0x08
309#define IPG_RM_RECEIVEMULTICASTHASH 0x10
310#define IPG_RM_RECEIVEIPMULTICAST 0x20
311
312/* PhySet */
313#define IPG_PS_MEM_LENB9B 0x01
314#define IPG_PS_MEM_LEN9 0x02
315#define IPG_PS_NON_COMPDET 0x04
316
317/* PhyCtrl */
318#define IPG_PC_RSVD_MASK 0xFF
319#define IPG_PC_MGMTCLK_LO 0x00
320#define IPG_PC_MGMTCLK_HI 0x01
321#define IPG_PC_MGMTCLK 0x01
322#define IPG_PC_MGMTDATA 0x02
323#define IPG_PC_MGMTDIR 0x04
324#define IPG_PC_DUPLEX_POLARITY 0x08
325#define IPG_PC_DUPLEX_STATUS 0x10
326#define IPG_PC_LINK_POLARITY 0x20
327#define IPG_PC_LINK_SPEED 0xC0
328#define IPG_PC_LINK_SPEED_10MBPS 0x40
329#define IPG_PC_LINK_SPEED_100MBPS 0x80
330#define IPG_PC_LINK_SPEED_1000MBPS 0xC0
331
332/* DMACtrl */
333#define IPG_DC_RSVD_MASK 0xC07D9818
334#define IPG_DC_RX_DMA_COMPLETE 0x00000008
335#define IPG_DC_RX_DMA_POLL_NOW 0x00000010
336#define IPG_DC_TX_DMA_COMPLETE 0x00000800
337#define IPG_DC_TX_DMA_POLL_NOW 0x00001000
338#define IPG_DC_TX_DMA_IN_PROG 0x00008000
339#define IPG_DC_RX_EARLY_DISABLE 0x00010000
340#define IPG_DC_MWI_DISABLE 0x00040000
341#define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000
342#define IPG_DC_TX_BURST_LIMIT 0x00700000
343#define IPG_DC_TARGET_ABORT 0x40000000
344#define IPG_DC_MASTER_ABORT 0x80000000
345
346/* ASICCtrl */
347#define IPG_AC_RSVD_MASK 0x07FFEFF2
348#define IPG_AC_EXP_ROM_SIZE 0x00000002
349#define IPG_AC_PHY_SPEED10 0x00000010
350#define IPG_AC_PHY_SPEED100 0x00000020
351#define IPG_AC_PHY_SPEED1000 0x00000040
352#define IPG_AC_PHY_MEDIA 0x00000080
353#define IPG_AC_FORCED_CFG 0x00000700
354#define IPG_AC_D3RESETDISABLE 0x00000800
355#define IPG_AC_SPEED_UP_MODE 0x00002000
356#define IPG_AC_LED_MODE 0x00004000
357#define IPG_AC_RST_OUT_POLARITY 0x00008000
358#define IPG_AC_GLOBAL_RESET 0x00010000
359#define IPG_AC_RX_RESET 0x00020000
360#define IPG_AC_TX_RESET 0x00040000
361#define IPG_AC_DMA 0x00080000
362#define IPG_AC_FIFO 0x00100000
363#define IPG_AC_NETWORK 0x00200000
364#define IPG_AC_HOST 0x00400000
365#define IPG_AC_AUTO_INIT 0x00800000
366#define IPG_AC_RST_OUT 0x01000000
367#define IPG_AC_INT_REQUEST 0x02000000
368#define IPG_AC_RESET_BUSY 0x04000000
369#define IPG_AC_LED_SPEED 0x08000000
370#define IPG_AC_LED_MODE_BIT_1 0x20000000
371
372/* EepromCtrl */
373#define IPG_EC_RSVD_MASK 0x83FF
374#define IPG_EC_EEPROM_ADDR 0x00FF
375#define IPG_EC_EEPROM_OPCODE 0x0300
376#define IPG_EC_EEPROM_SUBCOMMAD 0x0000
377#define IPG_EC_EEPROM_WRITEOPCODE 0x0100
378#define IPG_EC_EEPROM_READOPCODE 0x0200
379#define IPG_EC_EEPROM_ERASEOPCODE 0x0300
380#define IPG_EC_EEPROM_BUSY 0x8000
381
382/* FIFOCtrl */
383#define IPG_FC_RSVD_MASK 0xC001
384#define IPG_FC_RAM_TEST_MODE 0x0001
385#define IPG_FC_TRANSMITTING 0x4000
386#define IPG_FC_RECEIVING 0x8000
387
388/* TxStatus */
389#define IPG_TS_RSVD_MASK 0xFFFF00DD
390#define IPG_TS_TX_ERROR 0x00000001
391#define IPG_TS_LATE_COLLISION 0x00000004
392#define IPG_TS_TX_MAX_COLL 0x00000008
393#define IPG_TS_TX_UNDERRUN 0x00000010
394#define IPG_TS_TX_IND_REQD 0x00000040
395#define IPG_TS_TX_COMPLETE 0x00000080
396#define IPG_TS_TX_FRAMEID 0xFFFF0000
397
398/* WakeEvent */
399#define IPG_WE_WAKE_PKT_ENABLE 0x01
400#define IPG_WE_MAGIC_PKT_ENABLE 0x02
401#define IPG_WE_LINK_EVT_ENABLE 0x04
402#define IPG_WE_WAKE_POLARITY 0x08
403#define IPG_WE_WAKE_PKT_EVT 0x10
404#define IPG_WE_MAGIC_PKT_EVT 0x20
405#define IPG_WE_LINK_EVT 0x40
406#define IPG_WE_WOL_ENABLE 0x80
407
408/* IntEnable */
409#define IPG_IE_RSVD_MASK 0x1FFE
410#define IPG_IE_HOST_ERROR 0x0002
411#define IPG_IE_TX_COMPLETE 0x0004
412#define IPG_IE_MAC_CTRL_FRAME 0x0008
413#define IPG_IE_RX_COMPLETE 0x0010
414#define IPG_IE_RX_EARLY 0x0020
415#define IPG_IE_INT_REQUESTED 0x0040
416#define IPG_IE_UPDATE_STATS 0x0080
417#define IPG_IE_LINK_EVENT 0x0100
418#define IPG_IE_TX_DMA_COMPLETE 0x0200
419#define IPG_IE_RX_DMA_COMPLETE 0x0400
420#define IPG_IE_RFD_LIST_END 0x0800
421#define IPG_IE_RX_DMA_PRIORITY 0x1000
422
423/* IntStatus */
424#define IPG_IS_RSVD_MASK 0x1FFF
425#define IPG_IS_INTERRUPT_STATUS 0x0001
426#define IPG_IS_HOST_ERROR 0x0002
427#define IPG_IS_TX_COMPLETE 0x0004
428#define IPG_IS_MAC_CTRL_FRAME 0x0008
429#define IPG_IS_RX_COMPLETE 0x0010
430#define IPG_IS_RX_EARLY 0x0020
431#define IPG_IS_INT_REQUESTED 0x0040
432#define IPG_IS_UPDATE_STATS 0x0080
433#define IPG_IS_LINK_EVENT 0x0100
434#define IPG_IS_TX_DMA_COMPLETE 0x0200
435#define IPG_IS_RX_DMA_COMPLETE 0x0400
436#define IPG_IS_RFD_LIST_END 0x0800
437#define IPG_IS_RX_DMA_PRIORITY 0x1000
438
439/* MACCtrl */
440#define IPG_MC_RSVD_MASK 0x7FE33FA3
441#define IPG_MC_IFS_SELECT 0x00000003
442#define IPG_MC_IFS_4352BIT 0x00000003
443#define IPG_MC_IFS_1792BIT 0x00000002
444#define IPG_MC_IFS_1024BIT 0x00000001
445#define IPG_MC_IFS_96BIT 0x00000000
446#define IPG_MC_DUPLEX_SELECT 0x00000020
447#define IPG_MC_DUPLEX_SELECT_FD 0x00000020
448#define IPG_MC_DUPLEX_SELECT_HD 0x00000000
449#define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080
450#define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100
451#define IPG_MC_RCV_FCS 0x00000200
452#define IPG_MC_FIFO_LOOPBACK 0x00000400
453#define IPG_MC_MAC_LOOPBACK 0x00000800
454#define IPG_MC_AUTO_VLAN_TAGGING 0x00001000
455#define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000
456#define IPG_MC_COLLISION_DETECT 0x00010000
457#define IPG_MC_CARRIER_SENSE 0x00020000
458#define IPG_MC_STATISTICS_ENABLE 0x00200000
459#define IPG_MC_STATISTICS_DISABLE 0x00400000
460#define IPG_MC_STATISTICS_ENABLED 0x00800000
461#define IPG_MC_TX_ENABLE 0x01000000
462#define IPG_MC_TX_DISABLE 0x02000000
463#define IPG_MC_TX_ENABLED 0x04000000
464#define IPG_MC_RX_ENABLE 0x08000000
465#define IPG_MC_RX_DISABLE 0x10000000
466#define IPG_MC_RX_ENABLED 0x20000000
467#define IPG_MC_PAUSED 0x40000000
468
469/*
470 * Tune
471 */
472
473/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */
474#define IPG_APPEND_FCS_ON_TX 1
475
476/* Assign IPG_STRIP_FCS_ON_RX > 0 for auto FCS strip on RX. */
477#define IPG_STRIP_FCS_ON_RX 1
478
479/* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with
480 * Ethernet errors.
481 */
482#define IPG_DROP_ON_RX_ETH_ERRORS 1
483
484/* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually
485 * (via TFC).
486 */
487#define IPG_INSERT_MANUAL_VLAN_TAG 0
488
489/* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */
490#define IPG_ADD_IPCHECKSUM_ON_TX 0
491
492/* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX.
493 * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
494 */
495#define IPG_ADD_TCPCHECKSUM_ON_TX 0
496
497/* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX.
498 * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
499 */
500#define IPG_ADD_UDPCHECKSUM_ON_TX 0
501
502/* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx
503 * constants as desired.
504 */
505#define IPG_MANUAL_VLAN_VID 0xABC
506#define IPG_MANUAL_VLAN_CFI 0x1
507#define IPG_MANUAL_VLAN_USERPRIORITY 0x5
508
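/* A minimal sketch, assuming the shift positions implied by the
 * IPG_TFC_* masks above (VID at bit 32, CFI at bit 44, user priority
 * at bit 45), of folding the manual VLAN constants into a TFC word;
 * the helper name is hypothetical:
 */
static inline u64 ipg_manual_vlan_tfc(u64 tfc)
{
	tfc |= IPG_TFC_VLANTAGINSERT;
	tfc |= ((u64)IPG_MANUAL_VLAN_VID << 32) & IPG_TFC_VID;
	tfc |= ((u64)IPG_MANUAL_VLAN_CFI << 44) & IPG_TFC_CFI;
	tfc |= ((u64)IPG_MANUAL_VLAN_USERPRIORITY << 45) & IPG_TFC_USERPRIORITY;
	return tfc;
}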
509#define IPG_IO_REG_RANGE 0xFF
510#define IPG_MEM_REG_RANGE 0x154
511#define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet"
512#define IPG_NIC_PHY_ADDRESS 0x01
513#define IPG_DMALIST_ALIGN_PAD 0x07
514#define IPG_MULTICAST_HASHTABLE_SIZE 0x40
515
516/* Number of milliseconds to wait after issuing a software reset.
517 * Keep IPG_AC_RESETWAIT >= 0x05 to allow for proper 10Mbps operation.
518 */
519#define IPG_AC_RESETWAIT 0x05
520
521/* Number of IPG_AC_RESETWAIT time periods before declaring a timeout. */
522#define IPG_AC_RESET_TIMEOUT 0x0A
523
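/* A sketch of how the two constants are meant to interact, assuming
 * an ASICCtrl register offset named IPG_ASICCTRL (defined with the
 * other I/O offsets in the driver): poll ResetBusy every
 * IPG_AC_RESETWAIT milliseconds, at most IPG_AC_RESET_TIMEOUT times.
 */
static inline int ipg_wait_reset_done(void __iomem *ioaddr)
{
	unsigned int polls = 0;

	while (readl(ioaddr + IPG_ASICCTRL) & IPG_AC_RESET_BUSY) {
		if (++polls > IPG_AC_RESET_TIMEOUT)
			return -ETIMEDOUT;
		mdelay(IPG_AC_RESETWAIT);	/* 5 ms between polls */
	}
	return 0;
}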
524/* Minimum number of nanoseconds used to toggle MDC clock during
525 * MII/GMII register access.
526 */
527#define IPG_PC_PHYCTRLWAIT_NS 200
528
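/* The PhyCtrl MGMT bits above drive a bit-banged MDC/MDIO cycle; a
 * hedged sketch of one clock tick, with ipg_write_phyctrl() standing
 * in for the driver's PhyCtrl write helper (hypothetical name):
 */
static inline void ipg_mdc_clock_bit(void __iomem *ioaddr, int bit)
{
	u8 val = (bit ? IPG_PC_MGMTDATA : 0) | IPG_PC_MGMTDIR;

	ipg_write_phyctrl(ioaddr, val | IPG_PC_MGMTCLK_LO);	/* MDC low */
	ndelay(IPG_PC_PHYCTRLWAIT_NS);
	ipg_write_phyctrl(ioaddr, val | IPG_PC_MGMTCLK_HI);	/* MDC high */
	ndelay(IPG_PC_PHYCTRLWAIT_NS);
}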
529#define IPG_TFDLIST_LENGTH 0x100
530
531/* Number of frames between TxDMAComplete interrupts.
532 * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH
533 */
534#define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1
535
536#define IPG_RFDLIST_LENGTH 0x100
537
538/* Maximum number of RFDs to process per interrupt.
539 * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH
540 */
541#define IPG_MAXRFDPROCESS_COUNT 0x80
542
543/* Minimum margin between the last freed RFD and the current RFD.
544 * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH
545 */
546#define IPG_MINUSEDRFDSTOFREE 0x80
547
548/* Maximum jumbo frame size, in units of 0x600 bytes
549 * (the rx_buffer size that one RFD can carry).
550 */
551#define MAX_JUMBOSIZE 0x8 /* max is 12K */
552
553/* Key register values loaded at driver start up. */
554
555/* TxDMAPollPeriod is specified in 320ns increments.
556 *
557 * Value Time
558 * ---------------------
559 * 0x00-0x01 320ns
560 * 0x03 ~1us
561 * 0x1F ~10us
562 * 0xFF ~82us
563 */
564#define IPG_TXDMAPOLLPERIOD_VALUE 0x26
565
566/* TxDMAUrgentThresh specifies the minimum amount of
567 * data in the transmit FIFO before asserting an
568 * urgent transmit DMA request.
569 *
570 * Value Min TxFIFO occupied space before urgent TX request
571 * ---------------------------------------------------------------
572 * 0x00-0x04 128 bytes (1024 bits)
573 * 0x27 1248 bytes (~10000 bits)
574 * 0x30 1536 bytes (12288 bits)
575 * 0xFF 8192 bytes (65536 bits)
576 */
577#define IPG_TXDMAURGENTTHRESH_VALUE 0x04
578
579/* TxDMABurstThresh specifies the minimum amount of
580 * free space in the transmit FIFO before asserting a
581 * transmit DMA request.
582 *
583 * Value Min TxFIFO free space before TX request
584 * ----------------------------------------------------
585 * 0x00-0x08 256 bytes
586 * 0x30 1536 bytes
587 * 0xFF 8192 bytes
588 */
589#define IPG_TXDMABURSTTHRESH_VALUE 0x30
590
591/* RxDMAPollPeriod is specified in 320ns increments.
592 *
593 * Value Time
594 * ---------------------
595 * 0x00-0x01 320ns
596 * 0x03 ~1us
597 * 0x1F ~10us
598 * 0xFF ~82us
599 */
600#define IPG_RXDMAPOLLPERIOD_VALUE 0x01
601
602/* RxDMAUrgentThresh specifies the minimum amount of
603 * free space within the receive FIFO before asserting
604 * an urgent receive DMA request.
605 *
606 * Value Min RxFIFO free space before urgent RX request
607 * ---------------------------------------------------------------
608 * 0x00-0x04 128 bytes (1024 bits)
609 * 0x27 1248 bytes (~10000 bits)
610 * 0x30 1536 bytes (12288 bits)
611 * 0xFF 8192 bytes (65536 bits)
612 */
613#define IPG_RXDMAURGENTTHRESH_VALUE 0x30
614
615/* RxDMABurstThresh specifies the minimum amount of
616 * occupied space within the receive FIFO before asserting
617 * a receive DMA request.
618 *
619 * Value Min RxFIFO occupied space before RX request
620 * ----------------------------------------------------
621 * 0x00-0x08 256 bytes
622 * 0x30 1536 bytes
623 * 0xFF 8192 bytes
624 */
625#define IPG_RXDMABURSTTHRESH_VALUE 0x30
626
627/* FlowOnThresh specifies the maximum amount of occupied
628 * space in the receive FIFO before a PAUSE frame with
629 * maximum pause time is transmitted.
630 *
631 * Value Max RxFIFO occupied space before PAUSE
632 * ---------------------------------------------------
633 * 0x0000 0 bytes
634 * 0x0740 29,696 bytes
635 * 0x07FF 32,752 bytes
636 */
637#define IPG_FLOWONTHRESH_VALUE 0x0740
638
639/* FlowOffThresh specifies the minimum amount of occupied
640 * space in the receive FIFO before a PAUSE frame with
641 * zero pause time is transmitted.
642 *
643 * Value Min RxFIFO occupied space before PAUSE
644 * ---------------------------------------------------
645 * 0x0000 0 bytes
646 * 0x00BF 3056 bytes
647 * 0x07FF 32,752 bytes
648 */
649#define IPG_FLOWOFFTHRESH_VALUE 0x00BF
650
651/*
652 * Miscellaneous macros.
653 */
654
655/* Macros for printing debug statements. */
656#ifdef IPG_DEBUG
657# define IPG_DEBUG_MSG(fmt, args...) \
658do { \
659 if (0) \
660 printk(KERN_DEBUG "IPG: " fmt, ##args); \
661} while (0)
662# define IPG_DDEBUG_MSG(fmt, args...) \
663 printk(KERN_DEBUG "IPG: " fmt, ##args)
664# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
665# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
666#else
667# define IPG_DEBUG_MSG(fmt, args...) \
668do { \
669 if (0) \
670 printk(KERN_DEBUG "IPG: " fmt, ##args); \
671} while (0)
672# define IPG_DDEBUG_MSG(fmt, args...) \
673do { \
674 if (0) \
675 printk(KERN_DEBUG "IPG: " fmt, ##args); \
676} while (0)
677# define IPG_DUMPRFDLIST(args)
678# define IPG_DUMPTFDLIST(args)
679#endif
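/* Usage sketch: call sites pass printf-style arguments. In both
 * variants the printk stays visible to the compiler behind if (0),
 * so format strings and arguments are type-checked even though the
 * non-debug build emits nothing, e.g.:
 *
 *	IPG_DEBUG_MSG("rx_current = %u\n", sp->rx_current);
 *	IPG_DUMPTFDLIST(dev);
 */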
680
681/*
682 * End miscellaneous macros.
683 */
684
685/* Transmit Frame Descriptor. The IPG supports 15 fragments,
686 * however Linux requires only a single fragment. Note, each
687 * TFD field is 64 bits wide.
688 */
689struct ipg_tx {
690 __le64 next_desc;
691 __le64 tfc;
692 __le64 frag_info;
693};
694
695/* Receive Frame Descriptor. Note, each RFD field is 64 bits wide.
696 */
697struct ipg_rx {
698 __le64 next_desc;
699 __le64 rfs;
700 __le64 frag_info;
701};
702
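/* A minimal sketch, assuming the FragInfo layout encoded by the masks
 * above (40-bit fragment address in bits 0-39, 16-bit length in bits
 * 48-63), of filling a single-fragment TFD; the helper name is
 * hypothetical:
 */
static inline void ipg_set_frag_info(struct ipg_tx *txfd,
				     dma_addr_t addr, u16 len)
{
	txfd->frag_info = cpu_to_le64(((u64)addr & IPG_TFI_FRAGADDR) |
				      (((u64)len << 48) & IPG_TFI_FRAGLEN));
}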
703struct ipg_jumbo {
704 int found_start;
705 int current_size;
706 struct sk_buff *skb;
707};
708
709/* Structure of IPG NIC specific data. */
710struct ipg_nic_private {
711 void __iomem *ioaddr;
712 struct ipg_tx *txd;
713 struct ipg_rx *rxd;
714 dma_addr_t txd_map;
715 dma_addr_t rxd_map;
716 struct sk_buff *tx_buff[IPG_TFDLIST_LENGTH];
717 struct sk_buff *rx_buff[IPG_RFDLIST_LENGTH];
718 unsigned int tx_current;
719 unsigned int tx_dirty;
720 unsigned int rx_current;
721 unsigned int rx_dirty;
722 bool is_jumbo;
723 struct ipg_jumbo jumbo;
724 unsigned long rxfrag_size;
725 unsigned long rxsupport_size;
726 unsigned long max_rxframe_size;
727 unsigned int rx_buf_sz;
728 struct pci_dev *pdev;
729 struct net_device *dev;
730 struct net_device_stats stats;
731 spinlock_t lock;
732 int tenmbpsmode;
733
734 u16 led_mode;
735 u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */
736
737 struct mutex mii_mutex;
738 struct mii_if_info mii_if;
739 int reset_current_tfd;
740#ifdef IPG_DEBUG
741 int RFDlistendCount;
742 int RFDListCheckedCount;
743 int EmptyRFDListCount;
744#endif
745 struct delayed_work task;
746};
747
748#endif /* __LINUX_IPG_H */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 639263d5e833..7781e80896a6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -627,8 +627,10 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 
 	/* verify the skb head is not shared */
 	err = skb_cow_head(skb, 0);
-	if (err)
+	if (err) {
+		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
+	}
 
 	/* locate vlan header */
 	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
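The fix above is an instance of a general xmit contract: once an
ndo_start_xmit handler returns NETDEV_TX_OK, the stack considers the
skb consumed, so every early-exit path must free the buffer itself or
it leaks. A minimal sketch of the pattern (demo_xmit is hypothetical,
not fm10k code):

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_cow_head(skb, 0)) {	/* could not make the header writable */
		dev_kfree_skb(skb);	/* we own the skb, so we must free it */
		return NETDEV_TX_OK;	/* "consumed", nothing is requeued */
	}
	/* ... hand the frame to the hardware ring ... */
	return NETDEV_TX_OK;
}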
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 0ff8f01e57ee..1fd5ea82a9bc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -567,10 +567,6 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
 		goto init_adminq_exit;
 	}
 
-	/* initialize locks */
-	mutex_init(&hw->aq.asq_mutex);
-	mutex_init(&hw->aq.arq_mutex);
-
 	/* Set up register offsets */
 	i40e_adminq_init_regs(hw);
 
@@ -664,8 +660,6 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
 	i40e_shutdown_asq(hw);
 	i40e_shutdown_arq(hw);
 
-	/* destroy the locks */
-
 	if (hw->nvm_buff.va)
 		i40e_free_virt_mem(hw, &hw->nvm_buff);
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b825f978d441..4a9873ec28c7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -10295,6 +10295,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* set up a default setting for link flow control */
 	pf->hw.fc.requested_mode = I40E_FC_NONE;
 
+	/* set up the locks for the AQ, do this only once in probe
+	 * and destroy them only once in remove
+	 */
+	mutex_init(&hw->aq.asq_mutex);
+	mutex_init(&hw->aq.arq_mutex);
+
 	err = i40e_init_adminq(hw);
 
 	/* provide nvm, fw, api versions */
@@ -10697,7 +10703,6 @@ static void i40e_remove(struct pci_dev *pdev)
 	set_bit(__I40E_DOWN, &pf->state);
 	del_timer_sync(&pf->service_timer);
 	cancel_work_sync(&pf->service_task);
-	i40e_fdir_teardown(pf);
 
 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
 		i40e_free_vfs(pf);
@@ -10740,6 +10745,10 @@ static void i40e_remove(struct pci_dev *pdev)
 			 "Failed to destroy the Admin Queue resources: %d\n",
 			 ret_code);
 
+	/* destroy the locks only once, here */
+	mutex_destroy(&hw->aq.arq_mutex);
+	mutex_destroy(&hw->aq.asq_mutex);
+
 	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
 	i40e_clear_interrupt_scheme(pf);
 	for (i = 0; i < pf->num_alloc_vsi; i++) {
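All four i40e/i40evf hunks in this series enforce one lifetime rule:
the admin queue mutexes must live exactly as long as the PCI binding,
because the adminq init/shutdown routines can run again on reset and a
mutex must not be re-initialized while in use. The shape of the fix,
reduced to a sketch (all demo_* names are hypothetical):

struct demo_hw {
	struct mutex asq_mutex;
	struct mutex arq_mutex;
};

int demo_init_adminq(struct demo_hw *hw);	/* may rerun on reset */
void demo_shutdown_adminq(struct demo_hw *hw);

static int demo_probe(struct demo_hw *hw)
{
	mutex_init(&hw->asq_mutex);		/* once per device binding */
	mutex_init(&hw->arq_mutex);
	return demo_init_adminq(hw);
}

static void demo_remove(struct demo_hw *hw)
{
	demo_shutdown_adminq(hw);
	mutex_destroy(&hw->arq_mutex);		/* once, mirroring probe */
	mutex_destroy(&hw->asq_mutex);
}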
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index fd123ca60761..3f65e39b3fe4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -551,10 +551,6 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw)
 		goto init_adminq_exit;
 	}
 
-	/* initialize locks */
-	mutex_init(&hw->aq.asq_mutex);
-	mutex_init(&hw->aq.arq_mutex);
-
 	/* Set up register offsets */
 	i40e_adminq_init_regs(hw);
 
@@ -596,8 +592,6 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
 	i40e_shutdown_asq(hw);
 	i40e_shutdown_arq(hw);
 
-	/* destroy the locks */
-
 	if (hw->nvm_buff.va)
 		i40e_free_virt_mem(hw, &hw->nvm_buff);
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index d962164dfb0f..99d2cffae0cd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -2476,6 +2476,12 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	hw->bus.device = PCI_SLOT(pdev->devfn);
 	hw->bus.func = PCI_FUNC(pdev->devfn);
 
+	/* set up the locks for the AQ, do this only once in probe
+	 * and destroy them only once in remove
+	 */
+	mutex_init(&hw->aq.asq_mutex);
+	mutex_init(&hw->aq.arq_mutex);
+
 	INIT_LIST_HEAD(&adapter->mac_filter_list);
 	INIT_LIST_HEAD(&adapter->vlan_filter_list);
 
@@ -2629,6 +2635,10 @@ static void i40evf_remove(struct pci_dev *pdev)
 	if (hw->aq.asq.count)
 		i40evf_shutdown_adminq(hw);
 
+	/* destroy the locks only once, here */
+	mutex_destroy(&hw->aq.arq_mutex);
+	mutex_destroy(&hw->aq.asq_mutex);
+
 	iounmap(hw->hw_addr);
 	pci_release_regions(pdev);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 47395ff5d908..aed8d029b23d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7920,6 +7920,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 	 */
 	if (netif_running(dev))
 		ixgbe_close(dev);
+	else
+		ixgbe_reset(adapter);
+
 	ixgbe_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_IXGBE_DCB
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e84c7f2634d3..ed622fa29dfa 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -36,7 +36,7 @@
 
 /* Registers */
 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
-#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
+#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
@@ -62,6 +62,7 @@
 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
 #define MVNETA_BASE_ADDR_ENABLE 0x2290
+#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
 #define MVNETA_PORT_CONFIG 0x2400
 #define MVNETA_UNI_PROMISC_MODE BIT(0)
 #define MVNETA_DEF_RXQ(q) ((q) << 1)
@@ -159,7 +160,7 @@
 
 #define MVNETA_INTR_ENABLE 0x25b8
 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
-#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000
+#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff
 
 #define MVNETA_RXQ_CMD 0x2680
 #define MVNETA_RXQ_DISABLE_SHIFT 8
@@ -242,6 +243,7 @@
 #define MVNETA_VLAN_TAG_LEN 4
 
 #define MVNETA_CPU_D_CACHE_LINE_SIZE 32
+#define MVNETA_TX_CSUM_DEF_SIZE 1600
 #define MVNETA_TX_CSUM_MAX_SIZE 9800
 #define MVNETA_ACC_MODE_EXT 1
 
@@ -1579,12 +1581,16 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 		}
 
 		skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
-		if (!skb)
-			goto err_drop_frame;
 
+		/* After the refill, the old buffer has to be unmapped
+		 * regardless of whether the skb was built successfully.
+		 */
 		dma_unmap_single(dev->dev.parent, phys_addr,
 				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
 
+		if (!skb)
+			goto err_drop_frame;
+
 		rcvd_pkts++;
 		rcvd_bytes += rx_bytes;
 
@@ -3191,6 +3197,7 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
 	}
 
 	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
+	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
 }
 
 /* Power up the port */
@@ -3250,6 +3257,7 @@ static int mvneta_probe(struct platform_device *pdev)
 	char hw_mac_addr[ETH_ALEN];
 	const char *mac_from;
 	const char *managed;
+	int tx_csum_limit;
 	int phy_mode;
 	int err;
 	int cpu;
@@ -3350,8 +3358,21 @@ static int mvneta_probe(struct platform_device *pdev)
 		}
 	}
 
-	if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
-		pp->tx_csum_limit = 1600;
+	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
+		if (tx_csum_limit < 0 ||
+		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
+			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+			dev_info(&pdev->dev,
+				 "Wrong TX csum limit in DT, set to %dB\n",
+				 MVNETA_TX_CSUM_DEF_SIZE);
+		}
+	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
+		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+	} else {
+		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
+	}
+
+	pp->tx_csum_limit = tx_csum_limit;
 
 	pp->tx_ring_size = MVNETA_MAX_TXD;
 	pp->rx_ring_size = MVNETA_MAX_RXD;
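The probe change gives the "tx-csum-limit" DT property priority over
the compatible-string heuristic and clamps out-of-range values to the
default. The same decision tree, condensed (demo_tx_csum_limit is a
hypothetical helper, not mvneta code):

static u32 demo_tx_csum_limit(struct device_node *dn)
{
	u32 limit;

	if (of_property_read_u32(dn, "tx-csum-limit", &limit))
		/* property absent: fall back per SoC generation */
		return of_device_is_compatible(dn, "marvell,armada-370-neta") ?
		       MVNETA_TX_CSUM_DEF_SIZE : MVNETA_TX_CSUM_MAX_SIZE;

	/* property present but out of range: clamp to the safe default */
	return limit > MVNETA_TX_CSUM_MAX_SIZE ? MVNETA_TX_CSUM_DEF_SIZE : limit;
}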
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index d9884fd15b45..a4beccf1fd46 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3413,16 +3413,23 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
 }
 
 /* Free all buffers from the pool */
-static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
+static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
+			       struct mvpp2_bm_pool *bm_pool)
 {
 	int i;
 
 	for (i = 0; i < bm_pool->buf_num; i++) {
+		dma_addr_t buf_phys_addr;
 		u32 vaddr;
 
 		/* Get buffer virtual address (indirect access) */
-		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+		buf_phys_addr = mvpp2_read(priv,
+					   MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
 		vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+
+		dma_unmap_single(dev, buf_phys_addr,
+				 bm_pool->buf_size, DMA_FROM_DEVICE);
+
 		if (!vaddr)
 			break;
 		dev_kfree_skb_any((struct sk_buff *)vaddr);
@@ -3439,7 +3446,7 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
 {
 	u32 val;
 
-	mvpp2_bm_bufs_free(priv, bm_pool);
+	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
 	if (bm_pool->buf_num) {
 		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
 		return 0;
@@ -3692,7 +3699,8 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
 					   MVPP2_BM_LONG_BUF_NUM :
 					   MVPP2_BM_SHORT_BUF_NUM;
 		else
-			mvpp2_bm_bufs_free(port->priv, new_pool);
+			mvpp2_bm_bufs_free(port->dev->dev.parent,
+					   port->priv, new_pool);
 
 		new_pool->pkt_size = pkt_size;
 
@@ -3756,7 +3764,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
 	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
 
 	/* Update BM pool with new buffer size */
-	mvpp2_bm_bufs_free(port->priv, port_pool);
+	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
 	if (port_pool->buf_num) {
 		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
 		return -EIO;
@@ -4401,11 +4409,10 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
 
 		mvpp2_txq_inc_get(txq_pcpu);
 
-		if (!skb)
-			continue;
-
 		dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
 				 skb_headlen(skb), DMA_TO_DEVICE);
+		if (!skb)
+			continue;
 		dev_kfree_skb_any(skb);
 	}
 }
@@ -5092,7 +5099,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 		     struct mvpp2_rx_queue *rxq)
 {
 	struct net_device *dev = port->dev;
-	int rx_received, rx_filled, i;
+	int rx_received;
+	int rx_done = 0;
 	u32 rcvd_pkts = 0;
 	u32 rcvd_bytes = 0;
 
@@ -5101,17 +5109,18 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 	if (rx_todo > rx_received)
 		rx_todo = rx_received;
 
-	rx_filled = 0;
-	for (i = 0; i < rx_todo; i++) {
+	while (rx_done < rx_todo) {
 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
 		struct mvpp2_bm_pool *bm_pool;
 		struct sk_buff *skb;
+		dma_addr_t phys_addr;
 		u32 bm, rx_status;
 		int pool, rx_bytes, err;
 
-		rx_filled++;
+		rx_done++;
 		rx_status = rx_desc->status;
 		rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
+		phys_addr = rx_desc->buf_phys_addr;
 
 		bm = mvpp2_bm_cookie_build(rx_desc);
 		pool = mvpp2_bm_cookie_pool_get(bm);
@@ -5128,8 +5137,10 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 		 * comprised by the RX descriptor.
 		 */
 		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
+err_drop_frame:
 			dev->stats.rx_errors++;
 			mvpp2_rx_error(port, rx_desc);
+			/* Return the buffer to the pool */
 			mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
 					  rx_desc->buf_cookie);
 			continue;
@@ -5137,6 +5148,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 
 		skb = (struct sk_buff *)rx_desc->buf_cookie;
 
+		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
+		if (err) {
+			netdev_err(port->dev, "failed to refill BM pools\n");
+			goto err_drop_frame;
+		}
+
+		dma_unmap_single(dev->dev.parent, phys_addr,
+				 bm_pool->buf_size, DMA_FROM_DEVICE);
+
 		rcvd_pkts++;
 		rcvd_bytes += rx_bytes;
 		atomic_inc(&bm_pool->in_use);
@@ -5147,12 +5167,6 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 		mvpp2_rx_csum(port, rx_status, skb);
 
 		napi_gro_receive(&port->napi, skb);
-
-		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
-		if (err) {
-			netdev_err(port->dev, "failed to refill BM pools\n");
-			rx_filled--;
-		}
 	}
 
 	if (rcvd_pkts) {
@@ -5166,7 +5180,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 
 	/* Update Rx queue management counters */
 	wmb();
-	mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);
+	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
 
 	return rx_todo;
 }
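The mvpp2 rx rework inverts the old order of operations: a buffer is
only unmapped and handed to the stack after its replacement has been
refilled into the pool, and a failed refill recycles the original
buffer via err_drop_frame instead of losing it. The invariant in
outline (demo_* names hypothetical):

int demo_refill_pool(void);	/* hypothetical; 0 on success */

static int demo_rx_one(struct device *dev, struct napi_struct *napi,
		       struct sk_buff *skb, dma_addr_t phys_addr, int size)
{
	if (demo_refill_pool())
		return -ENOMEM;	/* caller returns the old buffer to the pool */

	/* only now is it safe to consume the old buffer */
	dma_unmap_single(dev, phys_addr, size, DMA_FROM_DEVICE);
	napi_gro_receive(napi, skb);
	return 0;
}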
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 2177e56ed0be..d48d5793407d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1010,7 +1010,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
 	if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
 	      smp->method == IB_MGMT_METHOD_GET) || network_view) {
 		mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
-			 slave, smp->method, smp->mgmt_class,
+			 slave, smp->mgmt_class, smp->method,
 			 network_view ? "Network" : "Host",
 			 be16_to_cpu(smp->attr_id));
 		return -EPERM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 85f1b1e7e505..31c491e02e69 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -892,9 +892,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
 		dev->caps.port_mask[i] = dev->caps.port_type[i];
 		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
-		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
+		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
 						    &dev->caps.gid_table_len[i],
-						    &dev->caps.pkey_table_len[i]))
+						    &dev->caps.pkey_table_len[i]);
+		if (err)
 			goto err_mem;
 	}
 
@@ -906,6 +907,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 			 dev->caps.uar_page_size * dev->caps.num_uars,
 			 (unsigned long long)
 			 pci_resource_len(dev->persist->pdev, 2));
+		err = -ENOMEM;
 		goto err_mem;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 9813d34f3e5b..cad6c44df91c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4306,9 +4306,10 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 		return -EOPNOTSUPP;
 
 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
-	ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
-	if (ctrl->port <= 0)
+	err = mlx4_slave_convert_port(dev, slave, ctrl->port);
+	if (err <= 0)
 		return -EINVAL;
+	ctrl->port = err;
 	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
 	if (err) {
@@ -4952,26 +4953,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
 	struct res_counter *counter;
 	struct res_counter *tmp;
 	int err;
-	int index;
+	int *counters_arr = NULL;
+	int i, j;
 
 	err = move_all_busy(dev, slave, RES_COUNTER);
 	if (err)
 		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
 			  slave);
 
-	spin_lock_irq(mlx4_tlock(dev));
-	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
-		if (counter->com.owner == slave) {
-			index = counter->com.res_id;
-			rb_erase(&counter->com.node,
-				 &tracker->res_tree[RES_COUNTER]);
-			list_del(&counter->com.list);
-			kfree(counter);
-			__mlx4_counter_free(dev, index);
+	counters_arr = kmalloc_array(dev->caps.max_counters,
+				     sizeof(*counters_arr), GFP_KERNEL);
+	if (!counters_arr)
+		return;
+
+	do {
+		i = 0;
+		j = 0;
+		spin_lock_irq(mlx4_tlock(dev));
+		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
+			if (counter->com.owner == slave) {
+				counters_arr[i++] = counter->com.res_id;
+				rb_erase(&counter->com.node,
+					 &tracker->res_tree[RES_COUNTER]);
+				list_del(&counter->com.list);
+				kfree(counter);
+			}
+		}
+		spin_unlock_irq(mlx4_tlock(dev));
+
+		while (j < i) {
+			__mlx4_counter_free(dev, counters_arr[j++]);
 			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
 		}
-	}
-	spin_unlock_irq(mlx4_tlock(dev));
+	} while (i);
+
+	kfree(counters_arr);
 }
 
 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
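The counters rework addresses a classic pattern: __mlx4_counter_free()
may sleep, and the old loop called it under the mlx4_tlock() spinlock.
The standard remedy, sketched generically (demo_* names hypothetical),
is to unlink entries and record their IDs under the lock, then release
the IDs after unlocking:

int demo_unlink_and_collect(int *ids, int max);	/* list surgery only */
void demo_release_id(int id);			/* may sleep */

static void demo_drain(spinlock_t *lock, int *ids, int max)
{
	int n;

	spin_lock_irq(lock);
	n = demo_unlink_and_collect(ids, max);	/* no sleeping here */
	spin_unlock_irq(lock);

	while (n--)
		demo_release_id(ids[n]);	/* safe: lock dropped */
}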
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f2ae62dd8c09..22e72bf1ae48 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -334,9 +334,15 @@ struct mlx5e_tx_skb_cb {
 
 #define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
 
+enum mlx5e_dma_map_type {
+	MLX5E_DMA_MAP_SINGLE,
+	MLX5E_DMA_MAP_PAGE
+};
+
 struct mlx5e_sq_dma {
 	dma_addr_t addr;
 	u32 size;
+	enum mlx5e_dma_map_type type;
 };
 
 enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5fc4d2d78cdf..1e52db32c73d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1332,6 +1332,42 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
 	return err;
 }
 
+static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
+						  u32 tirn)
+{
+	void *in;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+
+	err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
+static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
+{
+	int err;
+	int i;
+
+	for (i = 0; i < MLX5E_NUM_TT; i++) {
+		err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
+							     priv->tirn[i]);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1376,6 +1412,13 @@ int mlx5e_open_locked(struct net_device *netdev)
 		goto err_clear_state_opened_flag;
 	}
 
+	err = mlx5e_refresh_tirs_self_loopback_enable(priv);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
+			   __func__, err);
+		goto err_close_channels;
+	}
+
 	mlx5e_update_carrier(priv);
 	mlx5e_redirect_rqts(priv);
 
@@ -1383,6 +1426,8 @@ int mlx5e_open_locked(struct net_device *netdev)
 
 	return 0;
 
+err_close_channels:
+	mlx5e_close_channels(priv);
 err_clear_state_opened_flag:
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
 	return err;
@@ -1856,6 +1901,8 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
+	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+
 	if (new_mtu > max_mtu) {
 		netdev_err(netdev,
 			   "%s: Bad MTU (%d) > (%d) Max\n",
@@ -1909,6 +1956,9 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 			       "Not creating net device, some required device capabilities are missing\n");
 		return -ENOTSUPP;
 	}
+	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
+		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+
 	return 0;
 }
 
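The MTU hunk above compares like units before range-checking: the
firmware reports a hardware MTU that includes L2 overhead, so it is
converted to a software MTU first. A worked sketch, assuming the
conventional 14-byte Ethernet header plus 4-byte FCS (the driver's
actual MLX5E_HW2SW_MTU macro may account for more):

#define DEMO_HW2SW_MTU(hwmtu)	((hwmtu) - (ETH_HLEN + ETH_FCS_LEN))

static bool demo_mtu_in_range(int new_mtu, u16 hw_max_mtu)
{
	return new_mtu <= DEMO_HW2SW_MTU(hw_max_mtu);
}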
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index cd8f85a251d7..1341b1d3c421 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -61,39 +61,47 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
 	}
 }
 
-static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
-				      u32 *size)
+static inline void mlx5e_tx_dma_unmap(struct device *pdev,
+				      struct mlx5e_sq_dma *dma)
 {
-	sq->dma_fifo_pc--;
-	*addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
-	*size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
-}
-
-static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
-{
-	dma_addr_t addr;
-	u32 size;
-	int i;
-
-	for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
-		mlx5e_dma_pop_last_pushed(sq, &addr, &size);
-		dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+	switch (dma->type) {
+	case MLX5E_DMA_MAP_SINGLE:
+		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+		break;
+	case MLX5E_DMA_MAP_PAGE:
+		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+		break;
+	default:
+		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
 	}
 }
 
-static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
-				  u32 size)
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
+				  dma_addr_t addr,
+				  u32 size,
+				  enum mlx5e_dma_map_type map_type)
 {
 	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
 	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
 	sq->dma_fifo_pc++;
 }
 
-static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
-				 u32 *size)
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
 {
-	*addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
-	*size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+	return &sq->dma_fifo[i & sq->dma_fifo_mask];
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+	int i;
+
+	for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+		struct mlx5e_sq_dma *last_pushed_dma =
+			mlx5e_dma_get(sq, --sq->dma_fifo_pc);
+
+		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
+	}
 }
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -118,8 +126,15 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
 	 */
 #define MLX5E_MIN_INLINE ETH_HLEN
 
-	if (bf && (skb_headlen(skb) <= sq->max_inline))
-		return skb_headlen(skb);
+	if (bf) {
+		u16 ihs = skb_headlen(skb);
+
+		if (skb_vlan_tag_present(skb))
+			ihs += VLAN_HLEN;
+
+		if (ihs <= sq->max_inline)
+			return skb_headlen(skb);
+	}
 
 	return MLX5E_MIN_INLINE;
 }
@@ -218,7 +233,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		dseg->lkey = sq->mkey_be;
 		dseg->byte_count = cpu_to_be32(headlen);
 
-		mlx5e_dma_push(sq, dma_addr, headlen);
+		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
 		MLX5E_TX_SKB_CB(skb)->num_dma++;
 
 		dseg++;
@@ -237,7 +252,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		dseg->lkey = sq->mkey_be;
 		dseg->byte_count = cpu_to_be32(fsz);
 
-		mlx5e_dma_push(sq, dma_addr, fsz);
+		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
 		MLX5E_TX_SKB_CB(skb)->num_dma++;
 
 		dseg++;
@@ -353,13 +368,10 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 		}
 
 		for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
-			dma_addr_t addr;
-			u32 size;
+			struct mlx5e_sq_dma *dma =
+				mlx5e_dma_get(sq, dma_fifo_cc++);
 
-			mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
-			dma_fifo_cc++;
-			dma_unmap_single(sq->pdev, addr, size,
-					 DMA_TO_DEVICE);
+			mlx5e_tx_dma_unmap(sq->pdev, dma);
 		}
 
 		npkts++;
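The dma_fifo changes follow a DMA-API rule: a region mapped with
dma_map_page() must be released with dma_unmap_page(), and one mapped
with dma_map_single() with dma_unmap_single(). Since the TX path maps
the skb headlen one way and page fragments the other, each fifo entry
records how it was mapped. The essence (demo_dma is a hypothetical
type, not the driver's struct):

struct demo_dma {
	dma_addr_t addr;
	u32 size;
	bool is_page;		/* set when mapped via dma_map_page() */
};

static void demo_unmap(struct device *dev, const struct demo_dma *d)
{
	if (d->is_page)
		dma_unmap_page(dev, d->addr, d->size, DMA_TO_DEVICE);
	else
		dma_unmap_single(dev, d->addr, d->size, DMA_TO_DEVICE);
}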
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index b159ef8303cc..057665180f13 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1326,7 +1326,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
 	/* Get platform resources */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	if ((!res) || (irq < 0) || (irq >= NR_IRQS)) {
+	if (!res || irq < 0) {
 		dev_err(&pdev->dev, "error getting resources.\n");
 		ret = -ENXIO;
 		goto err_exit;
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index ac17d8669b1a..1292c360390c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -299,6 +299,7 @@ struct qed_hwfn {
 
 	/* Flag indicating whether interrupts are enabled or not*/
 	bool b_int_enabled;
+	bool b_int_requested;
 
 	struct qed_mcp_info *mcp_info;
 
@@ -491,6 +492,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
 		   u32 input_len, u8 *input_buf,
 		   u32 max_size, u8 *unzip_buf);
 
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+
 #define QED_ETH_INTERFACE_VERSION 300
 
 #endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 803b190ccada..817bbd5476ff 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1385,52 +1385,63 @@ err0:
 	return rc;
 }
 
-static u32 qed_hw_bar_size(struct qed_dev *cdev,
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
 			   u8 bar_id)
 {
-	u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);
+	u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
+		       : PGLUE_B_REG_PF_BAR1_SIZE);
+	u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
 
-	return size / cdev->num_hwfns;
+	/* Get the BAR size (in KB) from hardware given val */
+	return 1 << (val + 15);
 }
 
 int qed_hw_prepare(struct qed_dev *cdev,
 		   int personality)
 {
-	int rc, i;
+	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+	int rc;
 
 	/* Store the precompiled init data ptrs */
 	qed_init_iro_array(cdev);
 
 	/* Initialize the first hwfn - will learn number of hwfns */
-	rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
+	rc = qed_hw_prepare_single(p_hwfn,
+				   cdev->regview,
 				   cdev->doorbells, personality);
 	if (rc)
 		return rc;
 
-	personality = cdev->hwfns[0].hw_info.personality;
+	personality = p_hwfn->hw_info.personality;
 
 	/* Initialize the rest of the hwfns */
-	for (i = 1; i < cdev->num_hwfns; i++) {
+	if (cdev->num_hwfns > 1) {
 		void __iomem *p_regview, *p_doorbell;
+		u8 __iomem *addr;
+
+		/* adjust bar offset for second engine */
+		addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
+		p_regview = addr;
 
-		p_regview = cdev->regview +
-			    i * qed_hw_bar_size(cdev, 0);
-		p_doorbell = cdev->doorbells +
-			     i * qed_hw_bar_size(cdev, 1);
-		rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
+		/* adjust doorbell bar offset for second engine */
+		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
+		p_doorbell = addr;
+
+		/* prepare second hw function */
+		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
 					   p_doorbell, personality);
+
+		/* in case of error, need to free the previously
+		 * initialized hwfn 0.
+		 */
 		if (rc) {
-			/* Cleanup previously initialized hwfns */
-			while (--i >= 0) {
-				qed_init_free(&cdev->hwfns[i]);
-				qed_mcp_free(&cdev->hwfns[i]);
-				qed_hw_hwfn_free(&cdev->hwfns[i]);
-			}
-			return rc;
+			qed_init_free(p_hwfn);
+			qed_mcp_free(p_hwfn);
+			qed_hw_hwfn_free(p_hwfn);
 		}
 	}
 
-	return 0;
+	return rc;
 }
 
 void qed_hw_remove(struct qed_dev *cdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index de50e84902af..9cc9d62c1fec 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -783,22 +783,16 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
 	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
 }
 
-void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
-			struct qed_ptt *p_ptt,
-			enum qed_int_mode int_mode)
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		       enum qed_int_mode int_mode)
 {
-	int i;
-
-	p_hwfn->b_int_enabled = 1;
+	int rc, i;
 
 	/* Mask non-link attentions */
 	for (i = 0; i < 9; i++)
 		qed_wr(p_hwfn, p_ptt,
 		       MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
 
-	/* Enable interrupt Generation */
-	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
-
 	/* Configure AEU signal change to produce attentions for link */
 	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
 	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
@@ -808,6 +802,19 @@ void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
 
 	/* Unmask AEU signals toward IGU */
 	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+		rc = qed_slowpath_irq_req(p_hwfn);
+		if (rc != 0) {
+			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
+			return -EINVAL;
+		}
+		p_hwfn->b_int_requested = true;
+	}
+	/* Enable interrupt Generation */
+	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+	p_hwfn->b_int_enabled = 1;
+
+	return rc;
 }
 
 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
@@ -1127,3 +1134,11 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
 
 	return info->igu_sb_cnt;
 }
+
+void qed_int_disable_post_isr_release(struct qed_dev *cdev)
+{
+	int i;
+
+	for_each_hwfn(cdev, i)
+		cdev->hwfns[i].b_int_requested = false;
+}
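qed_int_igu_enable() now requests the slowpath IRQ before unmasking
interrupt generation and records the request so teardown can skip
vectors that were never taken. The ordering in skeleton form (demo_*
names hypothetical):

struct demo_hwfn {
	bool int_requested;
};

int demo_request_irq(struct demo_hwfn *hwfn);		/* hypothetical */
void demo_unmask_interrupts(struct demo_hwfn *hwfn);	/* hypothetical */

static int demo_int_enable(struct demo_hwfn *hwfn)
{
	int rc = demo_request_irq(hwfn);	/* 1. install the handler */

	if (rc)
		return rc;
	hwfn->int_requested = true;		/* 2. remember for irq_free() */
	demo_unmask_interrupts(hwfn);		/* 3. only now may it fire */
	return 0;
}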
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 16b57518e706..51e0b09a7f47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -169,10 +169,14 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
 			int *p_iov_blks);
 
 /**
- * @file
+ * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
+ *        release. The API needs to be called after releasing all slowpath IRQs
+ *        of the device.
+ *
+ * @param cdev
  *
- * @brief Interrupt handler
  */
+void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 
 #define QED_CAU_DEF_RX_TIMER_RES 0
 #define QED_CAU_DEF_TX_TIMER_RES 0
@@ -366,10 +370,11 @@ void qed_int_setup(struct qed_hwfn *p_hwfn,
  * @param p_hwfn
  * @param p_ptt
  * @param int_mode
+ *
+ * @return int
  */
-void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
-			struct qed_ptt *p_ptt,
-			enum qed_int_mode int_mode);
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		       enum qed_int_mode int_mode);
 
 /**
  * @brief - Initialize CAU status block entry
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 947c7af72b25..174f7341c5c3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -476,41 +476,22 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
476 return rc; 476 return rc;
477} 477}
478 478
479static int qed_slowpath_irq_req(struct qed_dev *cdev) 479int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
480{ 480{
481 int i = 0, rc = 0; 481 struct qed_dev *cdev = hwfn->cdev;
482 int rc = 0;
483 u8 id;
482 484
483 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 485 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
484 /* Request all the slowpath MSI-X vectors */ 486 id = hwfn->my_id;
485 for (i = 0; i < cdev->num_hwfns; i++) { 487 snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
486 snprintf(cdev->hwfns[i].name, NAME_SIZE, 488 id, cdev->pdev->bus->number,
487 "sp-%d-%02x:%02x.%02x", 489 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
488 i, cdev->pdev->bus->number, 490 rc = request_irq(cdev->int_params.msix_table[id].vector,
489 PCI_SLOT(cdev->pdev->devfn), 491 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
490 cdev->hwfns[i].abs_pf_id); 492 if (!rc)
491 493 DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
492 rc = request_irq(cdev->int_params.msix_table[i].vector,
493 qed_msix_sp_int, 0,
494 cdev->hwfns[i].name,
495 cdev->hwfns[i].sp_dpc);
496 if (rc)
497 break;
498
499 DP_VERBOSE(&cdev->hwfns[i],
500 (NETIF_MSG_INTR | QED_MSG_SP),
501 "Requested slowpath MSI-X\n"); 494 "Requested slowpath MSI-X\n");
502 }
503
504 if (i != cdev->num_hwfns) {
505 /* Free already requested MSI-X vectors */
506 for (i--; i >= 0; i--) {
507 unsigned int vec =
508 cdev->int_params.msix_table[i].vector;
509 synchronize_irq(vec);
510 free_irq(cdev->int_params.msix_table[i].vector,
511 cdev->hwfns[i].sp_dpc);
512 }
513 }
514 } else { 495 } else {
515 unsigned long flags = 0; 496 unsigned long flags = 0;
516 497
@@ -534,13 +515,17 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev)
534 515
535 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 516 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
536 for_each_hwfn(cdev, i) { 517 for_each_hwfn(cdev, i) {
518 if (!cdev->hwfns[i].b_int_requested)
519 break;
537 synchronize_irq(cdev->int_params.msix_table[i].vector); 520 synchronize_irq(cdev->int_params.msix_table[i].vector);
538 free_irq(cdev->int_params.msix_table[i].vector, 521 free_irq(cdev->int_params.msix_table[i].vector,
539 cdev->hwfns[i].sp_dpc); 522 cdev->hwfns[i].sp_dpc);
540 } 523 }
541 } else { 524 } else {
542 free_irq(cdev->pdev->irq, cdev); 525 if (QED_LEADING_HWFN(cdev)->b_int_requested)
526 free_irq(cdev->pdev->irq, cdev);
543 } 527 }
528 qed_int_disable_post_isr_release(cdev);
544} 529}
545 530
546static int qed_nic_stop(struct qed_dev *cdev) 531static int qed_nic_stop(struct qed_dev *cdev)
@@ -765,16 +750,11 @@ static int qed_slowpath_start(struct qed_dev *cdev,
765 if (rc) 750 if (rc)
766 goto err1; 751 goto err1;
767 752
768 /* Request the slowpath IRQ */
769 rc = qed_slowpath_irq_req(cdev);
770 if (rc)
771 goto err2;
772
773 /* Allocate stream for unzipping */ 753 /* Allocate stream for unzipping */
774 rc = qed_alloc_stream_mem(cdev); 754 rc = qed_alloc_stream_mem(cdev);
775 if (rc) { 755 if (rc) {
776 DP_NOTICE(cdev, "Failed to allocate stream memory\n"); 756 DP_NOTICE(cdev, "Failed to allocate stream memory\n");
777 goto err3; 757 goto err2;
778 } 758 }
779 759
780 /* Start the slowpath */ 760 /* Start the slowpath */
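
The qed_main.c side of the change makes teardown symmetric: only IRQs that were actually requested are freed, and the per-hwfn bookkeeping flag is cleared afterwards. A compilable sketch of that guarded free, with illustrative stand-ins for the kernel calls:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_HWFNS 2

    struct hwfn { bool int_requested; };

    static void free_isr(int idx) { printf("freeing IRQ for hwfn %d\n", idx); }

    static void irq_free(struct hwfn *fns)
    {
        for (int i = 0; i < NUM_HWFNS; i++) {
            if (!fns[i].int_requested)
                break;    /* vectors are requested in order */
            free_isr(i);
        }
        for (int i = 0; i < NUM_HWFNS; i++)
            fns[i].int_requested = false;    /* post-ISR-release cleanup */
    }

    int main(void)
    {
        struct hwfn fns[NUM_HWFNS] = { { true }, { false } };

        irq_free(fns);    /* frees the IRQ of hwfn 0 only */
        return 0;
    }
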
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 7a5ce5914ace..e8df12335a97 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -363,4 +363,8 @@
363 0x7 << 0) 363 0x7 << 0)
364#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ 364#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
365 0 365 0
366#define PGLUE_B_REG_PF_BAR0_SIZE \
367 0x2aae60UL
368#define PGLUE_B_REG_PF_BAR1_SIZE \
369 0x2aae64UL
366#endif 370#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 31a1f1eb4f56..287fadfab52d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -124,8 +124,12 @@ struct qed_spq {
124 dma_addr_t p_phys; 124 dma_addr_t p_phys;
125 struct qed_spq_entry *p_virt; 125 struct qed_spq_entry *p_virt;
126 126
127 /* Used as index for completions (returns on EQ by FW) */ 127#define SPQ_RING_SIZE \
128 u16 echo_idx; 128 (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
129
130 /* Bitmap for handling out-of-order completions */
131 DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
132 u8 comp_bitmap_idx;
129 133
130 /* Statistics */ 134 /* Statistics */
131 u32 unlimited_pending_count; 135 u32 unlimited_pending_count;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 7c0b8459666e..3dd548ab8df1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -112,8 +112,6 @@ static int
112qed_spq_fill_entry(struct qed_hwfn *p_hwfn, 112qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
113 struct qed_spq_entry *p_ent) 113 struct qed_spq_entry *p_ent)
114{ 114{
115 p_ent->elem.hdr.echo = 0;
116 p_hwfn->p_spq->echo_idx++;
117 p_ent->flags = 0; 115 p_ent->flags = 0;
118 116
119 switch (p_ent->comp_mode) { 117 switch (p_ent->comp_mode) {
@@ -195,10 +193,12 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
195 struct qed_spq *p_spq, 193 struct qed_spq *p_spq,
196 struct qed_spq_entry *p_ent) 194 struct qed_spq_entry *p_ent)
197{ 195{
198 struct qed_chain *p_chain = &p_hwfn->p_spq->chain; 196 struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
197 u16 echo = qed_chain_get_prod_idx(p_chain);
199 struct slow_path_element *elem; 198 struct slow_path_element *elem;
200 struct core_db_data db; 199 struct core_db_data db;
201 200
201 p_ent->elem.hdr.echo = cpu_to_le16(echo);
202 elem = qed_chain_produce(p_chain); 202 elem = qed_chain_produce(p_chain);
203 if (!elem) { 203 if (!elem) {
204 DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); 204 DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
@@ -437,7 +437,9 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
437 p_spq->comp_count = 0; 437 p_spq->comp_count = 0;
438 p_spq->comp_sent_count = 0; 438 p_spq->comp_sent_count = 0;
439 p_spq->unlimited_pending_count = 0; 439 p_spq->unlimited_pending_count = 0;
440 p_spq->echo_idx = 0; 440
441 bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
442 p_spq->comp_bitmap_idx = 0;
441 443
442 /* SPQ cid, cannot fail */ 444 /* SPQ cid, cannot fail */
443 qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); 445 qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
@@ -582,26 +584,32 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
582 struct qed_spq *p_spq = p_hwfn->p_spq; 584 struct qed_spq *p_spq = p_hwfn->p_spq;
583 585
584 if (p_ent->queue == &p_spq->unlimited_pending) { 586 if (p_ent->queue == &p_spq->unlimited_pending) {
585 struct qed_spq_entry *p_en2;
586 587
587 if (list_empty(&p_spq->free_pool)) { 588 if (list_empty(&p_spq->free_pool)) {
588 list_add_tail(&p_ent->list, &p_spq->unlimited_pending); 589 list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
589 p_spq->unlimited_pending_count++; 590 p_spq->unlimited_pending_count++;
590 591
591 return 0; 592 return 0;
592 } 593 } else {
594 struct qed_spq_entry *p_en2;
593 595
594 p_en2 = list_first_entry(&p_spq->free_pool, 596 p_en2 = list_first_entry(&p_spq->free_pool,
595 struct qed_spq_entry, 597 struct qed_spq_entry,
596 list); 598 list);
597 list_del(&p_en2->list); 599 list_del(&p_en2->list);
600
601 /* Copy the ring element physical pointer to the new
602 * entry, since we are about to override the entire ring
603 * entry and don't want to lose the pointer.
604 */
605 p_ent->elem.data_ptr = p_en2->elem.data_ptr;
598 606
599 /* Struct assignment */ 607 *p_en2 = *p_ent;
600 *p_en2 = *p_ent;
601 608
602 kfree(p_ent); 609 kfree(p_ent);
603 610
604 p_ent = p_en2; 611 p_ent = p_en2;
612 }
605 } 613 }
606 614
607 /* entry is to be placed in 'pending' queue */ 615 /* entry is to be placed in 'pending' queue */
@@ -777,13 +785,38 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
777 list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, 785 list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
778 list) { 786 list) {
779 if (p_ent->elem.hdr.echo == echo) { 787 if (p_ent->elem.hdr.echo == echo) {
788 u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
789
780 list_del(&p_ent->list); 790 list_del(&p_ent->list);
781 791
782 qed_chain_return_produced(&p_spq->chain); 792 /* Avoid overriding of SPQ entries when getting
793 * out-of-order completions, by marking the completions
794 * in a bitmap and increasing the chain consumer only
795 * for the first successive completed entries.
796 */
797 bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
798
799 while (test_bit(p_spq->comp_bitmap_idx,
800 p_spq->p_comp_bitmap)) {
801 bitmap_clear(p_spq->p_comp_bitmap,
802 p_spq->comp_bitmap_idx,
803 SPQ_RING_SIZE);
804 p_spq->comp_bitmap_idx++;
805 qed_chain_return_produced(&p_spq->chain);
806 }
807
783 p_spq->comp_count++; 808 p_spq->comp_count++;
784 found = p_ent; 809 found = p_ent;
785 break; 810 break;
786 } 811 }
812
813 /* This is relatively uncommon - depends on scenarios
814 * which have multiple per-PF sent ramrods.
815 */
816 DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
817 "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
818 le16_to_cpu(echo),
819 le16_to_cpu(p_ent->elem.hdr.echo));
787 } 820 }
788 821
789 /* Release lock before callback, as callback may post 822 /* Release lock before callback, as callback may post
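
The qed_spq.c hunks replace the echo counter with a completion bitmap: completions may arrive out of order, so each one is marked in the bitmap and the chain consumer advances only across the leading run of completed entries. A compilable userspace model of that intent, with a plain array standing in for the kernel bitmap helpers:

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 8

    static bool comp_bitmap[RING_SIZE];
    static unsigned int comp_idx;    /* next entry expected to complete */
    static unsigned int consumed;

    static void complete(unsigned int echo)
    {
        comp_bitmap[echo % RING_SIZE] = true;

        /* advance the consumer only over successive completed entries */
        while (comp_bitmap[comp_idx % RING_SIZE]) {
            comp_bitmap[comp_idx % RING_SIZE] = false;
            comp_idx++;
            consumed++;
        }
    }

    int main(void)
    {
        complete(1);    /* out of order: nothing consumed yet */
        printf("after echo 1: consumed=%u\n", consumed);    /* 0 */
        complete(0);    /* fills the gap: entries 0 and 1 consumed */
        printf("after echo 0: consumed=%u\n", consumed);    /* 2 */
        return 0;
    }
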
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index be7d7a62cc0d..b1a452f291ee 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -246,7 +246,8 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
246 u32 state; 246 u32 state;
247 247
248 state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); 248 state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
249 while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) { 249 while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) {
250 idc->vnic_wait_limit--;
250 msleep(1000); 251 msleep(1000);
251 state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); 252 state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
252 } 253 }
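
The qlcnic one-liner matters because a post-decrement in the loop condition fires even on the final, failing test, so the counter ends at -1 rather than 0 and any later check against the budget is off by one; moving the decrement into the body keeps the count exact. A compilable illustration of the difference (toy condition, not the driver's):

    #include <stdio.h>

    static int busy = 1;    /* models a state that never becomes ready */

    int main(void)
    {
        int a = 3, b = 3;

        /* post-decrement in the condition: the final, failing test
         * still decrements, leaving the counter at -1 */
        while (busy && a--)
            ;
        /* decrement in the body: the counter stops exactly at 0 */
        while (busy && b)
            b--;

        printf("a=%d b=%d\n", a, b);    /* prints a=-1 b=0 */
        return 0;
    }
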
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 02b7115b6aaa..997976426799 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4211,8 +4211,9 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
4211 4211
4212 /* Wait for an outstanding reset to complete. */ 4212 /* Wait for an outstanding reset to complete. */
4213 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { 4213 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4214 int i = 3; 4214 int i = 4;
4215 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { 4215
4216 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4216 netif_err(qdev, ifup, qdev->ndev, 4217 netif_err(qdev, ifup, qdev->ndev,
4217 "Waiting for adapter UP...\n"); 4218 "Waiting for adapter UP...\n");
4218 ssleep(1); 4219 ssleep(1);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index ddb2c6c6ec94..689a4a5c8dcf 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -736,9 +736,8 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
736 netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", 736 netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
737 jiffies, jiffies - dev->trans_start); 737 jiffies, jiffies - dev->trans_start);
738 qca->net_dev->stats.tx_errors++; 738 qca->net_dev->stats.tx_errors++;
739 /* wake the queue if there is room */ 739 /* Trigger tx queue flush and QCA7000 reset */
740 if (qcaspi_tx_ring_has_space(&qca->txr)) 740 qca->sync = QCASPI_SYNC_UNKNOWN;
741 netif_wake_queue(dev);
742} 741}
743 742
744static int 743static int
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b4f21232019a..79ef799f88ab 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7429,15 +7429,15 @@ process_pkt:
7429 7429
7430 rtl8169_rx_vlan_tag(desc, skb); 7430 rtl8169_rx_vlan_tag(desc, skb);
7431 7431
7432 if (skb->pkt_type == PACKET_MULTICAST)
7433 dev->stats.multicast++;
7434
7432 napi_gro_receive(&tp->napi, skb); 7435 napi_gro_receive(&tp->napi, skb);
7433 7436
7434 u64_stats_update_begin(&tp->rx_stats.syncp); 7437 u64_stats_update_begin(&tp->rx_stats.syncp);
7435 tp->rx_stats.packets++; 7438 tp->rx_stats.packets++;
7436 tp->rx_stats.bytes += pkt_size; 7439 tp->rx_stats.bytes += pkt_size;
7437 u64_stats_update_end(&tp->rx_stats.syncp); 7440 u64_stats_update_end(&tp->rx_stats.syncp);
7438
7439 if (skb->pkt_type == PACKET_MULTICAST)
7440 dev->stats.multicast++;
7441 } 7441 }
7442release_descriptor: 7442release_descriptor:
7443 desc->opts2 = 0; 7443 desc->opts2 = 0;
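
The r8169 reorder is a lifetime fix: once the skb has been handed to napi_gro_receive() the driver no longer owns it, so skb->pkt_type must be read before the handoff. The same ownership rule, sketched with an illustrative consumer that frees its argument:

    #include <stdio.h>
    #include <stdlib.h>

    struct pkt { int type; };

    static void stack_receive(struct pkt *p)
    {
        free(p);    /* the consumer owns the packet from here on */
    }

    int main(void)
    {
        struct pkt *p = malloc(sizeof(*p));
        int multicast;

        if (!p)
            return 1;
        p->type = 1;
        multicast = (p->type == 1);    /* sample fields first ... */
        stack_receive(p);              /* ... then hand ownership away */
        printf("multicast=%d\n", multicast);
        return 0;
    }
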
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index aa7b2083cb53..467d41698fd5 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -408,8 +408,6 @@ static int ravb_dmac_init(struct net_device *ndev)
408 /* Interrupt enable: */ 408 /* Interrupt enable: */
409 /* Frame receive */ 409 /* Frame receive */
410 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); 410 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
411 /* Receive FIFO full warning */
412 ravb_write(ndev, RIC1_RFWE, RIC1);
413 /* Receive FIFO full error, descriptor empty */ 411 /* Receive FIFO full error, descriptor empty */
414 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); 412 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
415 /* Frame transmitted, timestamp FIFO updated */ 413 /* Frame transmitted, timestamp FIFO updated */
@@ -733,8 +731,10 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
733 ((tis & tic) & BIT(q))) { 731 ((tis & tic) & BIT(q))) {
734 if (napi_schedule_prep(&priv->napi[q])) { 732 if (napi_schedule_prep(&priv->napi[q])) {
735 /* Mask RX and TX interrupts */ 733 /* Mask RX and TX interrupts */
736 ravb_write(ndev, ric0 & ~BIT(q), RIC0); 734 ric0 &= ~BIT(q);
737 ravb_write(ndev, tic & ~BIT(q), TIC); 735 tic &= ~BIT(q);
736 ravb_write(ndev, ric0, RIC0);
737 ravb_write(ndev, tic, TIC);
738 __napi_schedule(&priv->napi[q]); 738 __napi_schedule(&priv->napi[q]);
739 } else { 739 } else {
740 netdev_warn(ndev, 740 netdev_warn(ndev,
@@ -905,6 +905,9 @@ static int ravb_phy_init(struct net_device *ndev)
905 netdev_info(ndev, "limited PHY to 100Mbit/s\n"); 905 netdev_info(ndev, "limited PHY to 100Mbit/s\n");
906 } 906 }
907 907
908 /* 10BASE is not supported */
909 phydev->supported &= ~PHY_10BT_FEATURES;
910
908 netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", 911 netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
909 phydev->addr, phydev->irq, phydev->drv->name); 912 phydev->addr, phydev->irq, phydev->drv->name);
910 913
@@ -1037,7 +1040,7 @@ static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
1037 "rx_queue_1_mcast_packets", 1040 "rx_queue_1_mcast_packets",
1038 "rx_queue_1_errors", 1041 "rx_queue_1_errors",
1039 "rx_queue_1_crc_errors", 1042 "rx_queue_1_crc_errors",
1040 "rx_queue_1_frame_errors_", 1043 "rx_queue_1_frame_errors",
1041 "rx_queue_1_length_errors", 1044 "rx_queue_1_length_errors",
1042 "rx_queue_1_missed_errors", 1045 "rx_queue_1_missed_errors",
1043 "rx_queue_1_over_errors", 1046 "rx_queue_1_over_errors",
@@ -1225,7 +1228,7 @@ static int ravb_open(struct net_device *ndev)
1225 /* Device init */ 1228 /* Device init */
1226 error = ravb_dmac_init(ndev); 1229 error = ravb_dmac_init(ndev);
1227 if (error) 1230 if (error)
1228 goto out_free_irq; 1231 goto out_free_irq2;
1229 ravb_emac_init(ndev); 1232 ravb_emac_init(ndev);
1230 1233
1231 /* Initialise PTP Clock driver */ 1234 /* Initialise PTP Clock driver */
@@ -1243,9 +1246,11 @@ static int ravb_open(struct net_device *ndev)
1243out_ptp_stop: 1246out_ptp_stop:
1244 /* Stop PTP Clock driver */ 1247 /* Stop PTP Clock driver */
1245 ravb_ptp_stop(ndev); 1248 ravb_ptp_stop(ndev);
1249out_free_irq2:
1250 if (priv->chip_id == RCAR_GEN3)
1251 free_irq(priv->emac_irq, ndev);
1246out_free_irq: 1252out_free_irq:
1247 free_irq(ndev->irq, ndev); 1253 free_irq(ndev->irq, ndev);
1248 free_irq(priv->emac_irq, ndev);
1249out_napi_off: 1254out_napi_off:
1250 napi_disable(&priv->napi[RAVB_NC]); 1255 napi_disable(&priv->napi[RAVB_NC]);
1251 napi_disable(&priv->napi[RAVB_BE]); 1256 napi_disable(&priv->napi[RAVB_BE]);
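
The ravb_open() fix adds a second unwind label so the EMAC IRQ is freed only on the chip revisions that requested it, with each label releasing exactly what was acquired before the failing step, in reverse order. A self-contained sketch of that goto-unwind shape (illustrative steps, not the driver's resources):

    #include <stdio.h>

    static int step(const char *name, int ok)
    {
        printf("%s: %s\n", name, ok ? "ok" : "failed");
        return ok ? 0 : -1;
    }

    static int open_path(int has_second_irq, int dmac_ok)
    {
        if (step("request_irq", 1))
            goto out;
        if (has_second_irq && step("request_emac_irq", 1))
            goto out_free_irq;
        if (step("dmac_init", dmac_ok))
            goto out_free_irq2;
        return 0;

    out_free_irq2:
        if (has_second_irq)
            printf("free emac irq\n");    /* only if it was taken */
    out_free_irq:
        printf("free irq\n");
    out:
        return -1;
    }

    int main(void)
    {
        return open_path(1, 0) ? 0 : 1;    /* exercise the failure path */
    }
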
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e7bab7909ed9..a0eaf50499a2 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -52,6 +52,8 @@
52 NETIF_MSG_RX_ERR| \ 52 NETIF_MSG_RX_ERR| \
53 NETIF_MSG_TX_ERR) 53 NETIF_MSG_TX_ERR)
54 54
55#define SH_ETH_OFFSET_INVALID ((u16)~0)
56
55#define SH_ETH_OFFSET_DEFAULTS \ 57#define SH_ETH_OFFSET_DEFAULTS \
56 [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID 58 [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
57 59
@@ -404,6 +406,28 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
404static void sh_eth_rcv_snd_disable(struct net_device *ndev); 406static void sh_eth_rcv_snd_disable(struct net_device *ndev);
405static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); 407static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
406 408
409static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
410{
411 struct sh_eth_private *mdp = netdev_priv(ndev);
412 u16 offset = mdp->reg_offset[enum_index];
413
414 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
415 return;
416
417 iowrite32(data, mdp->addr + offset);
418}
419
420static u32 sh_eth_read(struct net_device *ndev, int enum_index)
421{
422 struct sh_eth_private *mdp = netdev_priv(ndev);
423 u16 offset = mdp->reg_offset[enum_index];
424
425 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
426 return ~0U;
427
428 return ioread32(mdp->addr + offset);
429}
430
407static bool sh_eth_is_gether(struct sh_eth_private *mdp) 431static bool sh_eth_is_gether(struct sh_eth_private *mdp)
408{ 432{
409 return mdp->reg_offset == sh_eth_offset_gigabit; 433 return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1172,7 +1196,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
1172 break; 1196 break;
1173 } 1197 }
1174 mdp->rx_skbuff[i] = skb; 1198 mdp->rx_skbuff[i] = skb;
1175 rxdesc->addr = dma_addr; 1199 rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
1176 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1200 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1177 1201
1178 /* Rx descriptor address set */ 1202 /* Rx descriptor address set */
@@ -1403,7 +1427,8 @@ static int sh_eth_txfree(struct net_device *ndev)
1403 entry, edmac_to_cpu(mdp, txdesc->status)); 1427 entry, edmac_to_cpu(mdp, txdesc->status));
1404 /* Free the original skb. */ 1428 /* Free the original skb. */
1405 if (mdp->tx_skbuff[entry]) { 1429 if (mdp->tx_skbuff[entry]) {
1406 dma_unmap_single(&ndev->dev, txdesc->addr, 1430 dma_unmap_single(&ndev->dev,
1431 edmac_to_cpu(mdp, txdesc->addr),
1407 txdesc->buffer_length, DMA_TO_DEVICE); 1432 txdesc->buffer_length, DMA_TO_DEVICE);
1408 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); 1433 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1409 mdp->tx_skbuff[entry] = NULL; 1434 mdp->tx_skbuff[entry] = NULL;
@@ -1462,6 +1487,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1462 if (mdp->cd->shift_rd0) 1487 if (mdp->cd->shift_rd0)
1463 desc_status >>= 16; 1488 desc_status >>= 16;
1464 1489
1490 skb = mdp->rx_skbuff[entry];
1465 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | 1491 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1466 RD_RFS5 | RD_RFS6 | RD_RFS10)) { 1492 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1467 ndev->stats.rx_errors++; 1493 ndev->stats.rx_errors++;
@@ -1477,16 +1503,16 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1477 ndev->stats.rx_missed_errors++; 1503 ndev->stats.rx_missed_errors++;
1478 if (desc_status & RD_RFS10) 1504 if (desc_status & RD_RFS10)
1479 ndev->stats.rx_over_errors++; 1505 ndev->stats.rx_over_errors++;
1480 } else { 1506 } else if (skb) {
1507 dma_addr = edmac_to_cpu(mdp, rxdesc->addr);
1481 if (!mdp->cd->hw_swap) 1508 if (!mdp->cd->hw_swap)
1482 sh_eth_soft_swap( 1509 sh_eth_soft_swap(
1483 phys_to_virt(ALIGN(rxdesc->addr, 4)), 1510 phys_to_virt(ALIGN(dma_addr, 4)),
1484 pkt_len + 2); 1511 pkt_len + 2);
1485 skb = mdp->rx_skbuff[entry];
1486 mdp->rx_skbuff[entry] = NULL; 1512 mdp->rx_skbuff[entry] = NULL;
1487 if (mdp->cd->rpadir) 1513 if (mdp->cd->rpadir)
1488 skb_reserve(skb, NET_IP_ALIGN); 1514 skb_reserve(skb, NET_IP_ALIGN);
1489 dma_unmap_single(&ndev->dev, rxdesc->addr, 1515 dma_unmap_single(&ndev->dev, dma_addr,
1490 ALIGN(mdp->rx_buf_sz, 32), 1516 ALIGN(mdp->rx_buf_sz, 32),
1491 DMA_FROM_DEVICE); 1517 DMA_FROM_DEVICE);
1492 skb_put(skb, pkt_len); 1518 skb_put(skb, pkt_len);
@@ -1523,7 +1549,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1523 mdp->rx_skbuff[entry] = skb; 1549 mdp->rx_skbuff[entry] = skb;
1524 1550
1525 skb_checksum_none_assert(skb); 1551 skb_checksum_none_assert(skb);
1526 rxdesc->addr = dma_addr; 1552 rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
1527 } 1553 }
1528 dma_wmb(); /* RACT bit must be set after all the above writes */ 1554 dma_wmb(); /* RACT bit must be set after all the above writes */
1529 if (entry >= mdp->num_rx_ring - 1) 1555 if (entry >= mdp->num_rx_ring - 1)
@@ -2331,8 +2357,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
2331 /* Free all the skbuffs in the Rx queue. */ 2357 /* Free all the skbuffs in the Rx queue. */
2332 for (i = 0; i < mdp->num_rx_ring; i++) { 2358 for (i = 0; i < mdp->num_rx_ring; i++) {
2333 rxdesc = &mdp->rx_ring[i]; 2359 rxdesc = &mdp->rx_ring[i];
2334 rxdesc->status = 0; 2360 rxdesc->status = cpu_to_edmac(mdp, 0);
2335 rxdesc->addr = 0xBADF00D0; 2361 rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0);
2336 dev_kfree_skb(mdp->rx_skbuff[i]); 2362 dev_kfree_skb(mdp->rx_skbuff[i]);
2337 mdp->rx_skbuff[i] = NULL; 2363 mdp->rx_skbuff[i] = NULL;
2338 } 2364 }
@@ -2350,6 +2376,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2350{ 2376{
2351 struct sh_eth_private *mdp = netdev_priv(ndev); 2377 struct sh_eth_private *mdp = netdev_priv(ndev);
2352 struct sh_eth_txdesc *txdesc; 2378 struct sh_eth_txdesc *txdesc;
2379 dma_addr_t dma_addr;
2353 u32 entry; 2380 u32 entry;
2354 unsigned long flags; 2381 unsigned long flags;
2355 2382
@@ -2372,14 +2399,14 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2372 txdesc = &mdp->tx_ring[entry]; 2399 txdesc = &mdp->tx_ring[entry];
2373 /* soft swap. */ 2400 /* soft swap. */
2374 if (!mdp->cd->hw_swap) 2401 if (!mdp->cd->hw_swap)
2375 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), 2402 sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2376 skb->len + 2); 2403 dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2377 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, 2404 DMA_TO_DEVICE);
2378 DMA_TO_DEVICE); 2405 if (dma_mapping_error(&ndev->dev, dma_addr)) {
2379 if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
2380 kfree_skb(skb); 2406 kfree_skb(skb);
2381 return NETDEV_TX_OK; 2407 return NETDEV_TX_OK;
2382 } 2408 }
2409 txdesc->addr = cpu_to_edmac(mdp, dma_addr);
2383 txdesc->buffer_length = skb->len; 2410 txdesc->buffer_length = skb->len;
2384 2411
2385 dma_wmb(); /* TACT bit must be set after all the above writes */ 2412 dma_wmb(); /* TACT bit must be set after all the above writes */
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 50382b1c9ddc..26ad1cf0bcf1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -546,31 +546,6 @@ static inline void sh_eth_soft_swap(char *src, int len)
546#endif 546#endif
547} 547}
548 548
549#define SH_ETH_OFFSET_INVALID ((u16) ~0)
550
551static inline void sh_eth_write(struct net_device *ndev, u32 data,
552 int enum_index)
553{
554 struct sh_eth_private *mdp = netdev_priv(ndev);
555 u16 offset = mdp->reg_offset[enum_index];
556
557 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
558 return;
559
560 iowrite32(data, mdp->addr + offset);
561}
562
563static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
564{
565 struct sh_eth_private *mdp = netdev_priv(ndev);
566 u16 offset = mdp->reg_offset[enum_index];
567
568 if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
569 return ~0U;
570
571 return ioread32(mdp->addr + offset);
572}
573
574static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, 549static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
575 int enum_index) 550 int enum_index)
576{ 551{
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index bc6d21b471be..e6a084a6be12 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3299,7 +3299,8 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
3299 3299
3300 new_spec.priority = EFX_FILTER_PRI_AUTO; 3300 new_spec.priority = EFX_FILTER_PRI_AUTO;
3301 new_spec.flags = (EFX_FILTER_FLAG_RX | 3301 new_spec.flags = (EFX_FILTER_FLAG_RX |
3302 EFX_FILTER_FLAG_RX_RSS); 3302 (efx_rss_enabled(efx) ?
3303 EFX_FILTER_FLAG_RX_RSS : 0));
3303 new_spec.dmaq_id = 0; 3304 new_spec.dmaq_id = 0;
3304 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; 3305 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
3305 rc = efx_ef10_filter_push(efx, &new_spec, 3306 rc = efx_ef10_filter_push(efx, &new_spec,
@@ -3921,6 +3922,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3921{ 3922{
3922 struct efx_ef10_filter_table *table = efx->filter_state; 3923 struct efx_ef10_filter_table *table = efx->filter_state;
3923 struct efx_ef10_dev_addr *addr_list; 3924 struct efx_ef10_dev_addr *addr_list;
3925 enum efx_filter_flags filter_flags;
3924 struct efx_filter_spec spec; 3926 struct efx_filter_spec spec;
3925 u8 baddr[ETH_ALEN]; 3927 u8 baddr[ETH_ALEN];
3926 unsigned int i, j; 3928 unsigned int i, j;
@@ -3935,11 +3937,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3935 addr_count = table->dev_uc_count; 3937 addr_count = table->dev_uc_count;
3936 } 3938 }
3937 3939
3940 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
3941
3938 /* Insert/renew filters */ 3942 /* Insert/renew filters */
3939 for (i = 0; i < addr_count; i++) { 3943 for (i = 0; i < addr_count; i++) {
3940 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 3944 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
3941 EFX_FILTER_FLAG_RX_RSS,
3942 0);
3943 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, 3945 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
3944 addr_list[i].addr); 3946 addr_list[i].addr);
3945 rc = efx_ef10_filter_insert(efx, &spec, true); 3947 rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -3968,9 +3970,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3968 3970
3969 if (multicast && rollback) { 3971 if (multicast && rollback) {
3970 /* Also need an Ethernet broadcast filter */ 3972 /* Also need an Ethernet broadcast filter */
3971 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 3973 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
3972 EFX_FILTER_FLAG_RX_RSS,
3973 0);
3974 eth_broadcast_addr(baddr); 3974 eth_broadcast_addr(baddr);
3975 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); 3975 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
3976 rc = efx_ef10_filter_insert(efx, &spec, true); 3976 rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -4000,13 +4000,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
4000{ 4000{
4001 struct efx_ef10_filter_table *table = efx->filter_state; 4001 struct efx_ef10_filter_table *table = efx->filter_state;
4002 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4002 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4003 enum efx_filter_flags filter_flags;
4003 struct efx_filter_spec spec; 4004 struct efx_filter_spec spec;
4004 u8 baddr[ETH_ALEN]; 4005 u8 baddr[ETH_ALEN];
4005 int rc; 4006 int rc;
4006 4007
4007 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 4008 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
4008 EFX_FILTER_FLAG_RX_RSS, 4009
4009 0); 4010 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
4010 4011
4011 if (multicast) 4012 if (multicast)
4012 efx_filter_set_mc_def(&spec); 4013 efx_filter_set_mc_def(&spec);
@@ -4023,8 +4024,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
4023 if (!nic_data->workaround_26807) { 4024 if (!nic_data->workaround_26807) {
4024 /* Also need an Ethernet broadcast filter */ 4025 /* Also need an Ethernet broadcast filter */
4025 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 4026 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
4026 EFX_FILTER_FLAG_RX_RSS, 4027 filter_flags, 0);
4027 0);
4028 eth_broadcast_addr(baddr); 4028 eth_broadcast_addr(baddr);
4029 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, 4029 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
4030 baddr); 4030 baddr);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index d288f1c928de..a3c42a376741 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3422,7 +3422,7 @@ out:
3422 * with our request for slot reset the mmio_enabled callback will never be 3422 * with our request for slot reset the mmio_enabled callback will never be
3423 * called, and the link_reset callback is not used by AER or EEH mechanisms. 3423 * called, and the link_reset callback is not used by AER or EEH mechanisms.
3424 */ 3424 */
3425static struct pci_error_handlers efx_err_handlers = { 3425static const struct pci_error_handlers efx_err_handlers = {
3426 .error_detected = efx_io_error_detected, 3426 .error_detected = efx_io_error_detected,
3427 .slot_reset = efx_io_slot_reset, 3427 .slot_reset = efx_io_slot_reset,
3428 .resume = efx_io_resume, 3428 .resume = efx_io_resume,
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 1aaf76c1ace8..10827476bc0b 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -76,6 +76,11 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
76#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \ 76#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
77 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) 77 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
78 78
79static inline bool efx_rss_enabled(struct efx_nic *efx)
80{
81 return efx->rss_spread > 1;
82}
83
79/* Filters */ 84/* Filters */
80 85
81void efx_mac_reconfigure(struct efx_nic *efx); 86void efx_mac_reconfigure(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 5a1c5a8f278a..133e9e35be9e 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2242,7 +2242,7 @@ efx_farch_filter_init_rx_auto(struct efx_nic *efx,
2242 */ 2242 */
2243 spec->priority = EFX_FILTER_PRI_AUTO; 2243 spec->priority = EFX_FILTER_PRI_AUTO;
2244 spec->flags = (EFX_FILTER_FLAG_RX | 2244 spec->flags = (EFX_FILTER_FLAG_RX |
2245 (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | 2245 (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) |
2246 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); 2246 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
2247 spec->dmaq_id = 0; 2247 spec->dmaq_id = 0;
2248} 2248}
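
The sfc hunks converge on one pattern: derive the RSS filter flag from a single predicate (efx_rss_enabled(), i.e. rss_spread > 1) instead of hard-coding it at every call site. A tiny sketch of that pattern with illustrative constants:

    #include <stdio.h>

    #define FLAG_RX     0x1
    #define FLAG_RX_RSS 0x2

    static int rss_enabled(int rss_spread)
    {
        return rss_spread > 1;
    }

    static unsigned int filter_flags(int rss_spread)
    {
        return FLAG_RX | (rss_enabled(rss_spread) ? FLAG_RX_RSS : 0);
    }

    int main(void)
    {
        printf("1 queue:  %#x\n", filter_flags(1));    /* no RSS flag */
        printf("4 queues: %#x\n", filter_flags(4));    /* RSS flag set */
        return 0;
    }
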
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 3d5ee3259885..194f67d9f3bf 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -418,7 +418,7 @@ static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
418 418
419 val |= (1 << TXC_GLCMD_LMTSWRST_LBN); 419 val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
420 efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val); 420 efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
421 while (tries--) { 421 while (--tries) {
422 val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); 422 val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
423 if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN))) 423 if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
424 break; 424 break;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index c860c9007e49..219a99b7a631 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -809,22 +809,17 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
809 809
810static int smsc911x_phy_reset(struct smsc911x_data *pdata) 810static int smsc911x_phy_reset(struct smsc911x_data *pdata)
811{ 811{
812 struct phy_device *phy_dev = pdata->phy_dev;
813 unsigned int temp; 812 unsigned int temp;
814 unsigned int i = 100000; 813 unsigned int i = 100000;
815 814
816 BUG_ON(!phy_dev); 815 temp = smsc911x_reg_read(pdata, PMT_CTRL);
817 BUG_ON(!phy_dev->bus); 816 smsc911x_reg_write(pdata, PMT_CTRL, temp | PMT_CTRL_PHY_RST_);
818
819 SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset");
820 smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET);
821 do { 817 do {
822 msleep(1); 818 msleep(1);
823 temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, 819 temp = smsc911x_reg_read(pdata, PMT_CTRL);
824 MII_BMCR); 820 } while ((i--) && (temp & PMT_CTRL_PHY_RST_));
825 } while ((i--) && (temp & BMCR_RESET));
826 821
827 if (temp & BMCR_RESET) { 822 if (unlikely(temp & PMT_CTRL_PHY_RST_)) {
828 SMSC_WARN(pdata, hw, "PHY reset failed to complete"); 823 SMSC_WARN(pdata, hw, "PHY reset failed to complete");
829 return -EIO; 824 return -EIO;
830 } 825 }
@@ -2296,7 +2291,7 @@ static int smsc911x_init(struct net_device *dev)
2296 } 2291 }
2297 2292
2298 /* Reset the LAN911x */ 2293 /* Reset the LAN911x */
2299 if (smsc911x_soft_reset(pdata)) 2294 if (smsc911x_phy_reset(pdata) || smsc911x_soft_reset(pdata))
2300 return -ENODEV; 2295 return -ENODEV;
2301 2296
2302 dev->flags |= IFF_MULTICAST; 2297 dev->flags |= IFF_MULTICAST;
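
The smsc911x change resets the PHY through the chip's own PMT_CTRL register instead of MDIO: pulse a self-clearing reset bit, then poll with a budget until the hardware clears it. The shape of that loop as a standalone sketch; reg_read()/reg_write() and hw_tick() are stand-ins for the MMIO accessors and msleep(1):

    #include <stdio.h>

    #define PHY_RST 0x400

    static unsigned int pmt_ctrl;

    static unsigned int reg_read(void) { return pmt_ctrl; }
    static void reg_write(unsigned int v) { pmt_ctrl = v; }
    static void hw_tick(void) { pmt_ctrl &= ~PHY_RST; }    /* HW clears bit */

    int main(void)
    {
        unsigned int temp;
        int budget = 100000;

        reg_write(reg_read() | PHY_RST);    /* start the reset */
        do {
            hw_tick();                      /* stands in for msleep(1) */
            temp = reg_read();
        } while (budget-- && (temp & PHY_RST));

        if (temp & PHY_RST) {
            fprintf(stderr, "PHY reset failed to complete\n");
            return 1;
        }
        return 0;
    }
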
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 9d89bdbf029f..82de68b1a452 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -337,11 +337,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
337 QSGMII_PHY_RX_SIGNAL_DETECT_EN | 337 QSGMII_PHY_RX_SIGNAL_DETECT_EN |
338 QSGMII_PHY_TX_DRIVER_EN | 338 QSGMII_PHY_TX_DRIVER_EN |
339 QSGMII_PHY_QSGMII_EN | 339 QSGMII_PHY_QSGMII_EN |
340 0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET | 340 0x4ul << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
341 0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET | 341 0x3ul << QSGMII_PHY_RX_DC_BIAS_OFFSET |
342 0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET | 342 0x1ul << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
343 0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET | 343 0x2ul << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
344 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET); 344 0xCul << QSGMII_PHY_TX_DRV_AMP_OFFSET);
345 } 345 }
346 346
347 plat_dat->has_gmac = true; 347 plat_dat->has_gmac = true;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 7f6f4a4fcc70..58c05acc2aab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -299,16 +299,17 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
299 if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { 299 if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
300 const char *rs; 300 const char *rs;
301 301
302 dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
303
302 err = of_property_read_string(np, "st,tx-retime-src", &rs); 304 err = of_property_read_string(np, "st,tx-retime-src", &rs);
303 if (err < 0) { 305 if (err < 0) {
304 dev_warn(dev, "Use internal clock source\n"); 306 dev_warn(dev, "Use internal clock source\n");
305 dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; 307 } else {
306 } else if (!strcasecmp(rs, "clk_125")) { 308 if (!strcasecmp(rs, "clk_125"))
307 dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; 309 dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125;
308 } else if (!strcasecmp(rs, "txclk")) { 310 else if (!strcasecmp(rs, "txclk"))
309 dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; 311 dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK;
310 } 312 }
311
312 dwmac->speed = SPEED_1000; 313 dwmac->speed = SPEED_1000;
313 } 314 }
314 315
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 52b8ed9bd87c..adff46375a32 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -153,7 +153,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
153 if (ret) 153 if (ret)
154 return ret; 154 return ret;
155 155
156 return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 156 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
157 if (ret)
158 sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
159
160 return ret;
157} 161}
158 162
159static const struct of_device_id sun7i_dwmac_match[] = { 163static const struct of_device_id sun7i_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 64d8aa4e0cad..a5b869eb4678 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -185,7 +185,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
185 priv->clk_csr = STMMAC_CSR_100_150M; 185 priv->clk_csr = STMMAC_CSR_100_150M;
186 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) 186 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
187 priv->clk_csr = STMMAC_CSR_150_250M; 187 priv->clk_csr = STMMAC_CSR_150_250M;
188 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) 188 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
189 priv->clk_csr = STMMAC_CSR_250_300M; 189 priv->clk_csr = STMMAC_CSR_250_300M;
190 } 190 }
191} 191}
@@ -2232,6 +2232,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2232 2232
2233 frame_len = priv->hw->desc->get_rx_frame_len(p, coe); 2233 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2234 2234
2235 /* check if frame_len fits the preallocated memory */
2236 if (frame_len > priv->dma_buf_sz) {
2237 priv->dev->stats.rx_length_errors++;
2238 break;
2239 }
2240
2235 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 2241 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2236 * Type frames (LLC/LLC-SNAP) 2242 * Type frames (LLC/LLC-SNAP)
2237 */ 2243 */
@@ -3040,8 +3046,6 @@ int stmmac_suspend(struct net_device *ndev)
3040 priv->hw->dma->stop_tx(priv->ioaddr); 3046 priv->hw->dma->stop_tx(priv->ioaddr);
3041 priv->hw->dma->stop_rx(priv->ioaddr); 3047 priv->hw->dma->stop_rx(priv->ioaddr);
3042 3048
3043 stmmac_clear_descriptors(priv);
3044
3045 /* Enable Power down mode by programming the PMT regs */ 3049 /* Enable Power down mode by programming the PMT regs */
3046 if (device_may_wakeup(priv->device)) { 3050 if (device_may_wakeup(priv->device)) {
3047 priv->hw->mac->pmt(priv->hw, priv->wolopts); 3051 priv->hw->mac->pmt(priv->hw, priv->wolopts);
@@ -3099,9 +3103,15 @@ int stmmac_resume(struct net_device *ndev)
3099 3103
3100 netif_device_attach(ndev); 3104 netif_device_attach(ndev);
3101 3105
3102 init_dma_desc_rings(ndev, GFP_ATOMIC); 3106 priv->cur_rx = 0;
3107 priv->dirty_rx = 0;
3108 priv->dirty_tx = 0;
3109 priv->cur_tx = 0;
3110 stmmac_clear_descriptors(priv);
3111
3103 stmmac_hw_setup(ndev, false); 3112 stmmac_hw_setup(ndev, false);
3104 stmmac_init_tx_coalesce(priv); 3113 stmmac_init_tx_coalesce(priv);
3114 stmmac_set_rx_mode(ndev);
3105 3115
3106 napi_enable(&priv->napi); 3116 napi_enable(&priv->napi);
3107 3117
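
The stmmac suspend/resume hunks stop re-allocating the descriptor rings on resume; instead the ring cursors are reset and the existing descriptors re-armed. A sketch of that idea with illustrative field names:

    #include <stdio.h>

    #define RING_SIZE 4

    struct ring {
        unsigned int cur_rx, dirty_rx, cur_tx, dirty_tx;
        int desc_owned_by_hw[RING_SIZE];
    };

    static void clear_descriptors(struct ring *r)
    {
        for (int i = 0; i < RING_SIZE; i++)
            r->desc_owned_by_hw[i] = 1;    /* give Rx descriptors to HW */
    }

    static void resume(struct ring *r)
    {
        r->cur_rx = r->dirty_rx = 0;    /* reset the cursors ... */
        r->cur_tx = r->dirty_tx = 0;
        clear_descriptors(r);           /* ... and re-arm in place */
    }

    int main(void)
    {
        struct ring r = { .cur_rx = 3, .cur_tx = 2 };

        resume(&r);
        printf("cur_rx=%u cur_tx=%u\n", r.cur_rx, r.cur_tx);
        return 0;
    }
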
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index ebf6abc4853f..bba670c42e37 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -138,7 +138,6 @@ int stmmac_mdio_reset(struct mii_bus *bus)
138 138
139#ifdef CONFIG_OF 139#ifdef CONFIG_OF
140 if (priv->device->of_node) { 140 if (priv->device->of_node) {
141 int reset_gpio, active_low;
142 141
143 if (data->reset_gpio < 0) { 142 if (data->reset_gpio < 0) {
144 struct device_node *np = priv->device->of_node; 143 struct device_node *np = priv->device->of_node;
@@ -154,24 +153,23 @@ int stmmac_mdio_reset(struct mii_bus *bus)
154 "snps,reset-active-low"); 153 "snps,reset-active-low");
155 of_property_read_u32_array(np, 154 of_property_read_u32_array(np,
156 "snps,reset-delays-us", data->delays, 3); 155 "snps,reset-delays-us", data->delays, 3);
157 }
158 156
159 reset_gpio = data->reset_gpio; 157 if (gpio_request(data->reset_gpio, "mdio-reset"))
160 active_low = data->active_low; 158 return 0;
159 }
161 160
162 if (!gpio_request(reset_gpio, "mdio-reset")) { 161 gpio_direction_output(data->reset_gpio,
163 gpio_direction_output(reset_gpio, active_low ? 1 : 0); 162 data->active_low ? 1 : 0);
164 if (data->delays[0]) 163 if (data->delays[0])
165 msleep(DIV_ROUND_UP(data->delays[0], 1000)); 164 msleep(DIV_ROUND_UP(data->delays[0], 1000));
166 165
167 gpio_set_value(reset_gpio, active_low ? 0 : 1); 166 gpio_set_value(data->reset_gpio, data->active_low ? 0 : 1);
168 if (data->delays[1]) 167 if (data->delays[1])
169 msleep(DIV_ROUND_UP(data->delays[1], 1000)); 168 msleep(DIV_ROUND_UP(data->delays[1], 1000));
170 169
171 gpio_set_value(reset_gpio, active_low ? 1 : 0); 170 gpio_set_value(data->reset_gpio, data->active_low ? 1 : 0);
172 if (data->delays[2]) 171 if (data->delays[2])
173 msleep(DIV_ROUND_UP(data->delays[2], 1000)); 172 msleep(DIV_ROUND_UP(data->delays[2], 1000));
174 }
175 } 173 }
176#endif 174#endif
177 175
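
The stmmac_mdio rewrite keeps the same three-phase reset pulse while fixing the GPIO request placement: drive the line to the inactive level, wait, assert reset, wait, release, wait. A compact sketch of the sequence; gpio_set() and sleep_us() are illustrative stand-ins:

    #include <stdio.h>

    static void gpio_set(int v) { printf("gpio=%d\n", v); }
    static void sleep_us(unsigned int us) { if (us) printf("sleep %uus\n", us); }

    static void mdio_reset(int active_low, const unsigned int delays[3])
    {
        gpio_set(active_low ? 1 : 0);    /* start from the inactive level */
        sleep_us(delays[0]);
        gpio_set(active_low ? 0 : 1);    /* assert reset */
        sleep_us(delays[1]);
        gpio_set(active_low ? 1 : 0);    /* release reset */
        sleep_us(delays[2]);
    }

    int main(void)
    {
        unsigned int delays[3] = { 1000, 1000, 1000 };

        mdio_reset(1, delays);
        return 0;
    }
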
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index c08be62bceba..1562ab4151e1 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -78,6 +78,9 @@ static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
78 78
79int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr) 79int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
80{ 80{
81 if (of_machine_is_compatible("ti,dm8148"))
82 return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
83
81 if (of_machine_is_compatible("ti,am33xx")) 84 if (of_machine_is_compatible("ti,am33xx"))
82 return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); 85 return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
83 86
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ae68afd50a15..f38696ceee74 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -345,13 +345,6 @@ VELOCITY_PARAM(flow_control, "Enable flow control ability");
345*/ 345*/
346VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode"); 346VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
347 347
348#define VAL_PKT_LEN_DEF 0
349/* ValPktLen[] is used for setting the checksum offload ability of NIC.
350 0: Receive frame with invalid layer 2 length (Default)
351 1: Drop frame with invalid layer 2 length
352*/
353VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");
354
355#define WOL_OPT_DEF 0 348#define WOL_OPT_DEF 0
356#define WOL_OPT_MIN 0 349#define WOL_OPT_MIN 0
357#define WOL_OPT_MAX 7 350#define WOL_OPT_MAX 7
@@ -494,7 +487,6 @@ static void velocity_get_options(struct velocity_opt *opts, int index,
494 487
495 velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); 488 velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
496 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); 489 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
497 velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
498 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); 490 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
499 velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); 491 velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
500 opts->numrx = (opts->numrx & ~3); 492 opts->numrx = (opts->numrx & ~3);
@@ -2055,8 +2047,9 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2055 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; 2047 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2056 struct sk_buff *skb; 2048 struct sk_buff *skb;
2057 2049
2058 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { 2050 if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
2059 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name); 2051 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
2052 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
2060 stats->rx_length_errors++; 2053 stats->rx_length_errors++;
2061 return -EINVAL; 2054 return -EINVAL;
2062 } 2055 }
@@ -2069,17 +2062,6 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2069 dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, 2062 dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2070 vptr->rx.buf_sz, DMA_FROM_DEVICE); 2063 vptr->rx.buf_sz, DMA_FROM_DEVICE);
2071 2064
2072 /*
2073 * Drop frame not meeting IEEE 802.3
2074 */
2075
2076 if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
2077 if (rd->rdesc0.RSR & RSR_RL) {
2078 stats->rx_length_errors++;
2079 return -EINVAL;
2080 }
2081 }
2082
2083 velocity_rx_csum(rd, skb); 2065 velocity_rx_csum(rd, skb);
2084 2066
2085 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { 2067 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index bb8b5304d851..b103adb8d62e 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -599,7 +599,7 @@ int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
599 FJES_CMD_REQ_RES_CODE_BUSY) && 599 FJES_CMD_REQ_RES_CODE_BUSY) &&
600 (timeout > 0)) { 600 (timeout > 0)) {
601 msleep(200 + hw->my_epid * 20); 601 msleep(200 + hw->my_epid * 20);
602 timeout -= (200 + hw->my_epid * 20); 602 timeout -= (200 + hw->my_epid * 20);
603 603
604 res_buf->unshare_buffer.length = 0; 604 res_buf->unshare_buffer.length = 0;
605 res_buf->unshare_buffer.code = 0; 605 res_buf->unshare_buffer.code = 0;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index de5c30c9f059..c2b79f5d1c89 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -967,8 +967,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
967 err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, 967 err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
968 &fl6.saddr, &fl6.daddr, prio, ttl, 968 &fl6.saddr, &fl6.daddr, prio, ttl,
969 sport, geneve->dst_port, !udp_csum); 969 sport, geneve->dst_port, !udp_csum);
970
971 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
972 return NETDEV_TX_OK; 970 return NETDEV_TX_OK;
973 971
974tx_error: 972tx_error:
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index d50887e3df6d..8c48bb2a94ea 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -254,7 +254,7 @@ acct:
254 } 254 }
255} 255}
256 256
257static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb, 257static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
258 bool local) 258 bool local)
259{ 259{
260 struct ipvl_dev *ipvlan = addr->master; 260 struct ipvl_dev *ipvlan = addr->master;
@@ -262,6 +262,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
262 unsigned int len; 262 unsigned int len;
263 rx_handler_result_t ret = RX_HANDLER_CONSUMED; 263 rx_handler_result_t ret = RX_HANDLER_CONSUMED;
264 bool success = false; 264 bool success = false;
265 struct sk_buff *skb = *pskb;
265 266
266 len = skb->len + ETH_HLEN; 267 len = skb->len + ETH_HLEN;
267 if (unlikely(!(dev->flags & IFF_UP))) { 268 if (unlikely(!(dev->flags & IFF_UP))) {
@@ -273,6 +274,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
273 if (!skb) 274 if (!skb)
274 goto out; 275 goto out;
275 276
277 *pskb = skb;
276 skb->dev = dev; 278 skb->dev = dev;
277 skb->pkt_type = PACKET_HOST; 279 skb->pkt_type = PACKET_HOST;
278 280
@@ -486,7 +488,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
486 488
487 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); 489 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
488 if (addr) 490 if (addr)
489 return ipvlan_rcv_frame(addr, skb, true); 491 return ipvlan_rcv_frame(addr, &skb, true);
490 492
491out: 493out:
492 skb->dev = ipvlan->phy_dev; 494 skb->dev = ipvlan->phy_dev;
@@ -506,7 +508,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
506 if (lyr3h) { 508 if (lyr3h) {
507 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); 509 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
508 if (addr) 510 if (addr)
509 return ipvlan_rcv_frame(addr, skb, true); 511 return ipvlan_rcv_frame(addr, &skb, true);
510 } 512 }
511 skb = skb_share_check(skb, GFP_ATOMIC); 513 skb = skb_share_check(skb, GFP_ATOMIC);
512 if (!skb) 514 if (!skb)
@@ -589,7 +591,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
589 591
590 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); 592 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
591 if (addr) 593 if (addr)
592 ret = ipvlan_rcv_frame(addr, skb, false); 594 ret = ipvlan_rcv_frame(addr, pskb, false);
593 595
594out: 596out:
595 return ret; 597 return ret;
@@ -626,7 +628,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
626 628
627 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); 629 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
628 if (addr) 630 if (addr)
629 ret = ipvlan_rcv_frame(addr, skb, false); 631 ret = ipvlan_rcv_frame(addr, pskb, false);
630 } 632 }
631 633
632 return ret; 634 return ret;
@@ -651,5 +653,5 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
651 WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n", 653 WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
652 port->mode); 654 port->mode);
653 kfree_skb(skb); 655 kfree_skb(skb);
654 return NET_RX_DROP; 656 return RX_HANDLER_CONSUMED;
655} 657}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 86f6c6292c27..06c8bfeaccd6 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -415,6 +415,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
415 skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN); 415 skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
416 if (!skb) 416 if (!skb)
417 return RX_HANDLER_CONSUMED; 417 return RX_HANDLER_CONSUMED;
418 *pskb = skb;
418 eth = eth_hdr(skb); 419 eth = eth_hdr(skb);
419 macvlan_forward_source(skb, port, eth->h_source); 420 macvlan_forward_source(skb, port, eth->h_source);
420 src = macvlan_hash_lookup(port, eth->h_source); 421 src = macvlan_hash_lookup(port, eth->h_source);
@@ -456,6 +457,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
456 goto out; 457 goto out;
457 } 458 }
458 459
460 *pskb = skb;
459 skb->dev = dev; 461 skb->dev = dev;
460 skb->pkt_type = PACKET_HOST; 462 skb->pkt_type = PACKET_HOST;
461 463
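
Both the ipvlan and macvlan hunks fix the same rx_handler contract: a handler that may replace the skb (unshare, defrag) must write the new pointer back through the double pointer, or the caller keeps dereferencing freed memory. A compilable model of that contract with illustrative types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf { char data[32]; };

    static struct buf *unshare(struct buf *b)
    {
        struct buf *copy = malloc(sizeof(*copy));

        if (!copy)
            return NULL;
        memcpy(copy, b, sizeof(*copy));
        free(b);        /* the old buffer is gone */
        return copy;
    }

    static int handle_frame(struct buf **pbuf)
    {
        struct buf *b = unshare(*pbuf);

        if (!b)
            return -1;
        *pbuf = b;      /* the fix: propagate the new pointer */
        return 0;
    }

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));

        if (!b || handle_frame(&b))
            return 1;
        snprintf(b->data, sizeof(b->data), "ok");    /* safe: b is current */
        puts(b->data);
        free(b);
        return 0;
    }
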
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 54036ae0a388..0fc521941c71 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -498,7 +498,7 @@ static void macvtap_sock_write_space(struct sock *sk)
498 wait_queue_head_t *wqueue; 498 wait_queue_head_t *wqueue;
499 499
500 if (!sock_writeable(sk) || 500 if (!sock_writeable(sk) ||
501 !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 501 !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
502 return; 502 return;
503 503
504 wqueue = sk_sleep(sk); 504 wqueue = sk_sleep(sk);
@@ -585,7 +585,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
585 mask |= POLLIN | POLLRDNORM; 585 mask |= POLLIN | POLLRDNORM;
586 586
587 if (sock_writeable(&q->sk) || 587 if (sock_writeable(&q->sk) ||
588 (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) && 588 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
589 sock_writeable(&q->sk))) 589 sock_writeable(&q->sk)))
590 mask |= POLLOUT | POLLWRNORM; 590 mask |= POLLOUT | POLLWRNORM;
591 591
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index fabf11d32d27..2d020a3ec0b5 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -308,6 +308,8 @@ static struct phy_driver at803x_driver[] = {
308 .flags = PHY_HAS_INTERRUPT, 308 .flags = PHY_HAS_INTERRUPT,
309 .config_aneg = genphy_config_aneg, 309 .config_aneg = genphy_config_aneg,
310 .read_status = genphy_read_status, 310 .read_status = genphy_read_status,
311 .ack_interrupt = at803x_ack_interrupt,
312 .config_intr = at803x_config_intr,
311 .driver = { 313 .driver = {
312 .owner = THIS_MODULE, 314 .owner = THIS_MODULE,
313 }, 315 },
@@ -327,6 +329,8 @@ static struct phy_driver at803x_driver[] = {
327 .flags = PHY_HAS_INTERRUPT, 329 .flags = PHY_HAS_INTERRUPT,
328 .config_aneg = genphy_config_aneg, 330 .config_aneg = genphy_config_aneg,
329 .read_status = genphy_read_status, 331 .read_status = genphy_read_status,
332 .ack_interrupt = at803x_ack_interrupt,
333 .config_intr = at803x_config_intr,
330 .driver = { 334 .driver = {
331 .owner = THIS_MODULE, 335 .owner = THIS_MODULE,
332 }, 336 },
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 07a6119121c3..3ce5d9514623 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -614,7 +614,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
614 { PHY_ID_BCM5461, 0xfffffff0 }, 614 { PHY_ID_BCM5461, 0xfffffff0 },
615 { PHY_ID_BCM54616S, 0xfffffff0 }, 615 { PHY_ID_BCM54616S, 0xfffffff0 },
616 { PHY_ID_BCM5464, 0xfffffff0 }, 616 { PHY_ID_BCM5464, 0xfffffff0 },
617 { PHY_ID_BCM5482, 0xfffffff0 }, 617 { PHY_ID_BCM5481, 0xfffffff0 },
618 { PHY_ID_BCM5482, 0xfffffff0 }, 618 { PHY_ID_BCM5482, 0xfffffff0 },
619 { PHY_ID_BCM50610, 0xfffffff0 }, 619 { PHY_ID_BCM50610, 0xfffffff0 },
620 { PHY_ID_BCM50610M, 0xfffffff0 }, 620 { PHY_ID_BCM50610M, 0xfffffff0 },
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 5de8d5827536..0240552b50f3 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1154,6 +1154,21 @@ static struct phy_driver marvell_drivers[] = {
1154 .driver = { .owner = THIS_MODULE }, 1154 .driver = { .owner = THIS_MODULE },
1155 }, 1155 },
1156 { 1156 {
1157 .phy_id = MARVELL_PHY_ID_88E1540,
1158 .phy_id_mask = MARVELL_PHY_ID_MASK,
1159 .name = "Marvell 88E1540",
1160 .features = PHY_GBIT_FEATURES,
1161 .flags = PHY_HAS_INTERRUPT,
1162 .config_aneg = &m88e1510_config_aneg,
1163 .read_status = &marvell_read_status,
1164 .ack_interrupt = &marvell_ack_interrupt,
1165 .config_intr = &marvell_config_intr,
1166 .did_interrupt = &m88e1121_did_interrupt,
1167 .resume = &genphy_resume,
1168 .suspend = &genphy_suspend,
1169 .driver = { .owner = THIS_MODULE },
1170 },
1171 {
1157 .phy_id = MARVELL_PHY_ID_88E3016, 1172 .phy_id = MARVELL_PHY_ID_88E3016,
1158 .phy_id_mask = MARVELL_PHY_ID_MASK, 1173 .phy_id_mask = MARVELL_PHY_ID_MASK,
1159 .name = "Marvell 88E3016", 1174 .name = "Marvell 88E3016",
@@ -1186,6 +1201,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
1186 { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK }, 1201 { MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
1187 { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK }, 1202 { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
1188 { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK }, 1203 { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
1204 { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
1189 { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, 1205 { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
1190 { } 1206 { }
1191}; 1207};
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 908e8d486342..7f8e7662e28c 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -149,9 +149,14 @@ int mdio_mux_init(struct device *dev,
149 } 149 }
150 cb->bus_number = v; 150 cb->bus_number = v;
151 cb->parent = pb; 151 cb->parent = pb;
152
152 cb->mii_bus = mdiobus_alloc(); 153 cb->mii_bus = mdiobus_alloc();
154 if (!cb->mii_bus) {
155 ret_val = -ENOMEM;
156 of_node_put(child_bus_node);
157 break;
158 }
153 cb->mii_bus->priv = cb; 159 cb->mii_bus->priv = cb;
154
155 cb->mii_bus->irq = cb->phy_irq; 160 cb->mii_bus->irq = cb->phy_irq;
156 cb->mii_bus->name = "mdio_mux"; 161 cb->mii_bus->name = "mdio_mux";
157 snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x", 162 snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
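The hunk above adds the previously missing out-of-memory handling for mdiobus_alloc(). A sketch of the added error path, assuming (as in mdio_mux_init()) that the enclosing loop iterates child OF nodes with an iterator that holds a reference on child_bus_node:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/phy.h>

static int example_alloc_child_bus(struct device_node *child_bus_node)
{
	struct mii_bus *bus = mdiobus_alloc();

	if (!bus) {
		/* The node iterator took a reference on child_bus_node;
		 * drop it before bailing out of the loop early.
		 */
		of_node_put(child_bus_node);
		return -ENOMEM;
	}

	/* ... fill in bus->name, bus->read, bus->write, register ... */
	return 0;
}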
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index cf6312fafea5..e13ad6cdcc22 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -339,9 +339,18 @@ static int ksz9021_config_init(struct phy_device *phydev)
339{ 339{
340 const struct device *dev = &phydev->dev; 340 const struct device *dev = &phydev->dev;
341 const struct device_node *of_node = dev->of_node; 341 const struct device_node *of_node = dev->of_node;
342 const struct device *dev_walker;
342 343
343 if (!of_node && dev->parent->of_node) 344 /* The Micrel driver has a deprecated option to place phy OF
344 of_node = dev->parent->of_node; 345 * properties in the MAC node. Walk up the tree of devices to
346 * find a device with an OF node.
347 */
348 dev_walker = &phydev->dev;
349 do {
350 of_node = dev_walker->of_node;
351 dev_walker = dev_walker->parent;
352
353 } while (!of_node && dev_walker);
345 354
346 if (of_node) { 355 if (of_node) {
347 ksz9021_load_values_from_of(phydev, of_node, 356 ksz9021_load_values_from_of(phydev, of_node,
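The micrel.c hunk replaces a one-level parent check with a loop, so PHY devices behind intermediate layers (an MDIO mux, for instance) still find the deprecated MAC-node properties. A self-contained sketch of the same walk, with a hypothetical helper name:

#include <linux/device.h>
#include <linux/of.h>

static const struct device_node *example_find_of_node(const struct device *dev)
{
	const struct device_node *of_node;

	/* Climb the parent chain until some device carries an OF node
	 * or the chain ends at a NULL parent.
	 */
	do {
		of_node = dev->of_node;
		dev = dev->parent;
	} while (!of_node && dev);

	return of_node;
}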
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index adb48abafc87..47cd306dbb3c 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -448,7 +448,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
448 mdiobus_write(phydev->bus, mii_data->phy_id, 448 mdiobus_write(phydev->bus, mii_data->phy_id,
449 mii_data->reg_num, val); 449 mii_data->reg_num, val);
450 450
451 if (mii_data->reg_num == MII_BMCR && 451 if (mii_data->phy_id == phydev->addr &&
452 mii_data->reg_num == MII_BMCR &&
452 val & BMCR_RESET) 453 val & BMCR_RESET)
453 return phy_init_hw(phydev); 454 return phy_init_hw(phydev);
454 455
@@ -863,6 +864,9 @@ void phy_state_machine(struct work_struct *work)
863 needs_aneg = true; 864 needs_aneg = true;
864 break; 865 break;
865 case PHY_NOLINK: 866 case PHY_NOLINK:
867 if (phy_interrupt_is_valid(phydev))
868 break;
869
866 err = phy_read_status(phydev); 870 err = phy_read_status(phydev);
867 if (err) 871 if (err)
868 break; 872 break;
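The first phy.c hunk tightens the SIOCSMIIREG write path: a BMCR_RESET written through the ioctl should only trigger phy_init_hw() when the write was addressed to this phydev's own bus address. A sketch of the guard (phydev->addr is the v4.4-era field name):

#include <linux/mii.h>
#include <linux/phy.h>

static int example_after_mdio_write(struct phy_device *phydev,
				    struct mii_ioctl_data *mii_data, u16 val)
{
	/* Reprogram PHY fixups only when *this* PHY was reset, not when
	 * userspace poked some other address on the shared MDIO bus.
	 */
	if (mii_data->phy_id == phydev->addr &&
	    mii_data->reg_num == MII_BMCR &&
	    (val & BMCR_RESET))
		return phy_init_hw(phydev);

	return 0;
}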
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 76cad712ddb2..dd295dbaa074 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -66,6 +66,7 @@
66#define PHY_ID_VSC8244 0x000fc6c0 66#define PHY_ID_VSC8244 0x000fc6c0
67#define PHY_ID_VSC8514 0x00070670 67#define PHY_ID_VSC8514 0x00070670
68#define PHY_ID_VSC8574 0x000704a0 68#define PHY_ID_VSC8574 0x000704a0
69#define PHY_ID_VSC8601 0x00070420
69#define PHY_ID_VSC8662 0x00070660 70#define PHY_ID_VSC8662 0x00070660
70#define PHY_ID_VSC8221 0x000fc550 71#define PHY_ID_VSC8221 0x000fc550
71#define PHY_ID_VSC8211 0x000fc4b0 72#define PHY_ID_VSC8211 0x000fc4b0
@@ -133,7 +134,8 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
133 (phydev->drv->phy_id == PHY_ID_VSC8234 || 134 (phydev->drv->phy_id == PHY_ID_VSC8234 ||
134 phydev->drv->phy_id == PHY_ID_VSC8244 || 135 phydev->drv->phy_id == PHY_ID_VSC8244 ||
135 phydev->drv->phy_id == PHY_ID_VSC8514 || 136 phydev->drv->phy_id == PHY_ID_VSC8514 ||
136 phydev->drv->phy_id == PHY_ID_VSC8574) ? 137 phydev->drv->phy_id == PHY_ID_VSC8574 ||
138 phydev->drv->phy_id == PHY_ID_VSC8601) ?
137 MII_VSC8244_IMASK_MASK : 139 MII_VSC8244_IMASK_MASK :
138 MII_VSC8221_IMASK_MASK); 140 MII_VSC8221_IMASK_MASK);
139 else { 141 else {
@@ -272,6 +274,18 @@ static struct phy_driver vsc82xx_driver[] = {
272 .config_intr = &vsc82xx_config_intr, 274 .config_intr = &vsc82xx_config_intr,
273 .driver = { .owner = THIS_MODULE,}, 275 .driver = { .owner = THIS_MODULE,},
274}, { 276}, {
277 .phy_id = PHY_ID_VSC8601,
278 .name = "Vitesse VSC8601",
279 .phy_id_mask = 0x000ffff0,
280 .features = PHY_GBIT_FEATURES,
281 .flags = PHY_HAS_INTERRUPT,
282 .config_init = &genphy_config_init,
283 .config_aneg = &genphy_config_aneg,
284 .read_status = &genphy_read_status,
285 .ack_interrupt = &vsc824x_ack_interrupt,
286 .config_intr = &vsc82xx_config_intr,
287 .driver = { .owner = THIS_MODULE,},
288}, {
275 .phy_id = PHY_ID_VSC8662, 289 .phy_id = PHY_ID_VSC8662,
276 .name = "Vitesse VSC8662", 290 .name = "Vitesse VSC8662",
277 .phy_id_mask = 0x000ffff0, 291 .phy_id_mask = 0x000ffff0,
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 5e0b43283bce..0a37f840fcc5 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -568,6 +568,9 @@ static int pppoe_create(struct net *net, struct socket *sock, int kern)
568 sk->sk_family = PF_PPPOX; 568 sk->sk_family = PF_PPPOX;
569 sk->sk_protocol = PX_PROTO_OE; 569 sk->sk_protocol = PX_PROTO_OE;
570 570
571 INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work,
572 pppoe_unbind_sock_work);
573
571 return 0; 574 return 0;
572} 575}
573 576
@@ -632,8 +635,6 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
632 635
633 lock_sock(sk); 636 lock_sock(sk);
634 637
635 INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work);
636
637 error = -EINVAL; 638 error = -EINVAL;
638 if (sp->sa_protocol != PX_PROTO_OE) 639 if (sp->sa_protocol != PX_PROTO_OE)
639 goto end; 640 goto end;
@@ -663,8 +664,13 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
663 po->pppoe_dev = NULL; 664 po->pppoe_dev = NULL;
664 } 665 }
665 666
666 memset(sk_pppox(po) + 1, 0, 667 po->pppoe_ifindex = 0;
667 sizeof(struct pppox_sock) - sizeof(struct sock)); 668 memset(&po->pppoe_pa, 0, sizeof(po->pppoe_pa));
669 memset(&po->pppoe_relay, 0, sizeof(po->pppoe_relay));
670 memset(&po->chan, 0, sizeof(po->chan));
671 po->next = NULL;
672 po->num = 0;
673
668 sk->sk_state = PPPOX_NONE; 674 sk->sk_state = PPPOX_NONE;
669 } 675 }
670 676
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index fc69e41d0950..597c53e0a2ec 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -419,6 +419,9 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
419 struct pptp_opt *opt = &po->proto.pptp; 419 struct pptp_opt *opt = &po->proto.pptp;
420 int error = 0; 420 int error = 0;
421 421
422 if (sockaddr_len < sizeof(struct sockaddr_pppox))
423 return -EINVAL;
424
422 lock_sock(sk); 425 lock_sock(sk);
423 426
424 opt->src_addr = sp->sa_addr.pptp; 427 opt->src_addr = sp->sa_addr.pptp;
@@ -440,6 +443,9 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
440 struct flowi4 fl4; 443 struct flowi4 fl4;
441 int error = 0; 444 int error = 0;
442 445
446 if (sockaddr_len < sizeof(struct sockaddr_pppox))
447 return -EINVAL;
448
443 if (sp->sa_protocol != PX_PROTO_PPTP) 449 if (sp->sa_protocol != PX_PROTO_PPTP)
444 return -EINVAL; 450 return -EINVAL;
445 451
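Both pptp hunks add the same bounds check before any field of the user-supplied sockaddr is read; a short sockaddr from userspace would otherwise be read past its end. A sketch of the check, wrapped in a hypothetical helper:

#include <linux/errno.h>
#include <linux/if_pppox.h>
#include <linux/socket.h>

static int example_check_sockaddr(struct sockaddr *uservaddr, int sockaddr_len)
{
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr;

	/* Reject short buffers before dereferencing any field. */
	if (sockaddr_len < sizeof(struct sockaddr_pppox))
		return -EINVAL;

	if (sp->sa_protocol != PX_PROTO_PPTP)
		return -EINVAL;

	return 0;
}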
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b1878faea397..f0db770e8b2f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1040,7 +1040,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
1040 mask |= POLLIN | POLLRDNORM; 1040 mask |= POLLIN | POLLRDNORM;
1041 1041
1042 if (sock_writeable(sk) || 1042 if (sock_writeable(sk) ||
1043 (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1043 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1044 sock_writeable(sk))) 1044 sock_writeable(sk)))
1045 mask |= POLLOUT | POLLWRNORM; 1045 mask |= POLLOUT | POLLWRNORM;
1046 1046
@@ -1488,7 +1488,7 @@ static void tun_sock_write_space(struct sock *sk)
1488 if (!sock_writeable(sk)) 1488 if (!sock_writeable(sk))
1489 return; 1489 return;
1490 1490
1491 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 1491 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
1492 return; 1492 return;
1493 1493
1494 wqueue = sk_sleep(sk); 1494 wqueue = sk_sleep(sk);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c78d3cb1b464..3da70bf9936a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -696,6 +696,11 @@ static const struct usb_device_id products[] = {
696 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 696 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
697 .driver_info = (kernel_ulong_t) &wwan_info, 697 .driver_info = (kernel_ulong_t) &wwan_info,
698}, { 698}, {
699 /* Dell DW5580 modules */
700 USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM,
701 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
702 .driver_info = (kernel_ulong_t)&wwan_info,
703}, {
699 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, 704 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
700 USB_CDC_PROTO_NONE), 705 USB_CDC_PROTO_NONE),
701 .driver_info = (unsigned long) &cdc_info, 706 .driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bbde9884ab8a..8973abdec9f6 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
159 goto err; 159 goto err;
160 160
161 ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0); 161 ret = cdc_ncm_bind_common(dev, intf, data_altsetting, dev->driver_info->data);
162 if (ret) 162 if (ret)
163 goto err; 163 goto err;
164 164
@@ -582,6 +582,26 @@ static const struct driver_info cdc_mbim_info_zlp = {
582 .tx_fixup = cdc_mbim_tx_fixup, 582 .tx_fixup = cdc_mbim_tx_fixup,
583}; 583};
584 584
 585/* The specification explicitly allows NDPs to be placed anywhere in the
 586 * frame, but some devices fail unless the NDP is placed after the IP
 587 * packets. Use the CDC_NCM_FLAG_NDP_TO_END flag to force this
 588 * behaviour.
589 *
590 * Note: The current implementation of this feature restricts each NTB
591 * to a single NDP, implying that multiplexed sessions cannot share an
 592 * NTB. This might affect performance for multiplexed sessions.
593 */
594static const struct driver_info cdc_mbim_info_ndp_to_end = {
595 .description = "CDC MBIM",
596 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
597 .bind = cdc_mbim_bind,
598 .unbind = cdc_mbim_unbind,
599 .manage_power = cdc_mbim_manage_power,
600 .rx_fixup = cdc_mbim_rx_fixup,
601 .tx_fixup = cdc_mbim_tx_fixup,
602 .data = CDC_NCM_FLAG_NDP_TO_END,
603};
604
585static const struct usb_device_id mbim_devs[] = { 605static const struct usb_device_id mbim_devs[] = {
586 /* This duplicate NCM entry is intentional. MBIM devices can 606 /* This duplicate NCM entry is intentional. MBIM devices can
587 * be disguised as NCM by default, and this is necessary to 607 * be disguised as NCM by default, and this is necessary to
@@ -597,6 +617,10 @@ static const struct usb_device_id mbim_devs[] = {
597 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 617 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
598 .driver_info = (unsigned long)&cdc_mbim_info, 618 .driver_info = (unsigned long)&cdc_mbim_info,
599 }, 619 },
620 /* Huawei E3372 fails unless NDP comes after the IP packets */
621 { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
622 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
623 },
600 /* default entry */ 624 /* default entry */
601 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 625 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
602 .driver_info = (unsigned long)&cdc_mbim_info_zlp, 626 .driver_info = (unsigned long)&cdc_mbim_info_zlp,
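The new cdc_mbim_info_ndp_to_end entry works through the usbnet matching machinery: the usb_device_id for the Huawei E3372 points at a driver_info whose .data carries CDC_NCM_FLAG_NDP_TO_END, and the reworked cdc_mbim_bind() forwards dev->driver_info->data as the drvflags argument, so only that device gets the NDP-to-end layout. A sketch of the forwarding (the altsetting value is a placeholder):

#include <linux/usb/cdc_ncm.h>
#include <linux/usb/usbnet.h>

static int example_bind(struct usbnet *dev, struct usb_interface *intf)
{
	/* Mirrors the cdc_mbim_bind() change above: per-device quirk
	 * flags travel from the match table into the NCM core.
	 */
	return cdc_ncm_bind_common(dev, intf, /* data_altsetting */ 0,
				   dev->driver_info->data);
}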
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index a187f08113ec..1e9843a41168 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -691,7 +691,6 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
691 691
692int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags) 692int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
693{ 693{
694 const struct usb_cdc_union_desc *union_desc = NULL;
695 struct cdc_ncm_ctx *ctx; 694 struct cdc_ncm_ctx *ctx;
696 struct usb_driver *driver; 695 struct usb_driver *driver;
697 u8 *buf; 696 u8 *buf;
@@ -725,15 +724,16 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
725 /* parse through descriptors associated with control interface */ 724 /* parse through descriptors associated with control interface */
726 cdc_parse_cdc_header(&hdr, intf, buf, len); 725 cdc_parse_cdc_header(&hdr, intf, buf, len);
727 726
728 ctx->data = usb_ifnum_to_if(dev->udev, 727 if (hdr.usb_cdc_union_desc)
729 hdr.usb_cdc_union_desc->bSlaveInterface0); 728 ctx->data = usb_ifnum_to_if(dev->udev,
729 hdr.usb_cdc_union_desc->bSlaveInterface0);
730 ctx->ether_desc = hdr.usb_cdc_ether_desc; 730 ctx->ether_desc = hdr.usb_cdc_ether_desc;
731 ctx->func_desc = hdr.usb_cdc_ncm_desc; 731 ctx->func_desc = hdr.usb_cdc_ncm_desc;
732 ctx->mbim_desc = hdr.usb_cdc_mbim_desc; 732 ctx->mbim_desc = hdr.usb_cdc_mbim_desc;
733 ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc; 733 ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc;
734 734
735 /* some buggy devices have an IAD but no CDC Union */ 735 /* some buggy devices have an IAD but no CDC Union */
736 if (!union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) { 736 if (!hdr.usb_cdc_union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
737 ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1); 737 ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
738 dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n"); 738 dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
739 } 739 }
@@ -955,10 +955,18 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
955 * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and 955 * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and
956 * the wNdpIndex field in the header is actually not consistent with reality. It will be later. 956 * the wNdpIndex field in the header is actually not consistent with reality. It will be later.
957 */ 957 */
958 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) 958 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
959 if (ctx->delayed_ndp16->dwSignature == sign) 959 if (ctx->delayed_ndp16->dwSignature == sign)
960 return ctx->delayed_ndp16; 960 return ctx->delayed_ndp16;
961 961
962 /* We can only push a single NDP to the end. Return
963 * NULL to send what we've already got and queue this
964 * skb for later.
965 */
966 else if (ctx->delayed_ndp16->dwSignature)
967 return NULL;
968 }
969
962 /* follow the chain of NDPs, looking for a match */ 970 /* follow the chain of NDPs, looking for a match */
963 while (ndpoffset) { 971 while (ndpoffset) {
964 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); 972 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 34799eaace41..9a5be8b85186 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -725,6 +725,7 @@ static const struct usb_device_id products[] = {
725 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 725 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
726 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 726 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
727 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 727 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
728 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
728 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ 729 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
729 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ 730 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
730 {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */ 731 {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d9427ca3dba7..2e32c41536ae 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3067,17 +3067,6 @@ static int rtl8152_open(struct net_device *netdev)
3067 3067
3068 mutex_lock(&tp->control); 3068 mutex_lock(&tp->control);
3069 3069
3070 /* The WORK_ENABLE may be set when autoresume occurs */
3071 if (test_bit(WORK_ENABLE, &tp->flags)) {
3072 clear_bit(WORK_ENABLE, &tp->flags);
3073 usb_kill_urb(tp->intr_urb);
3074 cancel_delayed_work_sync(&tp->schedule);
3075
3076 /* disable the tx/rx, if the workqueue has enabled them. */
3077 if (netif_carrier_ok(netdev))
3078 tp->rtl_ops.disable(tp);
3079 }
3080
3081 tp->rtl_ops.up(tp); 3070 tp->rtl_ops.up(tp);
3082 3071
3083 rtl8152_set_speed(tp, AUTONEG_ENABLE, 3072 rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3124,12 +3113,6 @@ static int rtl8152_close(struct net_device *netdev)
3124 } else { 3113 } else {
3125 mutex_lock(&tp->control); 3114 mutex_lock(&tp->control);
3126 3115
3127 /* The autosuspend may have been enabled and wouldn't
3128 * be disable when autoresume occurs, because the
3129 * netif_running() would be false.
3130 */
3131 rtl_runtime_suspend_enable(tp, false);
3132
3133 tp->rtl_ops.down(tp); 3116 tp->rtl_ops.down(tp);
3134 3117
3135 mutex_unlock(&tp->control); 3118 mutex_unlock(&tp->control);
@@ -3512,7 +3495,7 @@ static int rtl8152_resume(struct usb_interface *intf)
3512 netif_device_attach(tp->netdev); 3495 netif_device_attach(tp->netdev);
3513 } 3496 }
3514 3497
3515 if (netif_running(tp->netdev)) { 3498 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
3516 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3499 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3517 rtl_runtime_suspend_enable(tp, false); 3500 rtl_runtime_suspend_enable(tp, false);
3518 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3501 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
@@ -3532,6 +3515,8 @@ static int rtl8152_resume(struct usb_interface *intf)
3532 } 3515 }
3533 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3516 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
3534 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3517 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3518 if (tp->netdev->flags & IFF_UP)
3519 rtl_runtime_suspend_enable(tp, false);
3535 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3520 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3536 } 3521 }
3537 3522
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d8838dedb7a4..f94ab786088f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -140,6 +140,12 @@ struct virtnet_info {
140 140
141 /* CPU hot plug notifier */ 141 /* CPU hot plug notifier */
142 struct notifier_block nb; 142 struct notifier_block nb;
143
144 /* Control VQ buffers: protected by the rtnl lock */
145 struct virtio_net_ctrl_hdr ctrl_hdr;
146 virtio_net_ctrl_ack ctrl_status;
147 u8 ctrl_promisc;
148 u8 ctrl_allmulti;
143}; 149};
144 150
145struct padded_vnet_hdr { 151struct padded_vnet_hdr {
@@ -976,31 +982,30 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
976 struct scatterlist *out) 982 struct scatterlist *out)
977{ 983{
978 struct scatterlist *sgs[4], hdr, stat; 984 struct scatterlist *sgs[4], hdr, stat;
979 struct virtio_net_ctrl_hdr ctrl;
980 virtio_net_ctrl_ack status = ~0;
981 unsigned out_num = 0, tmp; 985 unsigned out_num = 0, tmp;
982 986
983 /* Caller should know better */ 987 /* Caller should know better */
984 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); 988 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
985 989
986 ctrl.class = class; 990 vi->ctrl_status = ~0;
987 ctrl.cmd = cmd; 991 vi->ctrl_hdr.class = class;
992 vi->ctrl_hdr.cmd = cmd;
988 /* Add header */ 993 /* Add header */
989 sg_init_one(&hdr, &ctrl, sizeof(ctrl)); 994 sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
990 sgs[out_num++] = &hdr; 995 sgs[out_num++] = &hdr;
991 996
992 if (out) 997 if (out)
993 sgs[out_num++] = out; 998 sgs[out_num++] = out;
994 999
995 /* Add return status. */ 1000 /* Add return status. */
996 sg_init_one(&stat, &status, sizeof(status)); 1001 sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
997 sgs[out_num] = &stat; 1002 sgs[out_num] = &stat;
998 1003
999 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); 1004 BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
1000 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); 1005 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
1001 1006
1002 if (unlikely(!virtqueue_kick(vi->cvq))) 1007 if (unlikely(!virtqueue_kick(vi->cvq)))
1003 return status == VIRTIO_NET_OK; 1008 return vi->ctrl_status == VIRTIO_NET_OK;
1004 1009
1005 /* Spin for a response, the kick causes an ioport write, trapping 1010 /* Spin for a response, the kick causes an ioport write, trapping
1006 * into the hypervisor, so the request should be handled immediately. 1011 * into the hypervisor, so the request should be handled immediately.
@@ -1009,7 +1014,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1009 !virtqueue_is_broken(vi->cvq)) 1014 !virtqueue_is_broken(vi->cvq))
1010 cpu_relax(); 1015 cpu_relax();
1011 1016
1012 return status == VIRTIO_NET_OK; 1017 return vi->ctrl_status == VIRTIO_NET_OK;
1013} 1018}
1014 1019
1015static int virtnet_set_mac_address(struct net_device *dev, void *p) 1020static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -1151,7 +1156,6 @@ static void virtnet_set_rx_mode(struct net_device *dev)
1151{ 1156{
1152 struct virtnet_info *vi = netdev_priv(dev); 1157 struct virtnet_info *vi = netdev_priv(dev);
1153 struct scatterlist sg[2]; 1158 struct scatterlist sg[2];
1154 u8 promisc, allmulti;
1155 struct virtio_net_ctrl_mac *mac_data; 1159 struct virtio_net_ctrl_mac *mac_data;
1156 struct netdev_hw_addr *ha; 1160 struct netdev_hw_addr *ha;
1157 int uc_count; 1161 int uc_count;
@@ -1163,22 +1167,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
1163 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) 1167 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1164 return; 1168 return;
1165 1169
1166 promisc = ((dev->flags & IFF_PROMISC) != 0); 1170 vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
1167 allmulti = ((dev->flags & IFF_ALLMULTI) != 0); 1171 vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1168 1172
1169 sg_init_one(sg, &promisc, sizeof(promisc)); 1173 sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
1170 1174
1171 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1175 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1172 VIRTIO_NET_CTRL_RX_PROMISC, sg)) 1176 VIRTIO_NET_CTRL_RX_PROMISC, sg))
1173 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", 1177 dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
1174 promisc ? "en" : "dis"); 1178 vi->ctrl_promisc ? "en" : "dis");
1175 1179
1176 sg_init_one(sg, &allmulti, sizeof(allmulti)); 1180 sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
1177 1181
1178 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, 1182 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1179 VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) 1183 VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
1180 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", 1184 dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1181 allmulti ? "en" : "dis"); 1185 vi->ctrl_allmulti ? "en" : "dis");
1182 1186
1183 uc_count = netdev_uc_count(dev); 1187 uc_count = netdev_uc_count(dev);
1184 mc_count = netdev_mc_count(dev); 1188 mc_count = netdev_mc_count(dev);
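The virtio_net changes above move the control-queue header, status byte and rx-mode flags off the kernel stack into struct virtnet_info (serialized by the rtnl lock), because the scatterlists handed to virtqueue_add_sgs() may be DMA-mapped and stack memory is not a valid DMA target. A condensed sketch of the resulting pattern, with hypothetical names:

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>

struct example_priv {
	struct virtqueue *cvq;
	/* Control buffers: device-visible, so they must not live on
	 * the stack of the sending function.
	 */
	struct virtio_net_ctrl_hdr ctrl_hdr;
	virtio_net_ctrl_ack ctrl_status;
};

static bool example_send_cmd(struct example_priv *p, u8 class, u8 cmd)
{
	struct scatterlist *sgs[2], hdr, stat;
	unsigned int tmp;

	p->ctrl_status = ~0;
	p->ctrl_hdr.class = class;
	p->ctrl_hdr.cmd = cmd;

	sg_init_one(&hdr, &p->ctrl_hdr, sizeof(p->ctrl_hdr));
	sgs[0] = &hdr;			/* out: command header */
	sg_init_one(&stat, &p->ctrl_status, sizeof(p->ctrl_status));
	sgs[1] = &stat;			/* in: device writes the ack */

	virtqueue_add_sgs(p->cvq, sgs, 1, 1, p, GFP_ATOMIC);
	if (!virtqueue_kick(p->cvq))
		return p->ctrl_status == VIRTIO_NET_OK;

	/* Spin for the response, as the driver above does. */
	while (!virtqueue_get_buf(p->cvq, &tmp) &&
	       !virtqueue_is_broken(p->cvq))
		cpu_relax();

	return p->ctrl_status == VIRTIO_NET_OK;
}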
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 46f4caddccbe..417903715437 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -587,6 +587,12 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
587 &adapter->pdev->dev, 587 &adapter->pdev->dev,
588 rbi->skb->data, rbi->len, 588 rbi->skb->data, rbi->len,
589 PCI_DMA_FROMDEVICE); 589 PCI_DMA_FROMDEVICE);
590 if (dma_mapping_error(&adapter->pdev->dev,
591 rbi->dma_addr)) {
592 dev_kfree_skb_any(rbi->skb);
593 rq->stats.rx_buf_alloc_failure++;
594 break;
595 }
590 } else { 596 } else {
591 /* rx buffer skipped by the device */ 597 /* rx buffer skipped by the device */
592 } 598 }
@@ -605,13 +611,18 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
605 &adapter->pdev->dev, 611 &adapter->pdev->dev,
606 rbi->page, 0, PAGE_SIZE, 612 rbi->page, 0, PAGE_SIZE,
607 PCI_DMA_FROMDEVICE); 613 PCI_DMA_FROMDEVICE);
614 if (dma_mapping_error(&adapter->pdev->dev,
615 rbi->dma_addr)) {
616 put_page(rbi->page);
617 rq->stats.rx_buf_alloc_failure++;
618 break;
619 }
608 } else { 620 } else {
609 /* rx buffers skipped by the device */ 621 /* rx buffers skipped by the device */
610 } 622 }
611 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT; 623 val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
612 } 624 }
613 625
614 BUG_ON(rbi->dma_addr == 0);
615 gd->rxd.addr = cpu_to_le64(rbi->dma_addr); 626 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
616 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) 627 gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
617 | val | rbi->len); 628 | val | rbi->len);
@@ -655,7 +666,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
655} 666}
656 667
657 668
658static void 669static int
659vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, 670vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
660 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, 671 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
661 struct vmxnet3_adapter *adapter) 672 struct vmxnet3_adapter *adapter)
@@ -715,6 +726,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
715 tbi->dma_addr = dma_map_single(&adapter->pdev->dev, 726 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
716 skb->data + buf_offset, buf_size, 727 skb->data + buf_offset, buf_size,
717 PCI_DMA_TODEVICE); 728 PCI_DMA_TODEVICE);
729 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
730 return -EFAULT;
718 731
719 tbi->len = buf_size; 732 tbi->len = buf_size;
720 733
@@ -755,6 +768,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
755 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, 768 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
756 buf_offset, buf_size, 769 buf_offset, buf_size,
757 DMA_TO_DEVICE); 770 DMA_TO_DEVICE);
771 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
772 return -EFAULT;
758 773
759 tbi->len = buf_size; 774 tbi->len = buf_size;
760 775
@@ -782,6 +797,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
782 /* set the last buf_info for the pkt */ 797 /* set the last buf_info for the pkt */
783 tbi->skb = skb; 798 tbi->skb = skb;
784 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; 799 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
800
801 return 0;
785} 802}
786 803
787 804
@@ -1020,7 +1037,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1020 } 1037 }
1021 1038
1022 /* fill tx descs related to addr & len */ 1039 /* fill tx descs related to addr & len */
1023 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); 1040 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1041 goto unlock_drop_pkt;
1024 1042
1025 /* setup the EOP desc */ 1043 /* setup the EOP desc */
1026 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); 1044 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
@@ -1231,6 +1249,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1231 struct vmxnet3_rx_buf_info *rbi; 1249 struct vmxnet3_rx_buf_info *rbi;
1232 struct sk_buff *skb, *new_skb = NULL; 1250 struct sk_buff *skb, *new_skb = NULL;
1233 struct page *new_page = NULL; 1251 struct page *new_page = NULL;
1252 dma_addr_t new_dma_addr;
1234 int num_to_alloc; 1253 int num_to_alloc;
1235 struct Vmxnet3_RxDesc *rxd; 1254 struct Vmxnet3_RxDesc *rxd;
1236 u32 idx, ring_idx; 1255 u32 idx, ring_idx;
@@ -1287,6 +1306,21 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1287 skip_page_frags = true; 1306 skip_page_frags = true;
1288 goto rcd_done; 1307 goto rcd_done;
1289 } 1308 }
1309 new_dma_addr = dma_map_single(&adapter->pdev->dev,
1310 new_skb->data, rbi->len,
1311 PCI_DMA_FROMDEVICE);
1312 if (dma_mapping_error(&adapter->pdev->dev,
1313 new_dma_addr)) {
1314 dev_kfree_skb(new_skb);
 1315 /* Skb mapping failed; do not hand this skb over
 1316 * to the stack. Reuse the rx buffer. Drop the existing pkt.
1317 */
1318 rq->stats.rx_buf_alloc_failure++;
1319 ctx->skb = NULL;
1320 rq->stats.drop_total++;
1321 skip_page_frags = true;
1322 goto rcd_done;
1323 }
1290 1324
1291 dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, 1325 dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
1292 rbi->len, 1326 rbi->len,
@@ -1303,9 +1337,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1303 1337
1304 /* Immediate refill */ 1338 /* Immediate refill */
1305 rbi->skb = new_skb; 1339 rbi->skb = new_skb;
1306 rbi->dma_addr = dma_map_single(&adapter->pdev->dev, 1340 rbi->dma_addr = new_dma_addr;
1307 rbi->skb->data, rbi->len,
1308 PCI_DMA_FROMDEVICE);
1309 rxd->addr = cpu_to_le64(rbi->dma_addr); 1341 rxd->addr = cpu_to_le64(rbi->dma_addr);
1310 rxd->len = rbi->len; 1342 rxd->len = rbi->len;
1311 if (adapter->version == 2 && 1343 if (adapter->version == 2 &&
@@ -1348,6 +1380,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1348 skip_page_frags = true; 1380 skip_page_frags = true;
1349 goto rcd_done; 1381 goto rcd_done;
1350 } 1382 }
1383 new_dma_addr = dma_map_page(&adapter->pdev->dev
1384 , rbi->page,
1385 0, PAGE_SIZE,
1386 PCI_DMA_FROMDEVICE);
1387 if (dma_mapping_error(&adapter->pdev->dev,
1388 new_dma_addr)) {
1389 put_page(new_page);
1390 rq->stats.rx_buf_alloc_failure++;
1391 dev_kfree_skb(ctx->skb);
1392 ctx->skb = NULL;
1393 skip_page_frags = true;
1394 goto rcd_done;
1395 }
1351 1396
1352 dma_unmap_page(&adapter->pdev->dev, 1397 dma_unmap_page(&adapter->pdev->dev,
1353 rbi->dma_addr, rbi->len, 1398 rbi->dma_addr, rbi->len,
@@ -1357,10 +1402,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1357 1402
1358 /* Immediate refill */ 1403 /* Immediate refill */
1359 rbi->page = new_page; 1404 rbi->page = new_page;
1360 rbi->dma_addr = dma_map_page(&adapter->pdev->dev 1405 rbi->dma_addr = new_dma_addr;
1361 , rbi->page,
1362 0, PAGE_SIZE,
1363 PCI_DMA_FROMDEVICE);
1364 rxd->addr = cpu_to_le64(rbi->dma_addr); 1406 rxd->addr = cpu_to_le64(rbi->dma_addr);
1365 rxd->len = rbi->len; 1407 rxd->len = rbi->len;
1366 } 1408 }
@@ -2157,16 +2199,18 @@ vmxnet3_set_mc(struct net_device *netdev)
2157 if (!netdev_mc_empty(netdev)) { 2199 if (!netdev_mc_empty(netdev)) {
2158 new_table = vmxnet3_copy_mc(netdev); 2200 new_table = vmxnet3_copy_mc(netdev);
2159 if (new_table) { 2201 if (new_table) {
2160 rxConf->mfTableLen = cpu_to_le16( 2202 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2161 netdev_mc_count(netdev) * ETH_ALEN); 2203
2204 rxConf->mfTableLen = cpu_to_le16(sz);
2162 new_table_pa = dma_map_single( 2205 new_table_pa = dma_map_single(
2163 &adapter->pdev->dev, 2206 &adapter->pdev->dev,
2164 new_table, 2207 new_table,
2165 rxConf->mfTableLen, 2208 sz,
2166 PCI_DMA_TODEVICE); 2209 PCI_DMA_TODEVICE);
2167 } 2210 }
2168 2211
2169 if (new_table_pa) { 2212 if (!dma_mapping_error(&adapter->pdev->dev,
2213 new_table_pa)) {
2170 new_mode |= VMXNET3_RXM_MCAST; 2214 new_mode |= VMXNET3_RXM_MCAST;
2171 rxConf->mfTablePA = cpu_to_le64(new_table_pa); 2215 rxConf->mfTablePA = cpu_to_le64(new_table_pa);
2172 } else { 2216 } else {
@@ -3074,6 +3118,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3074 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, 3118 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3075 sizeof(struct vmxnet3_adapter), 3119 sizeof(struct vmxnet3_adapter),
3076 PCI_DMA_TODEVICE); 3120 PCI_DMA_TODEVICE);
3121 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3122 dev_err(&pdev->dev, "Failed to map dma\n");
3123 err = -EFAULT;
3124 goto err_dma_map;
3125 }
3077 adapter->shared = dma_alloc_coherent( 3126 adapter->shared = dma_alloc_coherent(
3078 &adapter->pdev->dev, 3127 &adapter->pdev->dev,
3079 sizeof(struct Vmxnet3_DriverShared), 3128 sizeof(struct Vmxnet3_DriverShared),
@@ -3232,6 +3281,7 @@ err_alloc_queue_desc:
3232err_alloc_shared: 3281err_alloc_shared:
3233 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, 3282 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3234 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); 3283 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3284err_dma_map:
3235 free_netdev(netdev); 3285 free_netdev(netdev);
3236 return err; 3286 return err;
3237} 3287}
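Every new hunk in vmxnet3_drv.c applies the same rule: a dma_map_single()/dma_map_page() result must be checked with dma_mapping_error() before use, and the resource backing the failed mapping released, since a zero dma_addr_t is not a reliable failure indicator (hence the dropped BUG_ON). A sketch of the pattern for the rx-skb case:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int example_map_rx_skb(struct device *dev, struct sk_buff *skb,
			      unsigned int len, dma_addr_t *dma)
{
	*dma = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		/* Free the buffer we failed to map; the caller counts
		 * this against rx_buf_alloc_failure and bails out.
		 */
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	return 0;
}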
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3f859a55c035..4c58c83dc225 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.3.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.4.0-k"
73 73
 74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040300 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040400
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 92fa3e1ea65c..4f9748457f5a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -907,7 +907,6 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
907 struct nlattr *tb[], struct nlattr *data[]) 907 struct nlattr *tb[], struct nlattr *data[])
908{ 908{
909 struct net_vrf *vrf = netdev_priv(dev); 909 struct net_vrf *vrf = netdev_priv(dev);
910 int err;
911 910
912 if (!data || !data[IFLA_VRF_TABLE]) 911 if (!data || !data[IFLA_VRF_TABLE])
913 return -EINVAL; 912 return -EINVAL;
@@ -916,15 +915,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
916 915
917 dev->priv_flags |= IFF_L3MDEV_MASTER; 916 dev->priv_flags |= IFF_L3MDEV_MASTER;
918 917
919 err = register_netdevice(dev); 918 return register_netdevice(dev);
920 if (err < 0)
921 goto out_fail;
922
923 return 0;
924
925out_fail:
926 free_netdev(dev);
927 return err;
928} 919}
929 920
930static size_t vrf_nl_getsize(const struct net_device *dev) 921static size_t vrf_nl_getsize(const struct net_device *dev)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 6369a5734d4c..ba363cedef80 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1158,7 +1158,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
1158 struct pcpu_sw_netstats *stats; 1158 struct pcpu_sw_netstats *stats;
1159 union vxlan_addr saddr; 1159 union vxlan_addr saddr;
1160 int err = 0; 1160 int err = 0;
1161 union vxlan_addr *remote_ip;
1162 1161
1163 /* For flow based devices, map all packets to VNI 0 */ 1162 /* For flow based devices, map all packets to VNI 0 */
1164 if (vs->flags & VXLAN_F_COLLECT_METADATA) 1163 if (vs->flags & VXLAN_F_COLLECT_METADATA)
@@ -1169,7 +1168,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
1169 if (!vxlan) 1168 if (!vxlan)
1170 goto drop; 1169 goto drop;
1171 1170
1172 remote_ip = &vxlan->default_dst.remote_ip;
1173 skb_reset_mac_header(skb); 1171 skb_reset_mac_header(skb);
1174 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); 1172 skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
1175 skb->protocol = eth_type_trans(skb, vxlan->dev); 1173 skb->protocol = eth_type_trans(skb, vxlan->dev);
@@ -1179,8 +1177,8 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
1179 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) 1177 if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
1180 goto drop; 1178 goto drop;
1181 1179
1182 /* Re-examine inner Ethernet packet */ 1180 /* Get data from the outer IP header */
1183 if (remote_ip->sa.sa_family == AF_INET) { 1181 if (vxlan_get_sk_family(vs) == AF_INET) {
1184 oip = ip_hdr(skb); 1182 oip = ip_hdr(skb);
1185 saddr.sin.sin_addr.s_addr = oip->saddr; 1183 saddr.sin.sin_addr.s_addr = oip->saddr;
1186 saddr.sa.sa_family = AF_INET; 1184 saddr.sa.sa_family = AF_INET;
@@ -1848,6 +1846,34 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk
1848 !(vxflags & VXLAN_F_UDP_CSUM)); 1846 !(vxflags & VXLAN_F_UDP_CSUM));
1849} 1847}
1850 1848
1849#if IS_ENABLED(CONFIG_IPV6)
1850static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
1851 struct sk_buff *skb, int oif,
1852 const struct in6_addr *daddr,
1853 struct in6_addr *saddr)
1854{
1855 struct dst_entry *ndst;
1856 struct flowi6 fl6;
1857 int err;
1858
1859 memset(&fl6, 0, sizeof(fl6));
1860 fl6.flowi6_oif = oif;
1861 fl6.daddr = *daddr;
1862 fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
1863 fl6.flowi6_mark = skb->mark;
1864 fl6.flowi6_proto = IPPROTO_UDP;
1865
1866 err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
1867 vxlan->vn6_sock->sock->sk,
1868 &ndst, &fl6);
1869 if (err < 0)
1870 return ERR_PTR(err);
1871
1872 *saddr = fl6.saddr;
1873 return ndst;
1874}
1875#endif
1876
1851/* Bypass encapsulation if the destination is local */ 1877/* Bypass encapsulation if the destination is local */
1852static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, 1878static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
1853 struct vxlan_dev *dst_vxlan) 1879 struct vxlan_dev *dst_vxlan)
@@ -2035,21 +2061,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2035#if IS_ENABLED(CONFIG_IPV6) 2061#if IS_ENABLED(CONFIG_IPV6)
2036 } else { 2062 } else {
2037 struct dst_entry *ndst; 2063 struct dst_entry *ndst;
2038 struct flowi6 fl6; 2064 struct in6_addr saddr;
2039 u32 rt6i_flags; 2065 u32 rt6i_flags;
2040 2066
2041 if (!vxlan->vn6_sock) 2067 if (!vxlan->vn6_sock)
2042 goto drop; 2068 goto drop;
2043 sk = vxlan->vn6_sock->sock->sk; 2069 sk = vxlan->vn6_sock->sock->sk;
2044 2070
2045 memset(&fl6, 0, sizeof(fl6)); 2071 ndst = vxlan6_get_route(vxlan, skb,
2046 fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0; 2072 rdst ? rdst->remote_ifindex : 0,
2047 fl6.daddr = dst->sin6.sin6_addr; 2073 &dst->sin6.sin6_addr, &saddr);
2048 fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; 2074 if (IS_ERR(ndst)) {
2049 fl6.flowi6_mark = skb->mark;
2050 fl6.flowi6_proto = IPPROTO_UDP;
2051
2052 if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) {
2053 netdev_dbg(dev, "no route to %pI6\n", 2075 netdev_dbg(dev, "no route to %pI6\n",
2054 &dst->sin6.sin6_addr); 2076 &dst->sin6.sin6_addr);
2055 dev->stats.tx_carrier_errors++; 2077 dev->stats.tx_carrier_errors++;
@@ -2081,7 +2103,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
2081 } 2103 }
2082 2104
2083 ttl = ttl ? : ip6_dst_hoplimit(ndst); 2105 ttl = ttl ? : ip6_dst_hoplimit(ndst);
2084 err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr, 2106 err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
2085 0, ttl, src_port, dst_port, htonl(vni << 8), md, 2107 0, ttl, src_port, dst_port, htonl(vni << 8), md,
2086 !net_eq(vxlan->net, dev_net(vxlan->dev)), 2108 !net_eq(vxlan->net, dev_net(vxlan->dev)),
2087 flags); 2109 flags);
@@ -2395,9 +2417,30 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2395 vxlan->cfg.port_max, true); 2417 vxlan->cfg.port_max, true);
2396 dport = info->key.tp_dst ? : vxlan->cfg.dst_port; 2418 dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
2397 2419
2398 if (ip_tunnel_info_af(info) == AF_INET) 2420 if (ip_tunnel_info_af(info) == AF_INET) {
2421 if (!vxlan->vn4_sock)
2422 return -EINVAL;
2399 return egress_ipv4_tun_info(dev, skb, info, sport, dport); 2423 return egress_ipv4_tun_info(dev, skb, info, sport, dport);
2400 return -EINVAL; 2424 } else {
2425#if IS_ENABLED(CONFIG_IPV6)
2426 struct dst_entry *ndst;
2427
2428 if (!vxlan->vn6_sock)
2429 return -EINVAL;
2430 ndst = vxlan6_get_route(vxlan, skb, 0,
2431 &info->key.u.ipv6.dst,
2432 &info->key.u.ipv6.src);
2433 if (IS_ERR(ndst))
2434 return PTR_ERR(ndst);
2435 dst_release(ndst);
2436
2437 info->key.tp_src = sport;
2438 info->key.tp_dst = dport;
2439#else /* !CONFIG_IPV6 */
2440 return -EPFNOSUPPORT;
2441#endif
2442 }
2443 return 0;
2401} 2444}
2402 2445
2403static const struct net_device_ops vxlan_netdev_ops = { 2446static const struct net_device_ops vxlan_netdev_ops = {
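The vxlan refactor extracts the IPv6 route lookup into vxlan6_get_route(), which fills a flowi6, calls ipv6_stub->ipv6_dst_lookup() and hands back both the dst_entry (or an ERR_PTR()) and the source address the lookup chose, so the transmit path and the new vxlan_fill_metadata_dst() branch share one helper. A sketch of the caller-side contract:

#include <linux/err.h>
#include <net/dst.h>

static int example_use_route(struct dst_entry *ndst)
{
	if (IS_ERR(ndst))
		return PTR_ERR(ndst);	/* e.g. no route to destination */

	/* ... transmit via ndst, or merely probe the route ... */

	dst_release(ndst);		/* drop the reference when done */
	return 0;
}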
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index e92aaf615901..89541cc90e87 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1075,11 +1075,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1075 1075
1076 used = pvc_is_used(pvc); 1076 used = pvc_is_used(pvc);
1077 1077
1078 if (type == ARPHRD_ETHER) { 1078 if (type == ARPHRD_ETHER)
1079 dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN, 1079 dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
1080 ether_setup); 1080 ether_setup);
1081 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1081 else
1082 } else
1083 dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup); 1082 dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);
1084 1083
1085 if (!dev) { 1084 if (!dev) {
@@ -1088,9 +1087,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1088 return -ENOBUFS; 1087 return -ENOBUFS;
1089 } 1088 }
1090 1089
1091 if (type == ARPHRD_ETHER) 1090 if (type == ARPHRD_ETHER) {
1091 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1092 eth_hw_addr_random(dev); 1092 eth_hw_addr_random(dev);
1093 else { 1093 } else {
1094 *(__be16*)dev->dev_addr = htons(dlci); 1094 *(__be16*)dev->dev_addr = htons(dlci);
1095 dlci_to_q922(dev->broadcast, dlci); 1095 dlci_to_q922(dev->broadcast, dlci);
1096 } 1096 }
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 5c47b011a9d7..cd39025d2abf 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -549,16 +549,12 @@ static void x25_asy_receive_buf(struct tty_struct *tty,
549 549
550static int x25_asy_open_tty(struct tty_struct *tty) 550static int x25_asy_open_tty(struct tty_struct *tty)
551{ 551{
552 struct x25_asy *sl = tty->disc_data; 552 struct x25_asy *sl;
553 int err; 553 int err;
554 554
555 if (tty->ops->write == NULL) 555 if (tty->ops->write == NULL)
556 return -EOPNOTSUPP; 556 return -EOPNOTSUPP;
557 557
558 /* First make sure we're not already connected. */
559 if (sl && sl->magic == X25_ASY_MAGIC)
560 return -EEXIST;
561
562 /* OK. Find a free X.25 channel to use. */ 558 /* OK. Find a free X.25 channel to use. */
563 sl = x25_asy_alloc(); 559 sl = x25_asy_alloc();
564 if (sl == NULL) 560 if (sl == NULL)
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index aa9bd92ac4ed..0947cc271e69 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -51,6 +51,7 @@ MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
51static const struct ath10k_hw_params ath10k_hw_params_list[] = { 51static const struct ath10k_hw_params ath10k_hw_params_list[] = {
52 { 52 {
53 .id = QCA988X_HW_2_0_VERSION, 53 .id = QCA988X_HW_2_0_VERSION,
54 .dev_id = QCA988X_2_0_DEVICE_ID,
54 .name = "qca988x hw2.0", 55 .name = "qca988x hw2.0",
55 .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, 56 .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
56 .uart_pin = 7, 57 .uart_pin = 7,
@@ -69,6 +70,25 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
69 }, 70 },
70 { 71 {
71 .id = QCA6174_HW_2_1_VERSION, 72 .id = QCA6174_HW_2_1_VERSION,
73 .dev_id = QCA6164_2_1_DEVICE_ID,
74 .name = "qca6164 hw2.1",
75 .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
76 .uart_pin = 6,
77 .otp_exe_param = 0,
78 .channel_counters_freq_hz = 88000,
79 .max_probe_resp_desc_thres = 0,
80 .fw = {
81 .dir = QCA6174_HW_2_1_FW_DIR,
82 .fw = QCA6174_HW_2_1_FW_FILE,
83 .otp = QCA6174_HW_2_1_OTP_FILE,
84 .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
85 .board_size = QCA6174_BOARD_DATA_SZ,
86 .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
87 },
88 },
89 {
90 .id = QCA6174_HW_2_1_VERSION,
91 .dev_id = QCA6174_2_1_DEVICE_ID,
72 .name = "qca6174 hw2.1", 92 .name = "qca6174 hw2.1",
73 .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, 93 .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
74 .uart_pin = 6, 94 .uart_pin = 6,
@@ -86,6 +106,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
86 }, 106 },
87 { 107 {
88 .id = QCA6174_HW_3_0_VERSION, 108 .id = QCA6174_HW_3_0_VERSION,
109 .dev_id = QCA6174_2_1_DEVICE_ID,
89 .name = "qca6174 hw3.0", 110 .name = "qca6174 hw3.0",
90 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, 111 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
91 .uart_pin = 6, 112 .uart_pin = 6,
@@ -103,6 +124,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
103 }, 124 },
104 { 125 {
105 .id = QCA6174_HW_3_2_VERSION, 126 .id = QCA6174_HW_3_2_VERSION,
127 .dev_id = QCA6174_2_1_DEVICE_ID,
106 .name = "qca6174 hw3.2", 128 .name = "qca6174 hw3.2",
107 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, 129 .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
108 .uart_pin = 6, 130 .uart_pin = 6,
@@ -121,6 +143,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
121 }, 143 },
122 { 144 {
123 .id = QCA99X0_HW_2_0_DEV_VERSION, 145 .id = QCA99X0_HW_2_0_DEV_VERSION,
146 .dev_id = QCA99X0_2_0_DEVICE_ID,
124 .name = "qca99x0 hw2.0", 147 .name = "qca99x0 hw2.0",
125 .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR, 148 .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
126 .uart_pin = 7, 149 .uart_pin = 7,
@@ -139,10 +162,31 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
139 }, 162 },
140 { 163 {
141 .id = QCA9377_HW_1_0_DEV_VERSION, 164 .id = QCA9377_HW_1_0_DEV_VERSION,
165 .dev_id = QCA9377_1_0_DEVICE_ID,
142 .name = "qca9377 hw1.0", 166 .name = "qca9377 hw1.0",
143 .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, 167 .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
144 .uart_pin = 7, 168 .uart_pin = 6,
145 .otp_exe_param = 0, 169 .otp_exe_param = 0,
170 .channel_counters_freq_hz = 88000,
171 .max_probe_resp_desc_thres = 0,
172 .fw = {
173 .dir = QCA9377_HW_1_0_FW_DIR,
174 .fw = QCA9377_HW_1_0_FW_FILE,
175 .otp = QCA9377_HW_1_0_OTP_FILE,
176 .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
177 .board_size = QCA9377_BOARD_DATA_SZ,
178 .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
179 },
180 },
181 {
182 .id = QCA9377_HW_1_1_DEV_VERSION,
183 .dev_id = QCA9377_1_0_DEVICE_ID,
184 .name = "qca9377 hw1.1",
185 .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
186 .uart_pin = 6,
187 .otp_exe_param = 0,
188 .channel_counters_freq_hz = 88000,
189 .max_probe_resp_desc_thres = 0,
146 .fw = { 190 .fw = {
147 .dir = QCA9377_HW_1_0_FW_DIR, 191 .dir = QCA9377_HW_1_0_FW_DIR,
148 .fw = QCA9377_HW_1_0_FW_FILE, 192 .fw = QCA9377_HW_1_0_FW_FILE,
@@ -1263,7 +1307,8 @@ static int ath10k_init_hw_params(struct ath10k *ar)
1263 for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { 1307 for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
1264 hw_params = &ath10k_hw_params_list[i]; 1308 hw_params = &ath10k_hw_params_list[i];
1265 1309
1266 if (hw_params->id == ar->target_version) 1310 if (hw_params->id == ar->target_version &&
1311 hw_params->dev_id == ar->dev_id)
1267 break; 1312 break;
1268 } 1313 }
1269 1314
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 018c64f4fd25..858d75f49a9f 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -636,6 +636,7 @@ struct ath10k {
636 636
637 struct ath10k_hw_params { 637 struct ath10k_hw_params {
638 u32 id; 638 u32 id;
639 u16 dev_id;
639 const char *name; 640 const char *name;
640 u32 patch_load_addr; 641 u32 patch_load_addr;
641 int uart_pin; 642 int uart_pin;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 39966a05c1cc..713c2bcea178 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -22,6 +22,12 @@
22 22
23#define ATH10K_FW_DIR "ath10k" 23#define ATH10K_FW_DIR "ath10k"
24 24
25#define QCA988X_2_0_DEVICE_ID (0x003c)
26#define QCA6164_2_1_DEVICE_ID (0x0041)
27#define QCA6174_2_1_DEVICE_ID (0x003e)
28#define QCA99X0_2_0_DEVICE_ID (0x0040)
29#define QCA9377_1_0_DEVICE_ID (0x0042)
30
25/* QCA988X 1.0 definitions (unsupported) */ 31/* QCA988X 1.0 definitions (unsupported) */
26#define QCA988X_HW_1_0_CHIP_ID_REV 0x0 32#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
27 33
@@ -42,6 +48,10 @@
42#define QCA6174_HW_3_0_VERSION 0x05020000 48#define QCA6174_HW_3_0_VERSION 0x05020000
43#define QCA6174_HW_3_2_VERSION 0x05030000 49#define QCA6174_HW_3_2_VERSION 0x05030000
44 50
51/* QCA9377 target BMI version signatures */
52#define QCA9377_HW_1_0_DEV_VERSION 0x05020000
53#define QCA9377_HW_1_1_DEV_VERSION 0x05020001
54
45enum qca6174_pci_rev { 55enum qca6174_pci_rev {
46 QCA6174_PCI_REV_1_1 = 0x11, 56 QCA6174_PCI_REV_1_1 = 0x11,
47 QCA6174_PCI_REV_1_3 = 0x13, 57 QCA6174_PCI_REV_1_3 = 0x13,
@@ -60,6 +70,11 @@ enum qca6174_chip_id_rev {
60 QCA6174_HW_3_2_CHIP_ID_REV = 10, 70 QCA6174_HW_3_2_CHIP_ID_REV = 10,
61}; 71};
62 72
73enum qca9377_chip_id_rev {
74 QCA9377_HW_1_0_CHIP_ID_REV = 0x0,
75 QCA9377_HW_1_1_CHIP_ID_REV = 0x1,
76};
77
63#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1" 78#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
64#define QCA6174_HW_2_1_FW_FILE "firmware.bin" 79#define QCA6174_HW_2_1_FW_FILE "firmware.bin"
65#define QCA6174_HW_2_1_OTP_FILE "otp.bin" 80#define QCA6174_HW_2_1_OTP_FILE "otp.bin"
@@ -85,8 +100,6 @@ enum qca6174_chip_id_rev {
85#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 100#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
86 101
87/* QCA9377 1.0 definitions */ 102/* QCA9377 1.0 definitions */
88#define QCA9377_HW_1_0_DEV_VERSION 0x05020001
89#define QCA9377_HW_1_0_CHIP_ID_REV 0x1
90#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" 103#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
91#define QCA9377_HW_1_0_FW_FILE "firmware.bin" 104#define QCA9377_HW_1_0_FW_FILE "firmware.bin"
92#define QCA9377_HW_1_0_OTP_FILE "otp.bin" 105#define QCA9377_HW_1_0_OTP_FILE "otp.bin"
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index a7411fe90cc4..95a55405ebf0 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4225,7 +4225,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4225 4225
4226static u32 get_nss_from_chainmask(u16 chain_mask) 4226static u32 get_nss_from_chainmask(u16 chain_mask)
4227{ 4227{
4228 if ((chain_mask & 0x15) == 0x15) 4228 if ((chain_mask & 0xf) == 0xf)
4229 return 4; 4229 return 4;
4230 else if ((chain_mask & 0x7) == 0x7) 4230 else if ((chain_mask & 0x7) == 0x7)
4231 return 3; 4231 return 3;
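The get_nss_from_chainmask() fix above corrects a mask typo: 0x15 is binary 10101, so the old test matched chains {0,2,4} instead of the intended contiguous chains {0,1,2,3} = 0xf. A sketch of the corrected ladder for contiguous chainmasks (the 2- and 1-chain cases are assumed, as they fall outside the quoted hunk):

#include <linux/types.h>

static u32 example_nss_from_chainmask(u16 chain_mask)
{
	if ((chain_mask & 0xf) == 0xf)	/* 0b1111: chains 0-3 */
		return 4;
	if ((chain_mask & 0x7) == 0x7)	/* 0b0111: chains 0-2 */
		return 3;
	if ((chain_mask & 0x3) == 0x3)	/* 0b0011: chains 0-1 */
		return 2;
	return 1;
}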
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 3fca200b986c..930785a724e1 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -57,12 +57,6 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
57#define ATH10K_PCI_TARGET_WAIT 3000 57#define ATH10K_PCI_TARGET_WAIT 3000
58#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 58#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
59 59
60#define QCA988X_2_0_DEVICE_ID (0x003c)
61#define QCA6164_2_1_DEVICE_ID (0x0041)
62#define QCA6174_2_1_DEVICE_ID (0x003e)
63#define QCA99X0_2_0_DEVICE_ID (0x0040)
64#define QCA9377_1_0_DEVICE_ID (0x0042)
65
66static const struct pci_device_id ath10k_pci_id_table[] = { 60static const struct pci_device_id ath10k_pci_id_table[] = {
67 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ 61 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
68 { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ 62 { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
@@ -92,7 +86,9 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
92 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 86 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
93 87
94 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, 88 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
89
95 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, 90 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
91 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
96}; 92};
97 93
98static void ath10k_pci_buffer_cleanup(struct ath10k *ar); 94static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
@@ -111,8 +107,9 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
111static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 107static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
112static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); 108static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
113static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); 109static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
110static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
114 111
115static const struct ce_attr host_ce_config_wlan[] = { 112static struct ce_attr host_ce_config_wlan[] = {
116 /* CE0: host->target HTC control and raw streams */ 113 /* CE0: host->target HTC control and raw streams */
117 { 114 {
118 .flags = CE_ATTR_FLAGS, 115 .flags = CE_ATTR_FLAGS,
@@ -128,7 +125,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
128 .src_nentries = 0, 125 .src_nentries = 0,
129 .src_sz_max = 2048, 126 .src_sz_max = 2048,
130 .dest_nentries = 512, 127 .dest_nentries = 512,
131 .recv_cb = ath10k_pci_htc_rx_cb, 128 .recv_cb = ath10k_pci_htt_htc_rx_cb,
132 }, 129 },
133 130
134 /* CE2: target->host WMI */ 131 /* CE2: target->host WMI */
@@ -217,7 +214,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
217}; 214};
218 215
219/* Target firmware's Copy Engine configuration. */ 216/* Target firmware's Copy Engine configuration. */
220static const struct ce_pipe_config target_ce_config_wlan[] = { 217static struct ce_pipe_config target_ce_config_wlan[] = {
221 /* CE0: host->target HTC control and raw streams */ 218 /* CE0: host->target HTC control and raw streams */
222 { 219 {
223 .pipenum = __cpu_to_le32(0), 220 .pipenum = __cpu_to_le32(0),
@@ -330,7 +327,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
330 * This table is derived from the CE_PCI TABLE, above. 327 * This table is derived from the CE_PCI TABLE, above.
331 * It is passed to the Target at startup for use by firmware. 328 * It is passed to the Target at startup for use by firmware.
332 */ 329 */
333static const struct service_to_pipe target_service_to_ce_map_wlan[] = { 330static struct service_to_pipe target_service_to_ce_map_wlan[] = {
334 { 331 {
335 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), 332 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
336 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 333 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
@@ -1208,6 +1205,16 @@ static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1208 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); 1205 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1209} 1206}
1210 1207
1208static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1209{
1210 /* CE4 polling needs to be done whenever the CE pipe which transports
1211 * HTT Rx (target->host) is processed.
1212 */
1213 ath10k_ce_per_engine_service(ce_state->ar, 4);
1214
1215 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1216}
1217
1211/* Called by lower (CE) layer when a send to HTT Target completes. */ 1218/* Called by lower (CE) layer when a send to HTT Target completes. */
1212static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) 1219static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1213{ 1220{
@@ -2027,6 +2034,29 @@ static int ath10k_pci_init_config(struct ath10k *ar)
2027 return 0; 2034 return 0;
2028} 2035}
2029 2036
2037static void ath10k_pci_override_ce_config(struct ath10k *ar)
2038{
2039 struct ce_attr *attr;
2040 struct ce_pipe_config *config;
2041
2042 /* For QCA6174 we're overriding the Copy Engine 5 configuration,
2043 * since it is currently used for another feature.
2044 */
2045
2046 /* Override Host's Copy Engine 5 configuration */
2047 attr = &host_ce_config_wlan[5];
2048 attr->src_sz_max = 0;
2049 attr->dest_nentries = 0;
2050
2051 /* Override Target firmware's Copy Engine configuration */
2052 config = &target_ce_config_wlan[5];
2053 config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2054 config->nbytes_max = __cpu_to_le32(2048);
2055
2056 /* Map from service/endpoint to Copy Engine */
2057 target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
2058}
2059
2030static int ath10k_pci_alloc_pipes(struct ath10k *ar) 2060static int ath10k_pci_alloc_pipes(struct ath10k *ar)
2031{ 2061{
2032 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2062 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -3020,6 +3050,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
3020 goto err_core_destroy; 3050 goto err_core_destroy;
3021 } 3051 }
3022 3052
3053 if (QCA_REV_6174(ar))
3054 ath10k_pci_override_ce_config(ar);
3055
3023 ret = ath10k_pci_alloc_pipes(ar); 3056 ret = ath10k_pci_alloc_pipes(ar);
3024 if (ret) { 3057 if (ret) {
3025 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", 3058 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
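The ath10k hunks above drop const from the three Copy Engine tables precisely so that ath10k_pci_override_ce_config() can patch them in place before ath10k_pci_alloc_pipes() consumes them. A minimal sketch of that quirk pattern, with hypothetical names and simplified types rather than the driver's real structures:

	/* Sketch only: default config tables are writable so a one-time,
	 * hardware-specific hook can adjust them before first use. */
	struct pipe_cfg {
		u32 src_sz_max;
		u32 dest_nentries;
	};

	static struct pipe_cfg host_cfg[12] = {
		/* ... CE0-CE4 elided ... */
		[5] = { .src_sz_max = 2048, .dest_nentries = 512 },
	};

	static void override_ce5(void)
	{
		/* QCA6174: firmware claims CE5 for another feature, so
		 * the host stops using it (mirrors the diff above). */
		host_cfg[5].src_sz_max = 0;
		host_cfg[5].dest_nentries = 0;
	}

Had the tables stayed const, the override hook could not have written through them at probe time.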
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 1a73c7a1da77..bf88ec3a65fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,7 +69,7 @@
69#include "iwl-agn-hw.h" 69#include "iwl-agn-hw.h"
70 70
71/* Highest firmware API version supported */ 71/* Highest firmware API version supported */
72#define IWL7260_UCODE_API_MAX 17 72#define IWL7260_UCODE_API_MAX 19
73 73
74/* Oldest version we won't warn about */ 74/* Oldest version we won't warn about */
75#define IWL7260_UCODE_API_OK 13 75#define IWL7260_UCODE_API_OK 13
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index 0116e5a4c393..9bcc0bf937d8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,7 +69,7 @@
69#include "iwl-agn-hw.h" 69#include "iwl-agn-hw.h"
70 70
71/* Highest firmware API version supported */ 71/* Highest firmware API version supported */
72#define IWL8000_UCODE_API_MAX 17 72#define IWL8000_UCODE_API_MAX 19
73 73
74/* Oldest version we won't warn about */ 74/* Oldest version we won't warn about */
75#define IWL8000_UCODE_API_OK 13 75#define IWL8000_UCODE_API_OK 13
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 85ae902df7c0..29ae58ebf223 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -309,9 +309,9 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
309 * to transmit packets to the AP, i.e. the PTK. 309 * to transmit packets to the AP, i.e. the PTK.
310 */ 310 */
311 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 311 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
312 key->hw_key_idx = 0;
313 mvm->ptk_ivlen = key->iv_len; 312 mvm->ptk_ivlen = key->iv_len;
314 mvm->ptk_icvlen = key->icv_len; 313 mvm->ptk_icvlen = key->icv_len;
314 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
315 } else { 315 } else {
316 /* 316 /*
317 * firmware only supports TSC/RSC for a single key, 317 * firmware only supports TSC/RSC for a single key,
@@ -319,12 +319,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
319 * with new ones -- this relies on mac80211 doing 319 * with new ones -- this relies on mac80211 doing
320 * list_add_tail(). 320 * list_add_tail().
321 */ 321 */
322 key->hw_key_idx = 1;
323 mvm->gtk_ivlen = key->iv_len; 322 mvm->gtk_ivlen = key->iv_len;
324 mvm->gtk_icvlen = key->icv_len; 323 mvm->gtk_icvlen = key->icv_len;
324 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
325 } 325 }
326 326
327 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
328 data->error = ret != 0; 327 data->error = ret != 0;
329out_unlock: 328out_unlock:
330 mutex_unlock(&mvm->mutex); 329 mutex_unlock(&mvm->mutex);
@@ -772,9 +771,6 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
772 */ 771 */
773 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 772 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
774 773
775 /* We reprogram keys and shouldn't allocate new key indices */
776 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
777
778 mvm->ptk_ivlen = 0; 774 mvm->ptk_ivlen = 0;
779 mvm->ptk_icvlen = 0; 775 mvm->ptk_icvlen = 0;
780 mvm->ptk_ivlen = 0; 776 mvm->ptk_ivlen = 0;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 1fb684693040..e88afac51c5d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -2941,6 +2941,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2941{ 2941{
2942 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2942 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2943 int ret; 2943 int ret;
2944 u8 key_offset;
2944 2945
2945 if (iwlwifi_mod_params.sw_crypto) { 2946 if (iwlwifi_mod_params.sw_crypto) {
2946 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); 2947 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
@@ -3006,10 +3007,14 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3006 break; 3007 break;
3007 } 3008 }
3008 3009
3010 /* in HW restart reuse the index, otherwise request a new one */
3011 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
3012 key_offset = key->hw_key_idx;
3013 else
3014 key_offset = STA_KEY_IDX_INVALID;
3015
3009 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); 3016 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
3010 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 3017 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
3011 test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
3012 &mvm->status));
3013 if (ret) { 3018 if (ret) {
3014 IWL_WARN(mvm, "set key failed\n"); 3019 IWL_WARN(mvm, "set key failed\n");
3015 /* 3020 /*
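Taken together, the d3.c and mac80211.c changes above replace the old trick of smuggling the slot through key->hw_key_idx with an explicit key_offset argument. A compressed sketch of the new calling convention, with surrounding code elided and names as in the diff:

	/* WoWLAN programming (d3.c): D3 firmware fixes the PTK at slot 0,
	 * so the slots are stated directly instead of via hw_key_idx. */
	ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);	/* PTK */
	ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);	/* GTK */

	/* Normal path (mac80211.c): reuse the old slot across a firmware
	 * restart, otherwise pass the sentinel so the driver allocates. */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		key_offset = key->hw_key_idx;
	else
		key_offset = STA_KEY_IDX_INVALID;
	ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);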
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 300a249486e4..354acbde088e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1201,7 +1201,8 @@ static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
1201 return max_offs; 1201 return max_offs;
1202} 1202}
1203 1203
1204static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, 1204static u8 iwl_mvm_get_key_sta_id(struct iwl_mvm *mvm,
1205 struct ieee80211_vif *vif,
1205 struct ieee80211_sta *sta) 1206 struct ieee80211_sta *sta)
1206{ 1207{
1207 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1208 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1218,8 +1219,21 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
1218 * station ID, then use AP's station ID. 1219 * station ID, then use AP's station ID.
1219 */ 1220 */
1220 if (vif->type == NL80211_IFTYPE_STATION && 1221 if (vif->type == NL80211_IFTYPE_STATION &&
1221 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) 1222 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1222 return mvmvif->ap_sta_id; 1223 u8 sta_id = mvmvif->ap_sta_id;
1224
1225 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1226 lockdep_is_held(&mvm->mutex));
1227 /*
1228 * It is possible that the 'sta' parameter is NULL,
1229 * for example when a GTK is removed - the sta_id will then
1230 * be the AP ID, and no station was passed by mac80211.
1231 */
1232 if (IS_ERR_OR_NULL(sta))
1233 return IWL_MVM_STATION_COUNT;
1234
1235 return sta_id;
1236 }
1223 1237
1224 return IWL_MVM_STATION_COUNT; 1238 return IWL_MVM_STATION_COUNT;
1225} 1239}
@@ -1227,7 +1241,8 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
1227static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, 1241static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1228 struct iwl_mvm_sta *mvm_sta, 1242 struct iwl_mvm_sta *mvm_sta,
1229 struct ieee80211_key_conf *keyconf, bool mcast, 1243 struct ieee80211_key_conf *keyconf, bool mcast,
1230 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags) 1244 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
1245 u8 key_offset)
1231{ 1246{
1232 struct iwl_mvm_add_sta_key_cmd cmd = {}; 1247 struct iwl_mvm_add_sta_key_cmd cmd = {};
1233 __le16 key_flags; 1248 __le16 key_flags;
@@ -1269,7 +1284,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
1269 if (mcast) 1284 if (mcast)
1270 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 1285 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
1271 1286
1272 cmd.key_offset = keyconf->hw_key_idx; 1287 cmd.key_offset = key_offset;
1273 cmd.key_flags = key_flags; 1288 cmd.key_flags = key_flags;
1274 cmd.sta_id = sta_id; 1289 cmd.sta_id = sta_id;
1275 1290
@@ -1360,6 +1375,7 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1360 struct ieee80211_vif *vif, 1375 struct ieee80211_vif *vif,
1361 struct ieee80211_sta *sta, 1376 struct ieee80211_sta *sta,
1362 struct ieee80211_key_conf *keyconf, 1377 struct ieee80211_key_conf *keyconf,
1378 u8 key_offset,
1363 bool mcast) 1379 bool mcast)
1364{ 1380{
1365 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 1381 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
@@ -1375,17 +1391,17 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1375 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 1391 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
1376 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); 1392 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
1377 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 1393 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1378 seq.tkip.iv32, p1k, 0); 1394 seq.tkip.iv32, p1k, 0, key_offset);
1379 break; 1395 break;
1380 case WLAN_CIPHER_SUITE_CCMP: 1396 case WLAN_CIPHER_SUITE_CCMP:
1381 case WLAN_CIPHER_SUITE_WEP40: 1397 case WLAN_CIPHER_SUITE_WEP40:
1382 case WLAN_CIPHER_SUITE_WEP104: 1398 case WLAN_CIPHER_SUITE_WEP104:
1383 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 1399 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1384 0, NULL, 0); 1400 0, NULL, 0, key_offset);
1385 break; 1401 break;
1386 default: 1402 default:
1387 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 1403 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1388 0, NULL, 0); 1404 0, NULL, 0, key_offset);
1389 } 1405 }
1390 1406
1391 return ret; 1407 return ret;
@@ -1433,7 +1449,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1433 struct ieee80211_vif *vif, 1449 struct ieee80211_vif *vif,
1434 struct ieee80211_sta *sta, 1450 struct ieee80211_sta *sta,
1435 struct ieee80211_key_conf *keyconf, 1451 struct ieee80211_key_conf *keyconf,
1436 bool have_key_offset) 1452 u8 key_offset)
1437{ 1453{
1438 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 1454 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1439 u8 sta_id; 1455 u8 sta_id;
@@ -1443,7 +1459,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1443 lockdep_assert_held(&mvm->mutex); 1459 lockdep_assert_held(&mvm->mutex);
1444 1460
1445 /* Get the station id from the mvm local station table */ 1461 /* Get the station id from the mvm local station table */
1446 sta_id = iwl_mvm_get_key_sta_id(vif, sta); 1462 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
1447 if (sta_id == IWL_MVM_STATION_COUNT) { 1463 if (sta_id == IWL_MVM_STATION_COUNT) {
1448 IWL_ERR(mvm, "Failed to find station id\n"); 1464 IWL_ERR(mvm, "Failed to find station id\n");
1449 return -EINVAL; 1465 return -EINVAL;
@@ -1470,18 +1486,25 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1470 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) 1486 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
1471 return -EINVAL; 1487 return -EINVAL;
1472 1488
1473 if (!have_key_offset) { 1489 /* If the key_offset is not pre-assigned, we need to find a
1474 /* 1490 * new offset to use. In normal cases, the offset is not
1475 * The D3 firmware hardcodes the PTK offset to 0, so we have to 1491 * pre-assigned, but during HW_RESTART we want to reuse the
1476 * configure it there. As a result, this workaround exists to 1492 * same indices, so we pass them when this function is called.
1477 * let the caller set the key offset (hw_key_idx), see d3.c. 1493 *
1478 */ 1494 * In D3 entry, we need to hardcode the indices (because the
1479 keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm); 1495 * firmware hardcodes the PTK offset to 0). In this case, we
1480 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) 1496 * need to make sure we don't overwrite the hw_key_idx in the
1497 * keyconf structure, because otherwise we cannot configure
1498 * the original ones back when resuming.
1499 */
1500 if (key_offset == STA_KEY_IDX_INVALID) {
1501 key_offset = iwl_mvm_set_fw_key_idx(mvm);
1502 if (key_offset == STA_KEY_IDX_INVALID)
1481 return -ENOSPC; 1503 return -ENOSPC;
1504 keyconf->hw_key_idx = key_offset;
1482 } 1505 }
1483 1506
1484 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast); 1507 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
1485 if (ret) { 1508 if (ret) {
1486 __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); 1509 __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
1487 goto end; 1510 goto end;
@@ -1495,7 +1518,8 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1495 */ 1518 */
1496 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || 1519 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
1497 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { 1520 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
1498 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast); 1521 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
1522 key_offset, !mcast);
1499 if (ret) { 1523 if (ret) {
1500 __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); 1524 __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table);
1501 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); 1525 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
@@ -1521,7 +1545,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
1521 lockdep_assert_held(&mvm->mutex); 1545 lockdep_assert_held(&mvm->mutex);
1522 1546
1523 /* Get the station id from the mvm local station table */ 1547 /* Get the station id from the mvm local station table */
1524 sta_id = iwl_mvm_get_key_sta_id(vif, sta); 1548 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
1525 1549
1526 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", 1550 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
1527 keyconf->keyidx, sta_id); 1551 keyconf->keyidx, sta_id);
@@ -1547,24 +1571,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
1547 return 0; 1571 return 0;
1548 } 1572 }
1549 1573
1550 /*
1551 * It is possible that the 'sta' parameter is NULL, and thus
1552 * there is a need to retrieve the sta from the local station table,
1553 * for example when a GTK is removed (where the sta_id will then be
1554 * the AP ID, and no station was passed by mac80211.)
1555 */
1556 if (!sta) {
1557 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1558 lockdep_is_held(&mvm->mutex));
1559 if (!sta) {
1560 IWL_ERR(mvm, "Invalid station id\n");
1561 return -EINVAL;
1562 }
1563 }
1564
1565 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
1566 return -EINVAL;
1567
1568 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); 1574 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
1569 if (ret) 1575 if (ret)
1570 return ret; 1576 return ret;
@@ -1584,7 +1590,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
1584 u16 *phase1key) 1590 u16 *phase1key)
1585{ 1591{
1586 struct iwl_mvm_sta *mvm_sta; 1592 struct iwl_mvm_sta *mvm_sta;
1587 u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta); 1593 u8 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
1588 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 1594 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1589 1595
1590 if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) 1596 if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
@@ -1602,7 +1608,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
1602 1608
1603 mvm_sta = iwl_mvm_sta_from_mac80211(sta); 1609 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1604 iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 1610 iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
1605 iv32, phase1key, CMD_ASYNC); 1611 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
1606 rcu_read_unlock(); 1612 rcu_read_unlock();
1607} 1613}
1608 1614
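The sta.c change above centralizes the "no station object" case in iwl_mvm_get_key_sta_id(): when a GTK is removed, mac80211 passes sta == NULL, and the station must be resolved from the firmware id table instead. The lookup uses the standard idiom for RCU pointers that are only ever written under a mutex; roughly:

	/* Sketch of the rcu_dereference_protected() idiom used above:
	 * fw_id_to_mac_id[] slots are updated only under mvm->mutex, so
	 * a reader holding the mutex may dereference without
	 * rcu_read_lock(), and lockdep verifies the claim. */
	lockdep_assert_held(&mvm->mutex);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));
	if (IS_ERR_OR_NULL(sta))	/* stale or never-assigned slot */
		return IWL_MVM_STATION_COUNT;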
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index eedb215eba3f..0631cc0a6d3c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -365,8 +365,8 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
365int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 365int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
366 struct ieee80211_vif *vif, 366 struct ieee80211_vif *vif,
367 struct ieee80211_sta *sta, 367 struct ieee80211_sta *sta,
368 struct ieee80211_key_conf *key, 368 struct ieee80211_key_conf *keyconf,
369 bool have_key_offset); 369 u8 key_offset);
370int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, 370int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
371 struct ieee80211_vif *vif, 371 struct ieee80211_vif *vif,
372 struct ieee80211_sta *sta, 372 struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 644b58bc5226..639761fb2bfb 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -423,14 +423,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
423/* 8000 Series */ 423/* 8000 Series */
424 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, 424 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
425 {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, 425 {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
426 {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
427 {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
428 {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
429 {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)},
426 {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, 430 {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)},
432 {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)},
433 {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)},
427 {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, 434 {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
428 {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, 435 {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
429 {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, 436 {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
430 {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, 437 {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, 438 {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
439 {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)},
432 {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, 440 {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
433 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
434 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, 441 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
435 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, 442 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
436 {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, 443 {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
@@ -438,18 +445,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
438 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, 445 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
439 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, 446 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
440 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, 447 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
448 {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
441 {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, 449 {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
450 {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
442 {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, 451 {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
443 {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, 452 {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
453 {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
454 {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
455 {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
456 {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)},
444 {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, 457 {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
458 {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)},
445 {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, 459 {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
460 {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)},
446 {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, 461 {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
462 {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)},
447 {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, 463 {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
448 {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, 464 {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
449 {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, 465 {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
450 {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, 466 {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
451 {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, 467 {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
452 {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, 468 {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
469 {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
453#endif /* CONFIG_IWLMVM */ 470#endif /* CONFIG_IWLMVM */
454 471
455 {0} 472 {0}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 6e9418ed90c2..bbb789f8990b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2272,7 +2272,7 @@ void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
2272 struct rtl_priv *rtlpriv = rtl_priv(hw); 2272 struct rtl_priv *rtlpriv = rtl_priv(hw);
2273 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 2273 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
2274 2274
2275 if (!rtlpci->int_clear) 2275 if (rtlpci->int_clear)
2276 rtl8821ae_clear_interrupt(hw);/*clear it here first*/ 2276 rtl8821ae_clear_interrupt(hw);/*clear it here first*/
2277 2277
2278 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); 2278 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 8ee141a55bc5..142bdff4ed60 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -448,7 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
448MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); 448MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
449MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); 449MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
450MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); 450MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
451MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 0)\n"); 451MODULE_PARM_DESC(int_clear, "Set to 0 to disable interrupt clear before set (default 1)\n");
452 452
453static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); 453static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
454 454
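The two rtl8821ae hunks above fix an inverted workaround: the interrupt-clear path now runs when int_clear is set (its default), and the parameter description is corrected to match. A hedged sketch of the corrected wiring, using a simplified standalone parameter rather than the driver's real mod-params structure:

	/* Sketch: boolean module parameter whose default enables the
	 * clear-before-set workaround, as the fixed description states. */
	static bool int_clear = true;
	module_param(int_clear, bool, 0444);
	MODULE_PARM_DESC(int_clear,
			 "Set to 0 to disable interrupt clear before set (default 1)\n");

	static void enable_interrupt(struct ieee80211_hw *hw)
	{
		if (int_clear)			/* was: if (!int_clear) */
			rtl8821ae_clear_interrupt(hw);
		/* ... then unmask HIMR/HIMRE as in the original ... */
	}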
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e481f3710bd3..1049c34e7d43 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
258 struct netrx_pending_operations *npo) 258 struct netrx_pending_operations *npo)
259{ 259{
260 struct xenvif_rx_meta *meta; 260 struct xenvif_rx_meta *meta;
261 struct xen_netif_rx_request *req; 261 struct xen_netif_rx_request req;
262 262
263 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 263 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
264 264
265 meta = npo->meta + npo->meta_prod++; 265 meta = npo->meta + npo->meta_prod++;
266 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; 266 meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
267 meta->gso_size = 0; 267 meta->gso_size = 0;
268 meta->size = 0; 268 meta->size = 0;
269 meta->id = req->id; 269 meta->id = req.id;
270 270
271 npo->copy_off = 0; 271 npo->copy_off = 0;
272 npo->copy_gref = req->gref; 272 npo->copy_gref = req.gref;
273 273
274 return meta; 274 return meta;
275} 275}
@@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
424 struct xenvif *vif = netdev_priv(skb->dev); 424 struct xenvif *vif = netdev_priv(skb->dev);
425 int nr_frags = skb_shinfo(skb)->nr_frags; 425 int nr_frags = skb_shinfo(skb)->nr_frags;
426 int i; 426 int i;
427 struct xen_netif_rx_request *req; 427 struct xen_netif_rx_request req;
428 struct xenvif_rx_meta *meta; 428 struct xenvif_rx_meta *meta;
429 unsigned char *data; 429 unsigned char *data;
430 int head = 1; 430 int head = 1;
@@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
443 443
444 /* Set up a GSO prefix descriptor, if necessary */ 444 /* Set up a GSO prefix descriptor, if necessary */
445 if ((1 << gso_type) & vif->gso_prefix_mask) { 445 if ((1 << gso_type) & vif->gso_prefix_mask) {
446 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 446 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
447 meta = npo->meta + npo->meta_prod++; 447 meta = npo->meta + npo->meta_prod++;
448 meta->gso_type = gso_type; 448 meta->gso_type = gso_type;
449 meta->gso_size = skb_shinfo(skb)->gso_size; 449 meta->gso_size = skb_shinfo(skb)->gso_size;
450 meta->size = 0; 450 meta->size = 0;
451 meta->id = req->id; 451 meta->id = req.id;
452 } 452 }
453 453
454 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); 454 RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
455 meta = npo->meta + npo->meta_prod++; 455 meta = npo->meta + npo->meta_prod++;
456 456
457 if ((1 << gso_type) & vif->gso_mask) { 457 if ((1 << gso_type) & vif->gso_mask) {
@@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
463 } 463 }
464 464
465 meta->size = 0; 465 meta->size = 0;
466 meta->id = req->id; 466 meta->id = req.id;
467 npo->copy_off = 0; 467 npo->copy_off = 0;
468 npo->copy_gref = req->gref; 468 npo->copy_gref = req.gref;
469 469
470 data = skb->data; 470 data = skb->data;
471 while (data < skb_tail_pointer(skb)) { 471 while (data < skb_tail_pointer(skb)) {
@@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
679 * Allow a burst big enough to transmit a jumbo packet of up to 128kB. 679 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
680 * Otherwise the interface can seize up due to insufficient credit. 680 * Otherwise the interface can seize up due to insufficient credit.
681 */ 681 */
682 max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size; 682 max_burst = max(131072UL, queue->credit_bytes);
683 max_burst = min(max_burst, 131072UL);
684 max_burst = max(max_burst, queue->credit_bytes);
685 683
686 /* Take care that adding a new chunk of credit doesn't wrap to zero. */ 684 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
687 max_credit = queue->remaining_credit + queue->credit_bytes; 685 max_credit = queue->remaining_credit + queue->credit_bytes;
@@ -711,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
711 spin_unlock_irqrestore(&queue->response_lock, flags); 709 spin_unlock_irqrestore(&queue->response_lock, flags);
712 if (cons == end) 710 if (cons == end)
713 break; 711 break;
714 txp = RING_GET_REQUEST(&queue->tx, cons++); 712 RING_COPY_REQUEST(&queue->tx, cons++, txp);
715 } while (1); 713 } while (1);
716 queue->tx.req_cons = cons; 714 queue->tx.req_cons = cons;
717} 715}
@@ -778,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
778 if (drop_err) 776 if (drop_err)
779 txp = &dropped_tx; 777 txp = &dropped_tx;
780 778
781 memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots), 779 RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
782 sizeof(*txp));
783 780
784 /* If the guest submitted a frame >= 64 KiB then 781 /* If the guest submitted a frame >= 64 KiB then
785 * first->size overflowed and following slots will 782 * first->size overflowed and following slots will
@@ -1112,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
1112 return -EBADR; 1109 return -EBADR;
1113 } 1110 }
1114 1111
1115 memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons), 1112 RING_COPY_REQUEST(&queue->tx, cons, &extra);
1116 sizeof(extra));
1117 if (unlikely(!extra.type || 1113 if (unlikely(!extra.type ||
1118 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 1114 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1119 queue->tx.req_cons = ++cons; 1115 queue->tx.req_cons = ++cons;
@@ -1322,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1322 1318
1323 idx = queue->tx.req_cons; 1319 idx = queue->tx.req_cons;
1324 rmb(); /* Ensure that we see the request before we copy it. */ 1320 rmb(); /* Ensure that we see the request before we copy it. */
1325 memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq)); 1321 RING_COPY_REQUEST(&queue->tx, idx, &txreq);
1326 1322
1327 /* Credit-based scheduling. */ 1323 /* Credit-based scheduling. */
1328 if (txreq.size > queue->remaining_credit && 1324 if (txreq.size > queue->remaining_credit &&
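Every xen-netback hunk above is the same hardening (the XSA-155 class of bugs): requests must be snapshotted out of the shared ring before use, because a pointer obtained with RING_GET_REQUEST points into guest-writable memory and invites a double fetch. A simplified sketch of the hazard and the fix, where MAX_REQ_SIZE and do_copy() are placeholders, not real symbols:

	/* UNSAFE (old): double fetch through a pointer into the ring. */
	struct xen_netif_tx_request *txp;

	txp = RING_GET_REQUEST(&queue->tx, cons);	/* guest-writable */
	if (txp->size > MAX_REQ_SIZE)			/* fetch #1: checked */
		return -EINVAL;
	do_copy(skb, txp->size);			/* fetch #2: may differ! */

	/* SAFE (new): snapshot the slot into backend-private memory, so
	 * the validated bytes and the used bytes are the same. */
	struct xen_netif_tx_request txreq;

	RING_COPY_REQUEST(&queue->tx, cons, &txreq);
	if (txreq.size > MAX_REQ_SIZE)
		return -EINVAL;
	do_copy(skb, txreq.size);

The tx_add_credit() hunk removes the remaining RING_GET_REQUEST read entirely, since the guest-supplied size was only used to bound the burst anyway.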
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 219dc206fa5f..a5fe23952586 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,4 +1,5 @@
1 1
2obj-$(CONFIG_BLK_DEV_NVME) += nvme.o 2obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
3 3
4nvme-y += pci.o scsi.o lightnvm.o 4lightnvm-$(CONFIG_NVM) := lightnvm.o
5nvme-y += pci.o scsi.o $(lightnvm-y)
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index e0b7b95813bc..15f2acb4d5cd 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -22,8 +22,6 @@
22 22
23#include "nvme.h" 23#include "nvme.h"
24 24
25#ifdef CONFIG_NVM
26
27#include <linux/nvme.h> 25#include <linux/nvme.h>
28#include <linux/bitops.h> 26#include <linux/bitops.h>
29#include <linux/lightnvm.h> 27#include <linux/lightnvm.h>
@@ -93,7 +91,7 @@ struct nvme_nvm_l2ptbl {
93 __le16 cdw14[6]; 91 __le16 cdw14[6];
94}; 92};
95 93
96struct nvme_nvm_bbtbl { 94struct nvme_nvm_getbbtbl {
97 __u8 opcode; 95 __u8 opcode;
98 __u8 flags; 96 __u8 flags;
99 __u16 command_id; 97 __u16 command_id;
@@ -101,10 +99,23 @@ struct nvme_nvm_bbtbl {
101 __u64 rsvd[2]; 99 __u64 rsvd[2];
102 __le64 prp1; 100 __le64 prp1;
103 __le64 prp2; 101 __le64 prp2;
104 __le32 prp1_len; 102 __le64 spba;
105 __le32 prp2_len; 103 __u32 rsvd4[4];
106 __le32 lbb; 104};
107 __u32 rsvd11[3]; 105
106struct nvme_nvm_setbbtbl {
107 __u8 opcode;
108 __u8 flags;
109 __u16 command_id;
110 __le32 nsid;
111 __le64 rsvd[2];
112 __le64 prp1;
113 __le64 prp2;
114 __le64 spba;
115 __le16 nlb;
116 __u8 value;
117 __u8 rsvd3;
118 __u32 rsvd4[3];
108}; 119};
109 120
110struct nvme_nvm_erase_blk { 121struct nvme_nvm_erase_blk {
@@ -129,8 +140,8 @@ struct nvme_nvm_command {
129 struct nvme_nvm_hb_rw hb_rw; 140 struct nvme_nvm_hb_rw hb_rw;
130 struct nvme_nvm_ph_rw ph_rw; 141 struct nvme_nvm_ph_rw ph_rw;
131 struct nvme_nvm_l2ptbl l2p; 142 struct nvme_nvm_l2ptbl l2p;
132 struct nvme_nvm_bbtbl get_bb; 143 struct nvme_nvm_getbbtbl get_bb;
133 struct nvme_nvm_bbtbl set_bb; 144 struct nvme_nvm_setbbtbl set_bb;
134 struct nvme_nvm_erase_blk erase; 145 struct nvme_nvm_erase_blk erase;
135 }; 146 };
136}; 147};
@@ -142,11 +153,13 @@ struct nvme_nvm_id_group {
142 __u8 num_ch; 153 __u8 num_ch;
143 __u8 num_lun; 154 __u8 num_lun;
144 __u8 num_pln; 155 __u8 num_pln;
156 __u8 rsvd1;
145 __le16 num_blk; 157 __le16 num_blk;
146 __le16 num_pg; 158 __le16 num_pg;
147 __le16 fpg_sz; 159 __le16 fpg_sz;
148 __le16 csecs; 160 __le16 csecs;
149 __le16 sos; 161 __le16 sos;
162 __le16 rsvd2;
150 __le32 trdt; 163 __le32 trdt;
151 __le32 trdm; 164 __le32 trdm;
152 __le32 tprt; 165 __le32 tprt;
@@ -154,8 +167,9 @@ struct nvme_nvm_id_group {
154 __le32 tbet; 167 __le32 tbet;
155 __le32 tbem; 168 __le32 tbem;
156 __le32 mpos; 169 __le32 mpos;
170 __le32 mccap;
157 __le16 cpar; 171 __le16 cpar;
158 __u8 reserved[913]; 172 __u8 reserved[906];
159} __packed; 173} __packed;
160 174
161struct nvme_nvm_addr_format { 175struct nvme_nvm_addr_format {
@@ -178,15 +192,28 @@ struct nvme_nvm_id {
178 __u8 ver_id; 192 __u8 ver_id;
179 __u8 vmnt; 193 __u8 vmnt;
180 __u8 cgrps; 194 __u8 cgrps;
181 __u8 res[5]; 195 __u8 res;
182 __le32 cap; 196 __le32 cap;
183 __le32 dom; 197 __le32 dom;
184 struct nvme_nvm_addr_format ppaf; 198 struct nvme_nvm_addr_format ppaf;
185 __u8 ppat; 199 __u8 resv[228];
186 __u8 resv[223];
187 struct nvme_nvm_id_group groups[4]; 200 struct nvme_nvm_id_group groups[4];
188} __packed; 201} __packed;
189 202
203struct nvme_nvm_bb_tbl {
204 __u8 tblid[4];
205 __le16 verid;
206 __le16 revid;
207 __le32 rvsd1;
208 __le32 tblks;
209 __le32 tfact;
210 __le32 tgrown;
211 __le32 tdresv;
212 __le32 thresv;
213 __le32 rsvd2[8];
214 __u8 blk[0];
215};
216
190/* 217/*
191 * Check we didn't inadvertently grow the command struct 218 * Check we didn't inadvertently grow the command struct
192 */ 219 */
@@ -195,12 +222,14 @@ static inline void _nvme_nvm_check_size(void)
195 BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); 222 BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
196 BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64); 223 BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
197 BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); 224 BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
198 BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64); 225 BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
226 BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
199 BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64); 227 BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
200 BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); 228 BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
201 BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); 229 BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
202 BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128); 230 BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
203 BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096); 231 BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
232 BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512);
204} 233}
205 234
206static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) 235static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
@@ -234,6 +263,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
234 dst->tbet = le32_to_cpu(src->tbet); 263 dst->tbet = le32_to_cpu(src->tbet);
235 dst->tbem = le32_to_cpu(src->tbem); 264 dst->tbem = le32_to_cpu(src->tbem);
236 dst->mpos = le32_to_cpu(src->mpos); 265 dst->mpos = le32_to_cpu(src->mpos);
266 dst->mccap = le32_to_cpu(src->mccap);
237 267
238 dst->cpar = le16_to_cpu(src->cpar); 268 dst->cpar = le16_to_cpu(src->cpar);
239 } 269 }
@@ -241,9 +271,10 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
241 return 0; 271 return 0;
242} 272}
243 273
244static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) 274static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
245{ 275{
246 struct nvme_ns *ns = q->queuedata; 276 struct nvme_ns *ns = nvmdev->q->queuedata;
277 struct nvme_dev *dev = ns->dev;
247 struct nvme_nvm_id *nvme_nvm_id; 278 struct nvme_nvm_id *nvme_nvm_id;
248 struct nvme_nvm_command c = {}; 279 struct nvme_nvm_command c = {};
249 int ret; 280 int ret;
@@ -256,8 +287,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
256 if (!nvme_nvm_id) 287 if (!nvme_nvm_id)
257 return -ENOMEM; 288 return -ENOMEM;
258 289
259 ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id, 290 ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
260 sizeof(struct nvme_nvm_id)); 291 nvme_nvm_id, sizeof(struct nvme_nvm_id));
261 if (ret) { 292 if (ret) {
262 ret = -EIO; 293 ret = -EIO;
263 goto out; 294 goto out;
@@ -268,6 +299,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
268 nvm_id->cgrps = nvme_nvm_id->cgrps; 299 nvm_id->cgrps = nvme_nvm_id->cgrps;
269 nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); 300 nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
270 nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); 301 nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
302 memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf,
303 sizeof(struct nvme_nvm_addr_format));
271 304
272 ret = init_grps(nvm_id, nvme_nvm_id); 305 ret = init_grps(nvm_id, nvme_nvm_id);
273out: 306out:
@@ -275,13 +308,13 @@ out:
275 return ret; 308 return ret;
276} 309}
277 310
278static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, 311static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
279 nvm_l2p_update_fn *update_l2p, void *priv) 312 nvm_l2p_update_fn *update_l2p, void *priv)
280{ 313{
281 struct nvme_ns *ns = q->queuedata; 314 struct nvme_ns *ns = nvmdev->q->queuedata;
282 struct nvme_dev *dev = ns->dev; 315 struct nvme_dev *dev = ns->dev;
283 struct nvme_nvm_command c = {}; 316 struct nvme_nvm_command c = {};
284 u32 len = queue_max_hw_sectors(q) << 9; 317 u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
285 u32 nlb_pr_rq = len / sizeof(u64); 318 u32 nlb_pr_rq = len / sizeof(u64);
286 u64 cmd_slba = slba; 319 u64 cmd_slba = slba;
287 void *entries; 320 void *entries;
@@ -299,8 +332,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
299 c.l2p.slba = cpu_to_le64(cmd_slba); 332 c.l2p.slba = cpu_to_le64(cmd_slba);
300 c.l2p.nlb = cpu_to_le32(cmd_nlb); 333 c.l2p.nlb = cpu_to_le32(cmd_nlb);
301 334
302 ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, 335 ret = nvme_submit_sync_cmd(dev->admin_q,
303 entries, len); 336 (struct nvme_command *)&c, entries, len);
304 if (ret) { 337 if (ret) {
305 dev_err(dev->dev, "L2P table transfer failed (%d)\n", 338 dev_err(dev->dev, "L2P table transfer failed (%d)\n",
306 ret); 339 ret);
@@ -322,43 +355,84 @@ out:
322 return ret; 355 return ret;
323} 356}
324 357
325static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid, 358static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
326 unsigned int nr_blocks, 359 int nr_blocks, nvm_bb_update_fn *update_bbtbl,
327 nvm_bb_update_fn *update_bbtbl, void *priv) 360 void *priv)
328{ 361{
362 struct request_queue *q = nvmdev->q;
329 struct nvme_ns *ns = q->queuedata; 363 struct nvme_ns *ns = q->queuedata;
330 struct nvme_dev *dev = ns->dev; 364 struct nvme_dev *dev = ns->dev;
331 struct nvme_nvm_command c = {}; 365 struct nvme_nvm_command c = {};
332 void *bb_bitmap; 366 struct nvme_nvm_bb_tbl *bb_tbl;
333 u16 bb_bitmap_size; 367 int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
334 int ret = 0; 368 int ret = 0;
335 369
336 c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; 370 c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
337 c.get_bb.nsid = cpu_to_le32(ns->ns_id); 371 c.get_bb.nsid = cpu_to_le32(ns->ns_id);
338 c.get_bb.lbb = cpu_to_le32(lunid); 372 c.get_bb.spba = cpu_to_le64(ppa.ppa);
339 bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE;
340 bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL);
341 if (!bb_bitmap)
342 return -ENOMEM;
343 373
344 bitmap_zero(bb_bitmap, nr_blocks); 374 bb_tbl = kzalloc(tblsz, GFP_KERNEL);
375 if (!bb_tbl)
376 return -ENOMEM;
345 377
346 ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap, 378 ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
347 bb_bitmap_size); 379 bb_tbl, tblsz);
348 if (ret) { 380 if (ret) {
349 dev_err(dev->dev, "get bad block table failed (%d)\n", ret); 381 dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
350 ret = -EIO; 382 ret = -EIO;
351 goto out; 383 goto out;
352 } 384 }
353 385
354 ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv); 386 if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
387 bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
388 dev_err(dev->dev, "bbt format mismatch\n");
389 ret = -EINVAL;
390 goto out;
391 }
392
393 if (le16_to_cpu(bb_tbl->verid) != 1) {
394 ret = -EINVAL;
395 dev_err(dev->dev, "bbt version not supported\n");
396 goto out;
397 }
398
399 if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
400 ret = -EINVAL;
401 dev_err(dev->dev, "bbt unexpected blocks returned (%u != %u)\n",
402 le32_to_cpu(bb_tbl->tblks), nr_blocks);
403 goto out;
404 }
405
406 ppa = dev_to_generic_addr(nvmdev, ppa);
407 ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
355 if (ret) { 408 if (ret) {
356 ret = -EINTR; 409 ret = -EINTR;
357 goto out; 410 goto out;
358 } 411 }
359 412
360out: 413out:
361 kfree(bb_bitmap); 414 kfree(bb_tbl);
415 return ret;
416}
417
418static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
419 int type)
420{
421 struct nvme_ns *ns = nvmdev->q->queuedata;
422 struct nvme_dev *dev = ns->dev;
423 struct nvme_nvm_command c = {};
424 int ret = 0;
425
426 c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
427 c.set_bb.nsid = cpu_to_le32(ns->ns_id);
428 c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
429 c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
430 c.set_bb.value = type;
431
432 ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
433 NULL, 0);
434 if (ret)
435 dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
362 return ret; 436 return ret;
363} 437}
364 438
@@ -381,7 +455,7 @@ static void nvme_nvm_end_io(struct request *rq, int error)
381 struct nvm_rq *rqd = rq->end_io_data; 455 struct nvm_rq *rqd = rq->end_io_data;
382 struct nvm_dev *dev = rqd->dev; 456 struct nvm_dev *dev = rqd->dev;
383 457
384 if (dev->mt->end_io(rqd, error)) 458 if (dev->mt && dev->mt->end_io(rqd, error))
385 pr_err("nvme: err status: %x result: %lx\n", 459 pr_err("nvme: err status: %x result: %lx\n",
386 rq->errors, (unsigned long)rq->special); 460 rq->errors, (unsigned long)rq->special);
387 461
@@ -389,8 +463,9 @@ static void nvme_nvm_end_io(struct request *rq, int error)
389 blk_mq_free_request(rq); 463 blk_mq_free_request(rq);
390} 464}
391 465
392static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) 466static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
393{ 467{
468 struct request_queue *q = dev->q;
394 struct nvme_ns *ns = q->queuedata; 469 struct nvme_ns *ns = q->queuedata;
395 struct request *rq; 470 struct request *rq;
396 struct bio *bio = rqd->bio; 471 struct bio *bio = rqd->bio;
@@ -428,8 +503,9 @@ static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
428 return 0; 503 return 0;
429} 504}
430 505
431static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd) 506static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
432{ 507{
508 struct request_queue *q = dev->q;
433 struct nvme_ns *ns = q->queuedata; 509 struct nvme_ns *ns = q->queuedata;
434 struct nvme_nvm_command c = {}; 510 struct nvme_nvm_command c = {};
435 511
@@ -441,9 +517,9 @@ static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
441 return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0); 517 return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
442} 518}
443 519
444static void *nvme_nvm_create_dma_pool(struct request_queue *q, char *name) 520static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
445{ 521{
446 struct nvme_ns *ns = q->queuedata; 522 struct nvme_ns *ns = nvmdev->q->queuedata;
447 struct nvme_dev *dev = ns->dev; 523 struct nvme_dev *dev = ns->dev;
448 524
449 return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0); 525 return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
@@ -456,7 +532,7 @@ static void nvme_nvm_destroy_dma_pool(void *pool)
456 dma_pool_destroy(dma_pool); 532 dma_pool_destroy(dma_pool);
457} 533}
458 534
459static void *nvme_nvm_dev_dma_alloc(struct request_queue *q, void *pool, 535static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
460 gfp_t mem_flags, dma_addr_t *dma_handler) 536 gfp_t mem_flags, dma_addr_t *dma_handler)
461{ 537{
462 return dma_pool_alloc(pool, mem_flags, dma_handler); 538 return dma_pool_alloc(pool, mem_flags, dma_handler);
@@ -474,6 +550,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
474 .get_l2p_tbl = nvme_nvm_get_l2p_tbl, 550 .get_l2p_tbl = nvme_nvm_get_l2p_tbl,
475 551
476 .get_bb_tbl = nvme_nvm_get_bb_tbl, 552 .get_bb_tbl = nvme_nvm_get_bb_tbl,
553 .set_bb_tbl = nvme_nvm_set_bb_tbl,
477 554
478 .submit_io = nvme_nvm_submit_io, 555 .submit_io = nvme_nvm_submit_io,
479 .erase_block = nvme_nvm_erase_block, 556 .erase_block = nvme_nvm_erase_block,
@@ -496,31 +573,27 @@ void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
496 nvm_unregister(disk_name); 573 nvm_unregister(disk_name);
497} 574}
498 575
576/* move to a shared header when used in more than one place. */
577#define PCI_VENDOR_ID_CNEX 0x1d1d
578#define PCI_DEVICE_ID_CNEX_WL 0x2807
579#define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f
580
499int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) 581int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
500{ 582{
501 struct nvme_dev *dev = ns->dev; 583 struct nvme_dev *dev = ns->dev;
502 struct pci_dev *pdev = to_pci_dev(dev->dev); 584 struct pci_dev *pdev = to_pci_dev(dev->dev);
503 585
504 /* QEMU NVMe simulator - PCI ID + Vendor specific bit */ 586 /* QEMU NVMe simulator - PCI ID + Vendor specific bit */
505 if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x5845 && 587 if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
588 pdev->device == PCI_DEVICE_ID_CNEX_QEMU &&
506 id->vs[0] == 0x1) 589 id->vs[0] == 0x1)
507 return 1; 590 return 1;
508 591
509 /* CNEX Labs - PCI ID + Vendor specific bit */ 592 /* CNEX Labs - PCI ID + Vendor specific bit */
510 if (pdev->vendor == 0x1d1d && pdev->device == 0x2807 && 593 if (pdev->vendor == PCI_VENDOR_ID_CNEX &&
594 pdev->device == PCI_DEVICE_ID_CNEX_WL &&
511 id->vs[0] == 0x1) 595 id->vs[0] == 0x1)
512 return 1; 596 return 1;
513 597
514 return 0; 598 return 0;
515} 599}
516#else
517int nvme_nvm_register(struct request_queue *q, char *disk_name)
518{
519 return 0;
520}
521void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};
522int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
523{
524 return 0;
525}
526#endif /* CONFIG_NVM */
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index fdb4e5bad9ac..044253dca30a 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -136,8 +136,22 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
136int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg); 136int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
137int nvme_sg_get_version_num(int __user *ip); 137int nvme_sg_get_version_num(int __user *ip);
138 138
139#ifdef CONFIG_NVM
139int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id); 140int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
140int nvme_nvm_register(struct request_queue *q, char *disk_name); 141int nvme_nvm_register(struct request_queue *q, char *disk_name);
141void nvme_nvm_unregister(struct request_queue *q, char *disk_name); 142void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
143#else
144static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
145{
146 return 0;
147}
148
149static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}
150
151static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
152{
153 return 0;
154}
155#endif /* CONFIG_NVM */
142 156
143#endif /* _NVME_H */ 157#endif /* _NVME_H */
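The nvme.h hunk above applies the standard kernel idiom for optional features: real declarations live behind the CONFIG_NVM guard, while static inline no-op stubs keep callers compiling (and folding away) when the option is off. A generic sketch of the idiom, using a hypothetical CONFIG_FOO feature:

	/* Sketch, hypothetical feature: callers never need their own ifdefs. */
	#ifdef CONFIG_FOO
	int foo_register(struct request_queue *q, char *disk_name);
	void foo_unregister(struct request_queue *q, char *disk_name);
	#else
	static inline int foo_register(struct request_queue *q, char *disk_name)
	{
		return 0;	/* success: the feature is simply absent */
	}

	static inline void foo_unregister(struct request_queue *q, char *disk_name)
	{
	}
	#endif /* CONFIG_FOO */

Moving the stubs from lightnvm.c into the header is what lets the Makefile hunk compile lightnvm.o only when CONFIG_NVM is set.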
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8187df204695..9e294ff4e652 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
896 goto retry_cmd; 896 goto retry_cmd;
897 } 897 }
898 if (blk_integrity_rq(req)) { 898 if (blk_integrity_rq(req)) {
899 if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) 899 if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) {
900 dma_unmap_sg(dev->dev, iod->sg, iod->nents,
901 dma_dir);
900 goto error_cmd; 902 goto error_cmd;
903 }
901 904
902 sg_init_table(iod->meta_sg, 1); 905 sg_init_table(iod->meta_sg, 1);
903 if (blk_rq_map_integrity_sg( 906 if (blk_rq_map_integrity_sg(
904 req->q, req->bio, iod->meta_sg) != 1) 907 req->q, req->bio, iod->meta_sg) != 1) {
908 dma_unmap_sg(dev->dev, iod->sg, iod->nents,
909 dma_dir);
905 goto error_cmd; 910 goto error_cmd;
911 }
906 912
907 if (rq_data_dir(req)) 913 if (rq_data_dir(req))
908 nvme_dif_remap(req, nvme_dif_prep); 914 nvme_dif_remap(req, nvme_dif_prep);
909 915
910 if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) 916 if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) {
917 dma_unmap_sg(dev->dev, iod->sg, iod->nents,
918 dma_dir);
911 goto error_cmd; 919 goto error_cmd;
920 }
912 } 921 }
913 } 922 }
914 923
@@ -968,7 +977,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
968 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) 977 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
969 return; 978 return;
970 979
971 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); 980 if (likely(nvmeq->cq_vector >= 0))
981 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
972 nvmeq->cq_head = head; 982 nvmeq->cq_head = head;
973 nvmeq->cq_phase = phase; 983 nvmeq->cq_phase = phase;
974 984
@@ -1727,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1727 u32 aqa; 1737 u32 aqa;
1728 u64 cap = lo_hi_readq(&dev->bar->cap); 1738 u64 cap = lo_hi_readq(&dev->bar->cap);
1729 struct nvme_queue *nvmeq; 1739 struct nvme_queue *nvmeq;
1730 unsigned page_shift = PAGE_SHIFT; 1740 /*
1741 * default to a 4K page size, with the intention to update this
1742 * path in the future to accommodate architectures with differing
1743 * kernel and IO page sizes.
1744 */
1745 unsigned page_shift = 12;
1731 unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12; 1746 unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
1732 unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
1733 1747
1734 if (page_shift < dev_page_min) { 1748 if (page_shift < dev_page_min) {
1735 dev_err(dev->dev, 1749 dev_err(dev->dev,
@@ -1738,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1738 1 << page_shift); 1752 1 << page_shift);
1739 return -ENODEV; 1753 return -ENODEV;
1740 } 1754 }
1741 if (page_shift > dev_page_max) {
1742 dev_info(dev->dev,
1743 "Device maximum page size (%u) smaller than "
1744 "host (%u); enabling work-around\n",
1745 1 << dev_page_max, 1 << page_shift);
1746 page_shift = dev_page_max;
1747 }
1748 1755
1749 dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ? 1756 dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
1750 NVME_CAP_NSSRC(cap) : 0; 1757 NVME_CAP_NSSRC(cap) : 0;
@@ -2268,7 +2275,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
2268 if (dev->max_hw_sectors) { 2275 if (dev->max_hw_sectors) {
2269 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); 2276 blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
2270 blk_queue_max_segments(ns->queue, 2277 blk_queue_max_segments(ns->queue,
2271 ((dev->max_hw_sectors << 9) / dev->page_size) + 1); 2278 (dev->max_hw_sectors / (dev->page_size >> 9)) + 1);
2272 } 2279 }
2273 if (dev->stripe_size) 2280 if (dev->stripe_size)
2274 blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); 2281 blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
@@ -2701,6 +2708,18 @@ static int nvme_dev_map(struct nvme_dev *dev)
2701 dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); 2708 dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
2702 dev->db_stride = 1 << NVME_CAP_STRIDE(cap); 2709 dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
2703 dev->dbs = ((void __iomem *)dev->bar) + 4096; 2710 dev->dbs = ((void __iomem *)dev->bar) + 4096;
2711
2712 /*
2713 * Temporary fix for the Apple controller found in the MacBook8,1 and
2714 * some MacBook7,1 to avoid controller resets and data loss.
2715 */
2716 if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
2717 dev->q_depth = 2;
2718 dev_warn(dev->dev, "detected Apple NVMe controller, set "
2719 "queue depth=%u to work around controller resets\n",
2720 dev->q_depth);
2721 }
2722
2704 if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) 2723 if (readl(&dev->bar->vs) >= NVME_VS(1, 2))
2705 dev->cmb = nvme_map_cmb(dev); 2724 dev->cmb = nvme_map_cmb(dev);
2706 2725
@@ -2787,6 +2806,10 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq)
2787{ 2806{
2788 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; 2807 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
2789 nvme_put_dq(dq); 2808 nvme_put_dq(dq);
2809
2810 spin_lock_irq(&nvmeq->q_lock);
2811 nvme_process_cq(nvmeq);
2812 spin_unlock_irq(&nvmeq->q_lock);
2790} 2813}
2791 2814
2792static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, 2815static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
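The first pci.c hunk above plugs a mapping leak: once dma_map_sg() has succeeded, every early exit must unmap before bailing out. The shape of the fix as a stripped-down sketch, where validate() stands in for the integrity-segment checks in the diff:

	/* Sketch: pair every post-mapping failure path with dma_unmap_sg(). */
	int nents = dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir);

	if (!nents)
		goto out;		/* nothing mapped, nothing to undo */

	if (validate(req) != 1) {
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir); /* the fix */
		goto error_cmd;
	}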
diff --git a/drivers/of/address.c b/drivers/of/address.c
index cd53fe4a0c86..9582c5703b3c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -485,9 +485,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
485 int rone; 485 int rone;
486 u64 offset = OF_BAD_ADDR; 486 u64 offset = OF_BAD_ADDR;
487 487
488 /* Normally, an absence of a "ranges" property means we are 488 /*
489 * Normally, an absence of a "ranges" property means we are
489 * crossing a non-translatable boundary, and thus the addresses 490 * crossing a non-translatable boundary, and thus the addresses
490 * below the current not cannot be converted to CPU physical ones. 491 * below the current cannot be converted to CPU physical ones.
491 * Unfortunately, while this is very clear in the spec, it's not 492 * Unfortunately, while this is very clear in the spec, it's not
492 * what Apple understood, and they do have things like /uni-n or 493 * what Apple understood, and they do have things like /uni-n or
493 * /ht nodes with no "ranges" property and a lot of perfectly 494 * /ht nodes with no "ranges" property and a lot of perfectly
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index d2430298a309..655f79db7899 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/initrd.h> 14#include <linux/initrd.h>
15#include <linux/memblock.h> 15#include <linux/memblock.h>
16#include <linux/mutex.h>
16#include <linux/of.h> 17#include <linux/of.h>
17#include <linux/of_fdt.h> 18#include <linux/of_fdt.h>
18#include <linux/of_reserved_mem.h> 19#include <linux/of_reserved_mem.h>
@@ -436,6 +437,8 @@ static void *kernel_tree_alloc(u64 size, u64 align)
436 return kzalloc(size, GFP_KERNEL); 437 return kzalloc(size, GFP_KERNEL);
437} 438}
438 439
440static DEFINE_MUTEX(of_fdt_unflatten_mutex);
441
439/** 442/**
440 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob 443 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
441 * 444 *
@@ -447,7 +450,9 @@ static void *kernel_tree_alloc(u64 size, u64 align)
447void of_fdt_unflatten_tree(const unsigned long *blob, 450void of_fdt_unflatten_tree(const unsigned long *blob,
448 struct device_node **mynodes) 451 struct device_node **mynodes)
449{ 452{
453 mutex_lock(&of_fdt_unflatten_mutex);
450 __unflatten_device_tree(blob, mynodes, &kernel_tree_alloc); 454 __unflatten_device_tree(blob, mynodes, &kernel_tree_alloc);
455 mutex_unlock(&of_fdt_unflatten_mutex);
451} 456}
452EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree); 457EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
453 458
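
The new of_fdt_unflatten_mutex serializes every caller of of_fdt_unflatten_tree() around the shared unflatten step. Reduced to its essentials, the pattern is a file-scope mutex wrapping a non-reentrant helper; unflatten_nonreentrant() below is a hypothetical stand-in for __unflatten_device_tree():

#include <linux/mutex.h>
#include <linux/of.h>

/* hypothetical stand-in for the non-reentrant __unflatten_device_tree() */
void unflatten_nonreentrant(const unsigned long *blob,
                            struct device_node **nodes);

static DEFINE_MUTEX(unflatten_mutex);

void unflatten_tree_safe(const unsigned long *blob, struct device_node **nodes)
{
        mutex_lock(&unflatten_mutex);
        unflatten_nonreentrant(blob, nodes);
        mutex_unlock(&unflatten_mutex);
}
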
@@ -1041,7 +1046,7 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
1041int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, 1046int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
1042 phys_addr_t size, bool nomap) 1047 phys_addr_t size, bool nomap)
1043{ 1048{
1044 pr_err("Reserved memory not supported, ignoring range 0x%pa - 0x%pa%s\n", 1049 pr_err("Reserved memory not supported, ignoring range %pa - %pa%s\n",
1045 &base, &size, nomap ? " (nomap)" : ""); 1050 &base, &size, nomap ? " (nomap)" : "");
1046 return -ENOSYS; 1051 return -ENOSYS;
1047} 1052}
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 902b89be7217..4fa916dffc91 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -53,7 +53,7 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
53 * Returns a pointer to the interrupt parent node, or NULL if the interrupt 53 * Returns a pointer to the interrupt parent node, or NULL if the interrupt
54 * parent could not be determined. 54 * parent could not be determined.
55 */ 55 */
56static struct device_node *of_irq_find_parent(struct device_node *child) 56struct device_node *of_irq_find_parent(struct device_node *child)
57{ 57{
58 struct device_node *p; 58 struct device_node *p;
59 const __be32 *parp; 59 const __be32 *parp;
@@ -77,6 +77,7 @@ static struct device_node *of_irq_find_parent(struct device_node *child)
77 77
78 return p; 78 return p;
79} 79}
80EXPORT_SYMBOL_GPL(of_irq_find_parent);
80 81
81/** 82/**
82 * of_irq_parse_raw - Low level interrupt tree parsing 83 * of_irq_parse_raw - Low level interrupt tree parsing
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index be77e75c587d..1a3556a9e9ea 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -206,7 +206,13 @@ static int __init __rmem_cmp(const void *a, const void *b)
206{ 206{
207 const struct reserved_mem *ra = a, *rb = b; 207 const struct reserved_mem *ra = a, *rb = b;
208 208
209 return ra->base - rb->base; 209 if (ra->base < rb->base)
210 return -1;
211
212 if (ra->base > rb->base)
213 return 1;
214
215 return 0;
210} 216}
211 217
212static void __init __rmem_check_for_overlap(void) 218static void __init __rmem_check_for_overlap(void)
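
The old comparator returned ra->base - rb->base, truncating a 64-bit difference into the comparator's int return value, so bases that differ only above bit 31 compared as equal and large differences could even flip sign. A self-contained user-space demonstration of the failure and of the explicit three-way fix:

/* Demo of why subtracting 64-bit bases in an int-returning comparator is
 * wrong: the difference is truncated and can compare-equal or flip sign. */
#include <stdint.h>
#include <stdio.h>

static int cmp_sub(uint64_t a, uint64_t b)  { return (int)(a - b); }
static int cmp_safe(uint64_t a, uint64_t b) { return (a < b) ? -1 : (a > b) ? 1 : 0; }

int main(void)
{
        uint64_t a = 0x100000000ULL, b = 0;  /* a > b, low 32 bits equal */

        /* prints 0 for the subtraction, 1 for the three-way form */
        printf("subtract: %d, three-way: %d\n", cmp_sub(a, b), cmp_safe(a, b));
        return 0;
}
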
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index 761e77bfce5d..e56f1569f6c3 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -104,7 +104,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
104 struct scatterlist *contig_sg; /* contig chunk head */ 104 struct scatterlist *contig_sg; /* contig chunk head */
105 unsigned long dma_offset, dma_len; /* start/len of DMA stream */ 105 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
106 unsigned int n_mappings = 0; 106 unsigned int n_mappings = 0;
107 unsigned int max_seg_size = dma_get_max_seg_size(dev); 107 unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
108 (unsigned)DMA_CHUNK_SIZE);
109 unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
110 if (max_seg_boundary) /* check if the addition above didn't overflow */
111 max_seg_size = min(max_seg_size, max_seg_boundary);
108 112
109 while (nents > 0) { 113 while (nents > 0) {
110 114
@@ -138,14 +142,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
138 142
139 /* 143 /*
140 ** First make sure current dma stream won't 144 ** First make sure current dma stream won't
141 ** exceed DMA_CHUNK_SIZE if we coalesce the 145 ** exceed max_seg_size if we coalesce the
142 ** next entry. 146 ** next entry.
143 */ 147 */
144 if(unlikely(ALIGN(dma_len + dma_offset + startsg->length, 148 if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
145 IOVP_SIZE) > DMA_CHUNK_SIZE)) 149 max_seg_size))
146 break;
147
148 if (startsg->length + dma_len > max_seg_size)
149 break; 150 break;
150 151
151 /* 152 /*
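
The rewritten prologue clamps the coalescing limit to the smallest of DMA_CHUNK_SIZE, the device's maximum segment size, and the segment boundary, guarding against dma_get_seg_boundary() returning ~0UL, in which case the +1 wraps to zero. A sketch of just that computation, with DMA_CHUNK_SIZE standing for the platform-defined constant this file already uses:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>       /* min, min_t */

static unsigned int coalesce_limit(struct device *dev)
{
        unsigned int limit = min(dma_get_max_seg_size(dev),
                                 (unsigned int)DMA_CHUNK_SIZE);
        unsigned long boundary = dma_get_seg_boundary(dev) + 1;

        /* boundary == 0 means dma_get_seg_boundary() was ~0UL and +1 wrapped */
        if (boundary)
                limit = min_t(unsigned long, limit, boundary);
        return limit;
}
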
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index e5dda38bdde5..99da549d5d06 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -55,8 +55,10 @@
55#define TLP_CFG_DW2(bus, devfn, offset) \ 55#define TLP_CFG_DW2(bus, devfn, offset) \
56 (((bus) << 24) | ((devfn) << 16) | (offset)) 56 (((bus) << 24) | ((devfn) << 16) | (offset))
57#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) 57#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
58#define TLP_COMP_STATUS(s) (((s) >> 12) & 7)
58#define TLP_HDR_SIZE 3 59#define TLP_HDR_SIZE 3
59#define TLP_LOOP 500 60#define TLP_LOOP 500
61#define RP_DEVFN 0
60 62
61#define INTX_NUM 4 63#define INTX_NUM 4
62 64
@@ -166,34 +168,41 @@ static bool altera_pcie_valid_config(struct altera_pcie *pcie,
166 168
167static int tlp_read_packet(struct altera_pcie *pcie, u32 *value) 169static int tlp_read_packet(struct altera_pcie *pcie, u32 *value)
168{ 170{
169 u8 loop; 171 int i;
170 bool sop = 0; 172 bool sop = 0;
171 u32 ctrl; 173 u32 ctrl;
172 u32 reg0, reg1; 174 u32 reg0, reg1;
175 u32 comp_status = 1;
173 176
174 /* 177 /*
175 * Minimum 2 loops to read TLP headers and 1 loop to read data 178 * Minimum 2 loops to read TLP headers and 1 loop to read data
176 * payload. 179 * payload.
177 */ 180 */
178 for (loop = 0; loop < TLP_LOOP; loop++) { 181 for (i = 0; i < TLP_LOOP; i++) {
179 ctrl = cra_readl(pcie, RP_RXCPL_STATUS); 182 ctrl = cra_readl(pcie, RP_RXCPL_STATUS);
180 if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) { 183 if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) {
181 reg0 = cra_readl(pcie, RP_RXCPL_REG0); 184 reg0 = cra_readl(pcie, RP_RXCPL_REG0);
182 reg1 = cra_readl(pcie, RP_RXCPL_REG1); 185 reg1 = cra_readl(pcie, RP_RXCPL_REG1);
183 186
184 if (ctrl & RP_RXCPL_SOP) 187 if (ctrl & RP_RXCPL_SOP) {
185 sop = true; 188 sop = true;
189 comp_status = TLP_COMP_STATUS(reg1);
190 }
186 191
187 if (ctrl & RP_RXCPL_EOP) { 192 if (ctrl & RP_RXCPL_EOP) {
193 if (comp_status)
194 return PCIBIOS_DEVICE_NOT_FOUND;
195
188 if (value) 196 if (value)
189 *value = reg0; 197 *value = reg0;
198
190 return PCIBIOS_SUCCESSFUL; 199 return PCIBIOS_SUCCESSFUL;
191 } 200 }
192 } 201 }
193 udelay(5); 202 udelay(5);
194 } 203 }
195 204
196 return -ENOENT; 205 return PCIBIOS_DEVICE_NOT_FOUND;
197} 206}
198 207
199static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, 208static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers,
@@ -233,7 +242,7 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
233 else 242 else
234 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1); 243 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGRD1);
235 244
236 headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, devfn), 245 headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
237 TLP_READ_TAG, byte_en); 246 TLP_READ_TAG, byte_en);
238 headers[2] = TLP_CFG_DW2(bus, devfn, where); 247 headers[2] = TLP_CFG_DW2(bus, devfn, where);
239 248
@@ -253,7 +262,7 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
253 else 262 else
254 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1); 263 headers[0] = TLP_CFG_DW0(TLP_FMTTYPE_CFGWR1);
255 264
256 headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, devfn), 265 headers[1] = TLP_CFG_DW1(TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN),
257 TLP_WRITE_TAG, byte_en); 266 TLP_WRITE_TAG, byte_en);
258 headers[2] = TLP_CFG_DW2(bus, devfn, where); 267 headers[2] = TLP_CFG_DW2(bus, devfn, where);
259 268
@@ -458,7 +467,7 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
458 struct device_node *node = dev->of_node; 467 struct device_node *node = dev->of_node;
459 468
460 /* Setup INTx */ 469 /* Setup INTx */
461 pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM, 470 pcie->irq_domain = irq_domain_add_linear(node, INTX_NUM + 1,
462 &intx_domain_ops, pcie); 471 &intx_domain_ops, pcie);
463 if (!pcie->irq_domain) { 472 if (!pcie->irq_domain) {
464 dev_err(dev, "Failed to get an INTx IRQ domain\n"); 473 dev_err(dev, "Failed to get an INTx IRQ domain\n");
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 540f077c37ea..02a7452bdf23 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -440,7 +440,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
440 ret, pp->io); 440 ret, pp->io);
441 continue; 441 continue;
442 } 442 }
443 pp->io_base = pp->io->start;
444 break; 443 break;
445 case IORESOURCE_MEM: 444 case IORESOURCE_MEM:
446 pp->mem = win->res; 445 pp->mem = win->res;
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 35457ecd8e70..163671a4f798 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -111,7 +111,7 @@ static struct pcie_host_ops hisi_pcie_host_ops = {
111 .link_up = hisi_pcie_link_up, 111 .link_up = hisi_pcie_link_up,
112}; 112};
113 113
114static int __init hisi_add_pcie_port(struct pcie_port *pp, 114static int hisi_add_pcie_port(struct pcie_port *pp,
115 struct platform_device *pdev) 115 struct platform_device *pdev)
116{ 116{
117 int ret; 117 int ret;
@@ -139,7 +139,7 @@ static int __init hisi_add_pcie_port(struct pcie_port *pp,
139 return 0; 139 return 0;
140} 140}
141 141
142static int __init hisi_pcie_probe(struct platform_device *pdev) 142static int hisi_pcie_probe(struct platform_device *pdev)
143{ 143{
144 struct hisi_pcie *hisi_pcie; 144 struct hisi_pcie *hisi_pcie;
145 struct pcie_port *pp; 145 struct pcie_port *pp;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 53e463244bb7..7eaa4c87fec7 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -54,7 +54,7 @@ static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
54 struct irq_domain *domain; 54 struct irq_domain *domain;
55 55
56 domain = pci_msi_get_domain(dev); 56 domain = pci_msi_get_domain(dev);
57 if (domain) 57 if (domain && irq_domain_is_hierarchy(domain))
58 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); 58 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
59 59
60 return arch_setup_msi_irqs(dev, nvec, type); 60 return arch_setup_msi_irqs(dev, nvec, type);
@@ -65,7 +65,7 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
65 struct irq_domain *domain; 65 struct irq_domain *domain;
66 66
67 domain = pci_msi_get_domain(dev); 67 domain = pci_msi_get_domain(dev);
68 if (domain) 68 if (domain && irq_domain_is_hierarchy(domain))
69 pci_msi_domain_free_irqs(domain, dev); 69 pci_msi_domain_free_irqs(domain, dev);
70 else 70 else
71 arch_teardown_msi_irqs(dev); 71 arch_teardown_msi_irqs(dev);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 4446fcb5effd..d7ffd66814bb 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1146,9 +1146,21 @@ static int pci_pm_runtime_suspend(struct device *dev)
1146 pci_dev->state_saved = false; 1146 pci_dev->state_saved = false;
1147 pci_dev->no_d3cold = false; 1147 pci_dev->no_d3cold = false;
1148 error = pm->runtime_suspend(dev); 1148 error = pm->runtime_suspend(dev);
1149 suspend_report_result(pm->runtime_suspend, error); 1149 if (error) {
1150 if (error) 1150 /*
1151 * -EBUSY and -EAGAIN are used to request the runtime PM core
1152 * to schedule a new suspend, so log the event only with debug
1153 * log level.
1154 */
1155 if (error == -EBUSY || error == -EAGAIN)
1156 dev_dbg(dev, "can't suspend now (%pf returned %d)\n",
1157 pm->runtime_suspend, error);
1158 else
1159 dev_err(dev, "can't suspend (%pf returned %d)\n",
1160 pm->runtime_suspend, error);
1161
1151 return error; 1162 return error;
1163 }
1152 if (!pci_dev->d3cold_allowed) 1164 if (!pci_dev->d3cold_allowed)
1153 pci_dev->no_d3cold = true; 1165 pci_dev->no_d3cold = true;
1154 1166
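
-EBUSY and -EAGAIN merely ask the runtime PM core to retry the suspend later, so they now land in the debug log while genuine failures stay at error level. The decision, isolated into a helper for illustration:

/* Sketch of severity-aware reporting: retryable codes are expected traffic,
 * everything else is a real failure worth an error-level message. */
static void report_suspend_result(struct device *dev, int error)
{
        if (!error)
                return;
        if (error == -EBUSY || error == -EAGAIN)
                dev_dbg(dev, "suspend deferred (%d)\n", error);
        else
                dev_err(dev, "suspend failed (%d)\n", error);
}
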
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 92618686604c..eead54cd01b2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -216,7 +216,10 @@ static ssize_t numa_node_store(struct device *dev,
216 if (ret) 216 if (ret)
217 return ret; 217 return ret;
218 218
219 if (node >= MAX_NUMNODES || !node_online(node)) 219 if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
220 return -EINVAL;
221
222 if (node != NUMA_NO_NODE && !node_online(node))
220 return -EINVAL; 223 return -EINVAL;
221 224
222 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); 225 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
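
The replacement check accepts exactly one negative value, NUMA_NO_NODE (-1), and otherwise requires a valid, online node. The same logic as a stand-alone predicate:

#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/nodemask.h>

static int validate_numa_node(int node)
{
        /* the only negative value with a defined meaning is NUMA_NO_NODE */
        if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
                return -EINVAL;

        /* a concrete node must actually be online */
        if (node != NUMA_NO_NODE && !node_online(node))
                return -EINVAL;

        return 0;
}
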
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index fd2f03fa53f3..d390fc1475ec 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -337,6 +337,4 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
337} 337}
338#endif 338#endif
339 339
340struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
341
342#endif /* DRIVERS_PCI_H */ 340#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e735c728e3b3..edb1984201e9 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1685,8 +1685,8 @@ static void pci_dma_configure(struct pci_dev *dev)
1685{ 1685{
1686 struct device *bridge = pci_get_host_bridge_device(dev); 1686 struct device *bridge = pci_get_host_bridge_device(dev);
1687 1687
1688 if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) { 1688 if (IS_ENABLED(CONFIG_OF) &&
1689 if (bridge->parent) 1689 bridge->parent && bridge->parent->of_node) {
1690 of_dma_configure(&dev->dev, bridge->parent->of_node); 1690 of_dma_configure(&dev->dev, bridge->parent->of_node);
1691 } else if (has_acpi_companion(bridge)) { 1691 } else if (has_acpi_companion(bridge)) {
1692 struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); 1692 struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 7eb5859dd035..03cb3ea2d2c0 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -233,6 +233,7 @@ config PHY_SUN9I_USB
233 tristate "Allwinner sun9i SoC USB PHY driver" 233 tristate "Allwinner sun9i SoC USB PHY driver"
234 depends on ARCH_SUNXI && HAS_IOMEM && OF 234 depends on ARCH_SUNXI && HAS_IOMEM && OF
235 depends on RESET_CONTROLLER 235 depends on RESET_CONTROLLER
236 depends on USB_COMMON
236 select GENERIC_PHY 237 select GENERIC_PHY
237 help 238 help
238 Enable this to support the transceiver that is part of Allwinner 239 Enable this to support the transceiver that is part of Allwinner
diff --git a/drivers/phy/phy-bcm-cygnus-pcie.c b/drivers/phy/phy-bcm-cygnus-pcie.c
index 7ad72b7d2b98..082c03f6438f 100644
--- a/drivers/phy/phy-bcm-cygnus-pcie.c
+++ b/drivers/phy/phy-bcm-cygnus-pcie.c
@@ -128,6 +128,7 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
128 struct phy_provider *provider; 128 struct phy_provider *provider;
129 struct resource *res; 129 struct resource *res;
130 unsigned cnt = 0; 130 unsigned cnt = 0;
131 int ret;
131 132
132 if (of_get_child_count(node) == 0) { 133 if (of_get_child_count(node) == 0) {
133 dev_err(dev, "PHY no child node\n"); 134 dev_err(dev, "PHY no child node\n");
@@ -154,24 +155,28 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
154 if (of_property_read_u32(child, "reg", &id)) { 155 if (of_property_read_u32(child, "reg", &id)) {
155 dev_err(dev, "missing reg property for %s\n", 156 dev_err(dev, "missing reg property for %s\n",
156 child->name); 157 child->name);
157 return -EINVAL; 158 ret = -EINVAL;
159 goto put_child;
158 } 160 }
159 161
160 if (id >= MAX_NUM_PHYS) { 162 if (id >= MAX_NUM_PHYS) {
161 dev_err(dev, "invalid PHY id: %u\n", id); 163 dev_err(dev, "invalid PHY id: %u\n", id);
162 return -EINVAL; 164 ret = -EINVAL;
165 goto put_child;
163 } 166 }
164 167
165 if (core->phys[id].phy) { 168 if (core->phys[id].phy) {
166 dev_err(dev, "duplicated PHY id: %u\n", id); 169 dev_err(dev, "duplicated PHY id: %u\n", id);
167 return -EINVAL; 170 ret = -EINVAL;
171 goto put_child;
168 } 172 }
169 173
170 p = &core->phys[id]; 174 p = &core->phys[id];
171 p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops); 175 p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops);
172 if (IS_ERR(p->phy)) { 176 if (IS_ERR(p->phy)) {
173 dev_err(dev, "failed to create PHY\n"); 177 dev_err(dev, "failed to create PHY\n");
174 return PTR_ERR(p->phy); 178 ret = PTR_ERR(p->phy);
179 goto put_child;
175 } 180 }
176 181
177 p->core = core; 182 p->core = core;
@@ -191,6 +196,9 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
191 dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt); 196 dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt);
192 197
193 return 0; 198 return 0;
199put_child:
200 of_node_put(child);
201 return ret;
194} 202}
195 203
196static const struct of_device_id cygnus_pcie_phy_match_table[] = { 204static const struct of_device_id cygnus_pcie_phy_match_table[] = {
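
This is the first of several probe fixes below with the same shape: for_each_child_of_node() holds a reference on the current child and drops it only when the loop advances, so any early return leaks that reference. Every error exit therefore funnels through a put_child: label; a skeleton of the pattern, with setup_one_child() as a hypothetical per-node step:

#include <linux/of.h>

int setup_one_child(struct device *dev, struct device_node *child); /* hypothetical */

static int probe_children(struct device *dev)
{
        struct device_node *child;
        int ret = 0;

        for_each_child_of_node(dev->of_node, child) {
                ret = setup_one_child(dev, child);
                if (ret)
                        goto put_child;
        }
        return 0;

put_child:
        of_node_put(child);     /* drop the reference the iterator held */
        return ret;
}
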
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c
index 77a2e054fdea..f84a33a1bdd9 100644
--- a/drivers/phy/phy-berlin-sata.c
+++ b/drivers/phy/phy-berlin-sata.c
@@ -195,7 +195,7 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
195 struct phy_provider *phy_provider; 195 struct phy_provider *phy_provider;
196 struct phy_berlin_priv *priv; 196 struct phy_berlin_priv *priv;
197 struct resource *res; 197 struct resource *res;
198 int i = 0; 198 int ret, i = 0;
199 u32 phy_id; 199 u32 phy_id;
200 200
201 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 201 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -237,22 +237,27 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
237 if (of_property_read_u32(child, "reg", &phy_id)) { 237 if (of_property_read_u32(child, "reg", &phy_id)) {
238 dev_err(dev, "missing reg property in node %s\n", 238 dev_err(dev, "missing reg property in node %s\n",
239 child->name); 239 child->name);
240 return -EINVAL; 240 ret = -EINVAL;
241 goto put_child;
241 } 242 }
242 243
243 if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) { 244 if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) {
244 dev_err(dev, "invalid reg in node %s\n", child->name); 245 dev_err(dev, "invalid reg in node %s\n", child->name);
245 return -EINVAL; 246 ret = -EINVAL;
247 goto put_child;
246 } 248 }
247 249
248 phy_desc = devm_kzalloc(dev, sizeof(*phy_desc), GFP_KERNEL); 250 phy_desc = devm_kzalloc(dev, sizeof(*phy_desc), GFP_KERNEL);
249 if (!phy_desc) 251 if (!phy_desc) {
250 return -ENOMEM; 252 ret = -ENOMEM;
253 goto put_child;
254 }
251 255
252 phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops); 256 phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops);
253 if (IS_ERR(phy)) { 257 if (IS_ERR(phy)) {
254 dev_err(dev, "failed to create PHY %d\n", phy_id); 258 dev_err(dev, "failed to create PHY %d\n", phy_id);
255 return PTR_ERR(phy); 259 ret = PTR_ERR(phy);
260 goto put_child;
256 } 261 }
257 262
258 phy_desc->phy = phy; 263 phy_desc->phy = phy;
@@ -269,6 +274,9 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
269 phy_provider = 274 phy_provider =
270 devm_of_phy_provider_register(dev, phy_berlin_sata_phy_xlate); 275 devm_of_phy_provider_register(dev, phy_berlin_sata_phy_xlate);
271 return PTR_ERR_OR_ZERO(phy_provider); 276 return PTR_ERR_OR_ZERO(phy_provider);
277put_child:
278 of_node_put(child);
279 return ret;
272} 280}
273 281
274static const struct of_device_id phy_berlin_sata_of_match[] = { 282static const struct of_device_id phy_berlin_sata_of_match[] = {
diff --git a/drivers/phy/phy-brcmstb-sata.c b/drivers/phy/phy-brcmstb-sata.c
index 8a2cb16a1937..cd9dba820566 100644
--- a/drivers/phy/phy-brcmstb-sata.c
+++ b/drivers/phy/phy-brcmstb-sata.c
@@ -140,7 +140,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
140 struct brcm_sata_phy *priv; 140 struct brcm_sata_phy *priv;
141 struct resource *res; 141 struct resource *res;
142 struct phy_provider *provider; 142 struct phy_provider *provider;
143 int count = 0; 143 int ret, count = 0;
144 144
145 if (of_get_child_count(dn) == 0) 145 if (of_get_child_count(dn) == 0)
146 return -ENODEV; 146 return -ENODEV;
@@ -163,16 +163,19 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
163 if (of_property_read_u32(child, "reg", &id)) { 163 if (of_property_read_u32(child, "reg", &id)) {
164 dev_err(dev, "missing reg property in node %s\n", 164 dev_err(dev, "missing reg property in node %s\n",
165 child->name); 165 child->name);
166 return -EINVAL; 166 ret = -EINVAL;
167 goto put_child;
167 } 168 }
168 169
169 if (id >= MAX_PORTS) { 170 if (id >= MAX_PORTS) {
170 dev_err(dev, "invalid reg: %u\n", id); 171 dev_err(dev, "invalid reg: %u\n", id);
171 return -EINVAL; 172 ret = -EINVAL;
173 goto put_child;
172 } 174 }
173 if (priv->phys[id].phy) { 175 if (priv->phys[id].phy) {
174 dev_err(dev, "already registered port %u\n", id); 176 dev_err(dev, "already registered port %u\n", id);
175 return -EINVAL; 177 ret = -EINVAL;
178 goto put_child;
176 } 179 }
177 180
178 port = &priv->phys[id]; 181 port = &priv->phys[id];
@@ -182,7 +185,8 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
182 port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc"); 185 port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc");
183 if (IS_ERR(port->phy)) { 186 if (IS_ERR(port->phy)) {
184 dev_err(dev, "failed to create PHY\n"); 187 dev_err(dev, "failed to create PHY\n");
185 return PTR_ERR(port->phy); 188 ret = PTR_ERR(port->phy);
189 goto put_child;
186 } 190 }
187 191
188 phy_set_drvdata(port->phy, port); 192 phy_set_drvdata(port->phy, port);
@@ -198,6 +202,9 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
198 dev_info(dev, "registered %d port(s)\n", count); 202 dev_info(dev, "registered %d port(s)\n", count);
199 203
200 return 0; 204 return 0;
205put_child:
206 of_node_put(child);
207 return ret;
201} 208}
202 209
203static struct platform_driver brcm_sata_phy_driver = { 210static struct platform_driver brcm_sata_phy_driver = {
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index fc48fac003a6..8c7f27db6ad3 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -636,8 +636,9 @@ EXPORT_SYMBOL_GPL(devm_of_phy_get);
636 * @np: node containing the phy 636 * @np: node containing the phy
637 * @index: index of the phy 637 * @index: index of the phy
638 * 638 *
639 * Gets the phy using _of_phy_get(), and associates a device with it using 639 * Gets the phy using _of_phy_get(), then gets a refcount to it,
640 * devres. On driver detach, release function is invoked on the devres data, 640 * and associates a device with it using devres. On driver detach,
641 * the release function is invoked on the devres data,
641 * then, devres data is freed. 642 * and then the devres data is freed.
642 * 643 *
643 */ 644 */
@@ -651,13 +652,21 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
651 return ERR_PTR(-ENOMEM); 652 return ERR_PTR(-ENOMEM);
652 653
653 phy = _of_phy_get(np, index); 654 phy = _of_phy_get(np, index);
654 if (!IS_ERR(phy)) { 655 if (IS_ERR(phy)) {
655 *ptr = phy;
656 devres_add(dev, ptr);
657 } else {
658 devres_free(ptr); 656 devres_free(ptr);
657 return phy;
659 } 658 }
660 659
660 if (!try_module_get(phy->ops->owner)) {
661 devres_free(ptr);
662 return ERR_PTR(-EPROBE_DEFER);
663 }
664
665 get_device(&phy->dev);
666
667 *ptr = phy;
668 devres_add(dev, ptr);
669
661 return phy; 670 return phy;
662} 671}
663EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index); 672EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);
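
The rewritten body pins the provider before publishing the phy: try_module_get() keeps the provider module loaded, with failure mapped to -EPROBE_DEFER since a refusal means the module is on its way out, and get_device() keeps the underlying struct device alive. A condensed sketch of the sequence, assuming the caller already obtained devres_ptr from devres_alloc():

static struct phy *take_phy_reference(struct device *dev, struct phy *phy,
                                      struct phy **devres_ptr)
{
        if (!try_module_get(phy->ops->owner)) {
                devres_free(devres_ptr);
                return ERR_PTR(-EPROBE_DEFER);  /* provider unloading: retry */
        }
        get_device(&phy->dev);

        /* publish only after both references are safely held */
        *devres_ptr = phy;
        devres_add(dev, devres_ptr);
        return phy;
}
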
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index c47b56b4a2b8..3acd2a1808df 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -1226,15 +1226,18 @@ static int miphy28lp_probe(struct platform_device *pdev)
1226 1226
1227 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), 1227 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy),
1228 GFP_KERNEL); 1228 GFP_KERNEL);
1229 if (!miphy_phy) 1229 if (!miphy_phy) {
1230 return -ENOMEM; 1230 ret = -ENOMEM;
1231 goto put_child;
1232 }
1231 1233
1232 miphy_dev->phys[port] = miphy_phy; 1234 miphy_dev->phys[port] = miphy_phy;
1233 1235
1234 phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops); 1236 phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops);
1235 if (IS_ERR(phy)) { 1237 if (IS_ERR(phy)) {
1236 dev_err(&pdev->dev, "failed to create PHY\n"); 1238 dev_err(&pdev->dev, "failed to create PHY\n");
1237 return PTR_ERR(phy); 1239 ret = PTR_ERR(phy);
1240 goto put_child;
1238 } 1241 }
1239 1242
1240 miphy_dev->phys[port]->phy = phy; 1243 miphy_dev->phys[port]->phy = phy;
@@ -1242,11 +1245,11 @@ static int miphy28lp_probe(struct platform_device *pdev)
1242 1245
1243 ret = miphy28lp_of_probe(child, miphy_phy); 1246 ret = miphy28lp_of_probe(child, miphy_phy);
1244 if (ret) 1247 if (ret)
1245 return ret; 1248 goto put_child;
1246 1249
1247 ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]); 1250 ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]);
1248 if (ret) 1251 if (ret)
1249 return ret; 1252 goto put_child;
1250 1253
1251 phy_set_drvdata(phy, miphy_dev->phys[port]); 1254 phy_set_drvdata(phy, miphy_dev->phys[port]);
1252 port++; 1255 port++;
@@ -1255,6 +1258,9 @@ static int miphy28lp_probe(struct platform_device *pdev)
1255 1258
1256 provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate); 1259 provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate);
1257 return PTR_ERR_OR_ZERO(provider); 1260 return PTR_ERR_OR_ZERO(provider);
1261put_child:
1262 of_node_put(child);
1263 return ret;
1258} 1264}
1259 1265
1260static const struct of_device_id miphy28lp_of_match[] = { 1266static const struct of_device_id miphy28lp_of_match[] = {
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
index 00a686a073ed..e661f3b36eaa 100644
--- a/drivers/phy/phy-miphy365x.c
+++ b/drivers/phy/phy-miphy365x.c
@@ -566,22 +566,25 @@ static int miphy365x_probe(struct platform_device *pdev)
566 566
567 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), 567 miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy),
568 GFP_KERNEL); 568 GFP_KERNEL);
569 if (!miphy_phy) 569 if (!miphy_phy) {
570 return -ENOMEM; 570 ret = -ENOMEM;
571 goto put_child;
572 }
571 573
572 miphy_dev->phys[port] = miphy_phy; 574 miphy_dev->phys[port] = miphy_phy;
573 575
574 phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops); 576 phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops);
575 if (IS_ERR(phy)) { 577 if (IS_ERR(phy)) {
576 dev_err(&pdev->dev, "failed to create PHY\n"); 578 dev_err(&pdev->dev, "failed to create PHY\n");
577 return PTR_ERR(phy); 579 ret = PTR_ERR(phy);
580 goto put_child;
578 } 581 }
579 582
580 miphy_dev->phys[port]->phy = phy; 583 miphy_dev->phys[port]->phy = phy;
581 584
582 ret = miphy365x_of_probe(child, miphy_phy); 585 ret = miphy365x_of_probe(child, miphy_phy);
583 if (ret) 586 if (ret)
584 return ret; 587 goto put_child;
585 588
586 phy_set_drvdata(phy, miphy_dev->phys[port]); 589 phy_set_drvdata(phy, miphy_dev->phys[port]);
587 590
@@ -591,12 +594,15 @@ static int miphy365x_probe(struct platform_device *pdev)
591 &miphy_phy->ctrlreg); 594 &miphy_phy->ctrlreg);
592 if (ret) { 595 if (ret) {
593 dev_err(&pdev->dev, "No sysconfig offset found\n"); 596 dev_err(&pdev->dev, "No sysconfig offset found\n");
594 return ret; 597 goto put_child;
595 } 598 }
596 } 599 }
597 600
598 provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate); 601 provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate);
599 return PTR_ERR_OR_ZERO(provider); 602 return PTR_ERR_OR_ZERO(provider);
603put_child:
604 of_node_put(child);
605 return ret;
600} 606}
601 607
602static const struct of_device_id miphy365x_of_match[] = { 608static const struct of_device_id miphy365x_of_match[] = {
diff --git a/drivers/phy/phy-mt65xx-usb3.c b/drivers/phy/phy-mt65xx-usb3.c
index f30b28bd41fe..e427c3b788ff 100644
--- a/drivers/phy/phy-mt65xx-usb3.c
+++ b/drivers/phy/phy-mt65xx-usb3.c
@@ -415,7 +415,7 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
415 struct resource *sif_res; 415 struct resource *sif_res;
416 struct mt65xx_u3phy *u3phy; 416 struct mt65xx_u3phy *u3phy;
417 struct resource res; 417 struct resource res;
418 int port; 418 int port, retval;
419 419
420 u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL); 420 u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL);
421 if (!u3phy) 421 if (!u3phy)
@@ -447,31 +447,34 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
447 for_each_child_of_node(np, child_np) { 447 for_each_child_of_node(np, child_np) {
448 struct mt65xx_phy_instance *instance; 448 struct mt65xx_phy_instance *instance;
449 struct phy *phy; 449 struct phy *phy;
450 int retval;
451 450
452 instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL); 451 instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL);
453 if (!instance) 452 if (!instance) {
454 return -ENOMEM; 453 retval = -ENOMEM;
454 goto put_child;
455 }
455 456
456 u3phy->phys[port] = instance; 457 u3phy->phys[port] = instance;
457 458
458 phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops); 459 phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops);
459 if (IS_ERR(phy)) { 460 if (IS_ERR(phy)) {
460 dev_err(dev, "failed to create phy\n"); 461 dev_err(dev, "failed to create phy\n");
461 return PTR_ERR(phy); 462 retval = PTR_ERR(phy);
463 goto put_child;
462 } 464 }
463 465
464 retval = of_address_to_resource(child_np, 0, &res); 466 retval = of_address_to_resource(child_np, 0, &res);
465 if (retval) { 467 if (retval) {
466 dev_err(dev, "failed to get address resource(id-%d)\n", 468 dev_err(dev, "failed to get address resource(id-%d)\n",
467 port); 469 port);
468 return retval; 470 goto put_child;
469 } 471 }
470 472
471 instance->port_base = devm_ioremap_resource(&phy->dev, &res); 473 instance->port_base = devm_ioremap_resource(&phy->dev, &res);
472 if (IS_ERR(instance->port_base)) { 474 if (IS_ERR(instance->port_base)) {
473 dev_err(dev, "failed to remap phy regs\n"); 475 dev_err(dev, "failed to remap phy regs\n");
474 return PTR_ERR(instance->port_base); 476 retval = PTR_ERR(instance->port_base);
477 goto put_child;
475 } 478 }
476 479
477 instance->phy = phy; 480 instance->phy = phy;
@@ -483,6 +486,9 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
483 provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate); 486 provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate);
484 487
485 return PTR_ERR_OR_ZERO(provider); 488 return PTR_ERR_OR_ZERO(provider);
489put_child:
490 of_node_put(child_np);
491 return retval;
486} 492}
487 493
488static const struct of_device_id mt65xx_u3phy_id_table[] = { 494static const struct of_device_id mt65xx_u3phy_id_table[] = {
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 91d6f342c565..62c43c435194 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -108,13 +108,16 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
108 108
109 for_each_available_child_of_node(dev->of_node, child) { 109 for_each_available_child_of_node(dev->of_node, child) {
110 rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL); 110 rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL);
111 if (!rk_phy) 111 if (!rk_phy) {
112 return -ENOMEM; 112 err = -ENOMEM;
113 goto put_child;
114 }
113 115
114 if (of_property_read_u32(child, "reg", &reg_offset)) { 116 if (of_property_read_u32(child, "reg", &reg_offset)) {
115 dev_err(dev, "missing reg property in node %s\n", 117 dev_err(dev, "missing reg property in node %s\n",
116 child->name); 118 child->name);
117 return -EINVAL; 119 err = -EINVAL;
120 goto put_child;
118 } 121 }
119 122
120 rk_phy->reg_offset = reg_offset; 123 rk_phy->reg_offset = reg_offset;
@@ -127,18 +130,22 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
127 rk_phy->phy = devm_phy_create(dev, child, &ops); 130 rk_phy->phy = devm_phy_create(dev, child, &ops);
128 if (IS_ERR(rk_phy->phy)) { 131 if (IS_ERR(rk_phy->phy)) {
129 dev_err(dev, "failed to create PHY\n"); 132 dev_err(dev, "failed to create PHY\n");
130 return PTR_ERR(rk_phy->phy); 133 err = PTR_ERR(rk_phy->phy);
134 goto put_child;
131 } 135 }
132 phy_set_drvdata(rk_phy->phy, rk_phy); 136 phy_set_drvdata(rk_phy->phy, rk_phy);
133 137
134 /* only power up the usb phy when in use, so disable it at init */ 138 /* only power up the usb phy when in use, so disable it at init */
135 err = rockchip_usb_phy_power(rk_phy, 1); 139 err = rockchip_usb_phy_power(rk_phy, 1);
136 if (err) 140 if (err)
137 return err; 141 goto put_child;
138 } 142 }
139 143
140 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); 144 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
141 return PTR_ERR_OR_ZERO(phy_provider); 145 return PTR_ERR_OR_ZERO(phy_provider);
146put_child:
147 of_node_put(child);
148 return err;
142} 149}
143 150
144static const struct of_device_id rockchip_usb_phy_dt_ids[] = { 151static const struct of_device_id rockchip_usb_phy_dt_ids[] = {
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index b422e4ed73f4..312c78b27a32 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -5,8 +5,6 @@
5config PINCTRL 5config PINCTRL
6 bool 6 bool
7 7
8if PINCTRL
9
10menu "Pin controllers" 8menu "Pin controllers"
11 depends on PINCTRL 9 depends on PINCTRL
12 10
@@ -274,5 +272,3 @@ config PINCTRL_TB10X
274 select GPIOLIB 272 select GPIOLIB
275 273
276endmenu 274endmenu
277
278endif
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 0bc1abcedbae..595f87028b19 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -342,12 +342,6 @@ static int bcm2835_gpio_get(struct gpio_chip *chip, unsigned offset)
342 return bcm2835_gpio_get_bit(pc, GPLEV0, offset); 342 return bcm2835_gpio_get_bit(pc, GPLEV0, offset);
343} 343}
344 344
345static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
346 unsigned offset, int value)
347{
348 return pinctrl_gpio_direction_output(chip->base + offset);
349}
350
351static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 345static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
352{ 346{
353 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->parent); 347 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->parent);
@@ -355,6 +349,13 @@ static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
355 bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset); 349 bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset);
356} 350}
357 351
352static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
353 unsigned offset, int value)
354{
355 bcm2835_gpio_set(chip, offset, value);
356 return pinctrl_gpio_direction_output(chip->base + offset);
357}
358
358static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset) 359static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
359{ 360{
360 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->parent); 361 struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->parent);
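
Moving the set before the direction switch means the pin starts driving the requested level rather than whatever stale value sat in the output register. The resulting ordering, as a generic gpio_chip helper sketched under the same assumptions:

#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>

static int gpio_direction_output_glitchfree(struct gpio_chip *chip,
                                            unsigned offset, int value)
{
        /* latch the level while the pin is still an input... */
        chip->set(chip, offset, value);
        /* ...then switch direction, so the first driven level is `value` */
        return pinctrl_gpio_direction_output(chip->base + offset);
}
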
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 88a7fac11bd4..acaf84cadca3 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -538,8 +538,10 @@ static int imx1_pinctrl_parse_functions(struct device_node *np,
538 func->groups[i] = child->name; 538 func->groups[i] = child->name;
539 grp = &info->groups[grp_index++]; 539 grp = &info->groups[grp_index++];
540 ret = imx1_pinctrl_parse_groups(child, grp, info, i++); 540 ret = imx1_pinctrl_parse_groups(child, grp, info, i++);
541 if (ret == -ENOMEM) 541 if (ret == -ENOMEM) {
542 of_node_put(child);
542 return ret; 543 return ret;
544 }
543 } 545 }
544 546
545 return 0; 547 return 0;
@@ -582,8 +584,10 @@ static int imx1_pinctrl_parse_dt(struct platform_device *pdev,
582 584
583 for_each_child_of_node(np, child) { 585 for_each_child_of_node(np, child) {
584 ret = imx1_pinctrl_parse_functions(child, info, ifunc++); 586 ret = imx1_pinctrl_parse_functions(child, info, ifunc++);
585 if (ret == -ENOMEM) 587 if (ret == -ENOMEM) {
588 of_node_put(child);
586 return -ENOMEM; 589 return -ENOMEM;
590 }
587 } 591 }
588 592
589 return 0; 593 return 0;
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index 37a037543d29..587d1ff6210e 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -299,7 +299,7 @@ static const struct pinctrl_pin_desc vf610_pinctrl_pads[] = {
299static struct imx_pinctrl_soc_info vf610_pinctrl_info = { 299static struct imx_pinctrl_soc_info vf610_pinctrl_info = {
300 .pins = vf610_pinctrl_pads, 300 .pins = vf610_pinctrl_pads,
301 .npins = ARRAY_SIZE(vf610_pinctrl_pads), 301 .npins = ARRAY_SIZE(vf610_pinctrl_pads),
302 .flags = SHARE_MUX_CONF_REG, 302 .flags = SHARE_MUX_CONF_REG | ZERO_OFFSET_VALID,
303}; 303};
304 304
305static const struct of_device_id vf610_pinctrl_of_match[] = { 305static const struct of_device_id vf610_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index e42d5d4183f5..5979d38c46b2 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -28,6 +28,7 @@
28 .padcfglock_offset = BXT_PADCFGLOCK, \ 28 .padcfglock_offset = BXT_PADCFGLOCK, \
29 .hostown_offset = BXT_HOSTSW_OWN, \ 29 .hostown_offset = BXT_HOSTSW_OWN, \
30 .ie_offset = BXT_GPI_IE, \ 30 .ie_offset = BXT_GPI_IE, \
31 .gpp_size = 32, \
31 .pin_base = (s), \ 32 .pin_base = (s), \
32 .npins = ((e) - (s) + 1), \ 33 .npins = ((e) - (s) + 1), \
33 } 34 }
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 401c186244be..c6dcde7132de 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -25,9 +25,6 @@
25 25
26#include "pinctrl-intel.h" 26#include "pinctrl-intel.h"
27 27
28/* Maximum number of pads in each group */
29#define NPADS_IN_GPP 24
30
31/* Offset from regs */ 28/* Offset from regs */
32#define PADBAR 0x00c 29#define PADBAR 0x00c
33#define GPI_IS 0x100 30#define GPI_IS 0x100
@@ -37,6 +34,7 @@
37#define PADOWN_BITS 4 34#define PADOWN_BITS 4
38#define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS) 35#define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS)
39#define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p)) 36#define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p))
37#define PADOWN_GPP(p) ((p) / 8)
40 38
41/* Offset from pad_regs */ 39/* Offset from pad_regs */
42#define PADCFG0 0x000 40#define PADCFG0 0x000
@@ -142,7 +140,7 @@ static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin,
142static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin) 140static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
143{ 141{
144 const struct intel_community *community; 142 const struct intel_community *community;
145 unsigned padno, gpp, gpp_offset, offset; 143 unsigned padno, gpp, offset, group;
146 void __iomem *padown; 144 void __iomem *padown;
147 145
148 community = intel_get_community(pctrl, pin); 146 community = intel_get_community(pctrl, pin);
@@ -152,9 +150,9 @@ static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
152 return true; 150 return true;
153 151
154 padno = pin_to_padno(community, pin); 152 padno = pin_to_padno(community, pin);
155 gpp = padno / NPADS_IN_GPP; 153 group = padno / community->gpp_size;
156 gpp_offset = padno % NPADS_IN_GPP; 154 gpp = PADOWN_GPP(padno % community->gpp_size);
157 offset = community->padown_offset + gpp * 16 + (gpp_offset / 8) * 4; 155 offset = community->padown_offset + 0x10 * group + gpp * 4;
158 padown = community->regs + offset; 156 padown = community->regs + offset;
159 157
160 return !(readl(padown) & PADOWN_MASK(padno)); 158 return !(readl(padown) & PADOWN_MASK(padno));
@@ -173,11 +171,11 @@ static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned pin)
173 return false; 171 return false;
174 172
175 padno = pin_to_padno(community, pin); 173 padno = pin_to_padno(community, pin);
176 gpp = padno / NPADS_IN_GPP; 174 gpp = padno / community->gpp_size;
177 offset = community->hostown_offset + gpp * 4; 175 offset = community->hostown_offset + gpp * 4;
178 hostown = community->regs + offset; 176 hostown = community->regs + offset;
179 177
180 return !(readl(hostown) & BIT(padno % NPADS_IN_GPP)); 178 return !(readl(hostown) & BIT(padno % community->gpp_size));
181} 179}
182 180
183static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin) 181static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
@@ -193,7 +191,7 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
193 return false; 191 return false;
194 192
195 padno = pin_to_padno(community, pin); 193 padno = pin_to_padno(community, pin);
196 gpp = padno / NPADS_IN_GPP; 194 gpp = padno / community->gpp_size;
197 195
198 /* 196 /*
199 * If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad, 197 * If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad,
@@ -202,12 +200,12 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
202 */ 200 */
203 offset = community->padcfglock_offset + gpp * 8; 201 offset = community->padcfglock_offset + gpp * 8;
204 value = readl(community->regs + offset); 202 value = readl(community->regs + offset);
205 if (value & BIT(pin % NPADS_IN_GPP)) 203 if (value & BIT(pin % community->gpp_size))
206 return true; 204 return true;
207 205
208 offset = community->padcfglock_offset + 4 + gpp * 8; 206 offset = community->padcfglock_offset + 4 + gpp * 8;
209 value = readl(community->regs + offset); 207 value = readl(community->regs + offset);
210 if (value & BIT(pin % NPADS_IN_GPP)) 208 if (value & BIT(pin % community->gpp_size))
211 return true; 209 return true;
212 210
213 return false; 211 return false;
@@ -663,8 +661,8 @@ static void intel_gpio_irq_ack(struct irq_data *d)
663 community = intel_get_community(pctrl, pin); 661 community = intel_get_community(pctrl, pin);
664 if (community) { 662 if (community) {
665 unsigned padno = pin_to_padno(community, pin); 663 unsigned padno = pin_to_padno(community, pin);
666 unsigned gpp_offset = padno % NPADS_IN_GPP; 664 unsigned gpp_offset = padno % community->gpp_size;
667 unsigned gpp = padno / NPADS_IN_GPP; 665 unsigned gpp = padno / community->gpp_size;
668 666
669 writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); 667 writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
670 } 668 }
@@ -685,8 +683,8 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
685 community = intel_get_community(pctrl, pin); 683 community = intel_get_community(pctrl, pin);
686 if (community) { 684 if (community) {
687 unsigned padno = pin_to_padno(community, pin); 685 unsigned padno = pin_to_padno(community, pin);
688 unsigned gpp_offset = padno % NPADS_IN_GPP; 686 unsigned gpp_offset = padno % community->gpp_size;
689 unsigned gpp = padno / NPADS_IN_GPP; 687 unsigned gpp = padno / community->gpp_size;
690 void __iomem *reg; 688 void __iomem *reg;
691 u32 value; 689 u32 value;
692 690
@@ -780,8 +778,8 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
780 return -EINVAL; 778 return -EINVAL;
781 779
782 padno = pin_to_padno(community, pin); 780 padno = pin_to_padno(community, pin);
783 gpp = padno / NPADS_IN_GPP; 781 gpp = padno / community->gpp_size;
784 gpp_offset = padno % NPADS_IN_GPP; 782 gpp_offset = padno % community->gpp_size;
785 783
786 /* Clear the existing wake status */ 784 /* Clear the existing wake status */
787 writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4); 785 writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4);
@@ -819,14 +817,14 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
819 /* Only interrupts that are enabled */ 817 /* Only interrupts that are enabled */
820 pending &= enabled; 818 pending &= enabled;
821 819
822 for_each_set_bit(gpp_offset, &pending, NPADS_IN_GPP) { 820 for_each_set_bit(gpp_offset, &pending, community->gpp_size) {
823 unsigned padno, irq; 821 unsigned padno, irq;
824 822
825 /* 823 /*
826 * The last group in a community can have fewer pins 824 * The last group in a community can have fewer pins
827 * than NPADS_IN_GPP. 825 * than NPADS_IN_GPP.
828 */ 826 */
829 padno = gpp_offset + gpp * NPADS_IN_GPP; 827 padno = gpp_offset + gpp * community->gpp_size;
830 if (padno >= community->npins) 828 if (padno >= community->npins)
831 break; 829 break;
832 830
@@ -1002,7 +1000,8 @@ int intel_pinctrl_probe(struct platform_device *pdev,
1002 1000
1003 community->regs = regs; 1001 community->regs = regs;
1004 community->pad_regs = regs + padbar; 1002 community->pad_regs = regs + padbar;
1005 community->ngpps = DIV_ROUND_UP(community->npins, NPADS_IN_GPP); 1003 community->ngpps = DIV_ROUND_UP(community->npins,
1004 community->gpp_size);
1006 } 1005 }
1007 1006
1008 irq = platform_get_irq(pdev, 0); 1007 irq = platform_get_irq(pdev, 0);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 4ec8b572a288..b60215793017 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -55,6 +55,8 @@ struct intel_function {
55 * ACPI). 55 * ACPI).
56 * @ie_offset: Register offset of GPI_IE from @regs. 56 * @ie_offset: Register offset of GPI_IE from @regs.
57 * @pin_base: Starting pin of pins in this community 57 * @pin_base: Starting pin of pins in this community
58 * @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK,
59 * HOSTSW_OWN, GPI_IS, GPI_IE, etc.
58 * @npins: Number of pins in this community 60 * @npins: Number of pins in this community
59 * @regs: Community specific common registers (reserved for core driver) 61 * @regs: Community specific common registers (reserved for core driver)
60 * @pad_regs: Community specific pad registers (reserved for core driver) 62 * @pad_regs: Community specific pad registers (reserved for core driver)
@@ -68,6 +70,7 @@ struct intel_community {
68 unsigned hostown_offset; 70 unsigned hostown_offset;
69 unsigned ie_offset; 71 unsigned ie_offset;
70 unsigned pin_base; 72 unsigned pin_base;
73 unsigned gpp_size;
71 size_t npins; 74 size_t npins;
72 void __iomem *regs; 75 void __iomem *regs;
73 void __iomem *pad_regs; 76 void __iomem *pad_regs;
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 1de9ae5010db..c725a5313b4e 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -30,6 +30,7 @@
30 .padcfglock_offset = SPT_PADCFGLOCK, \ 30 .padcfglock_offset = SPT_PADCFGLOCK, \
31 .hostown_offset = SPT_HOSTSW_OWN, \ 31 .hostown_offset = SPT_HOSTSW_OWN, \
32 .ie_offset = SPT_GPI_IE, \ 32 .ie_offset = SPT_GPI_IE, \
33 .gpp_size = 24, \
33 .pin_base = (s), \ 34 .pin_base = (s), \
34 .npins = ((e) - (s) + 1), \ 35 .npins = ((e) - (s) + 1), \
35 } 36 }
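
With gpp_size stored per community (32 on Broxton, 24 on Sunrisepoint), the PADOWN lookup becomes: 0x10 bytes of register space per pad group, one 32-bit register per 8 pads within the group, 4 ownership bits per pad. The same address arithmetic as a helper, using the fields added to struct intel_community above:

static unsigned padown_reg_offset(const struct intel_community *community,
                                  unsigned padno)
{
        unsigned group = padno / community->gpp_size;     /* which pad group  */
        unsigned reg = (padno % community->gpp_size) / 8; /* 8 pads/register  */

        /* 0x10 bytes of PADOWN registers per group, 4 bytes per register */
        return community->padown_offset + 0x10 * group + reg * 4;
}
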
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index a71f68362967..9ddba444e127 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -747,7 +747,7 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
747 reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset; 747 reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset;
748 bit = BIT(offset & 0xf); 748 bit = BIT(offset & 0xf);
749 regmap_read(pctl->regmap1, reg_addr, &read_val); 749 regmap_read(pctl->regmap1, reg_addr, &read_val);
750 return !!(read_val & bit); 750 return !(read_val & bit);
751} 751}
752 752
753static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset) 753static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -757,12 +757,8 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset)
757 unsigned int read_val = 0; 757 unsigned int read_val = 0;
758 struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent); 758 struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
759 759
760 if (mtk_gpio_get_direction(chip, offset)) 760 reg_addr = mtk_get_port(pctl, offset) +
761 reg_addr = mtk_get_port(pctl, offset) + 761 pctl->devdata->din_offset;
762 pctl->devdata->dout_offset;
763 else
764 reg_addr = mtk_get_port(pctl, offset) +
765 pctl->devdata->din_offset;
766 762
767 bit = BIT(offset & 0xf); 763 bit = BIT(offset & 0xf);
768 regmap_read(pctl->regmap1, reg_addr, &read_val); 764 regmap_read(pctl->regmap1, reg_addr, &read_val);
@@ -997,6 +993,7 @@ static struct gpio_chip mtk_gpio_chip = {
997 .owner = THIS_MODULE, 993 .owner = THIS_MODULE,
998 .request = gpiochip_generic_request, 994 .request = gpiochip_generic_request,
999 .free = gpiochip_generic_free, 995 .free = gpiochip_generic_free,
996 .get_direction = mtk_gpio_get_direction,
1000 .direction_input = mtk_gpio_direction_input, 997 .direction_input = mtk_gpio_direction_input,
1001 .direction_output = mtk_gpio_direction_output, 998 .direction_output = mtk_gpio_direction_output,
1002 .get = mtk_gpio_get, 999 .get = mtk_gpio_get,
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index 7b80fa9c2049..7bea0df06fb1 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -672,7 +672,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
672 return -ENOMEM; 672 return -ENOMEM;
673 673
674 pctrl->dev = &pdev->dev; 674 pctrl->dev = &pdev->dev;
675 pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev); 675 pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
676 676
677 pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); 677 pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
678 if (!pctrl->regmap) { 678 if (!pctrl->regmap) {
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 7bc1e0f27447..8f5c96cbf94e 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -763,7 +763,7 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
763 return -ENOMEM; 763 return -ENOMEM;
764 764
765 pctrl->dev = &pdev->dev; 765 pctrl->dev = &pdev->dev;
766 pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev); 766 pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev);
767 767
768 pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); 768 pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
769 if (!pctrl->regmap) { 769 if (!pctrl->regmap) {
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index e7deb51de7dc..9842bb106796 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -31,11 +31,11 @@
31 PORT_GP_12(5, fn, sfx) 31 PORT_GP_12(5, fn, sfx)
32 32
33#undef _GP_DATA 33#undef _GP_DATA
34#define _GP_DATA(bank, pin, name, sfx) \ 34#define _GP_DATA(bank, pin, name, sfx, cfg) \
35 PINMUX_DATA(name##_DATA, name##_FN, name##_IN, name##_OUT) 35 PINMUX_DATA(name##_DATA, name##_FN, name##_IN, name##_OUT)
36 36
37#define _GP_INOUTSEL(bank, pin, name, sfx) name##_IN, name##_OUT 37#define _GP_INOUTSEL(bank, pin, name, sfx, cfg) name##_IN, name##_OUT
38#define _GP_INDT(bank, pin, name, sfx) name##_DATA 38#define _GP_INDT(bank, pin, name, sfx, cfg) name##_DATA
39#define GP_INOUTSEL(bank) PORT_GP_32_REV(bank, _GP_INOUTSEL, unused) 39#define GP_INOUTSEL(bank) PORT_GP_32_REV(bank, _GP_INOUTSEL, unused)
40#define GP_INDT(bank) PORT_GP_32_REV(bank, _GP_INDT, unused) 40#define GP_INDT(bank) PORT_GP_32_REV(bank, _GP_INDT, unused)
41 41
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index cc97f0869791..48747c28a43d 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1341,10 +1341,13 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
1341 1341
1342 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { 1342 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
1343 /* check if the domain is locked by BIOS */ 1343 /* check if the domain is locked by BIOS */
1344 if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) { 1344 ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked);
1345 if (ret)
1346 return ret;
1347 if (locked) {
1345 pr_info("RAPL package %d domain %s locked by BIOS\n", 1348 pr_info("RAPL package %d domain %s locked by BIOS\n",
1346 rp->id, rd->name); 1349 rp->id, rd->name);
1347 rd->state |= DOMAIN_STATE_BIOS_LOCKED; 1350 rd->state |= DOMAIN_STATE_BIOS_LOCKED;
1348 } 1351 }
1349 } 1352 }
1350 1353
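
The old code treated any non-zero return from rapl_read_data_raw() as "locked", conflating a failed read with a set FW_LOCK bit. The fix checks the call's return first and only then the value it filled in; a sketch of the repaired flow, reusing this file's internal types and helpers:

static int check_domain_lock(struct rapl_domain *rd, int package_id)
{
        u64 locked;
        int ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked);

        if (ret)
                return ret;     /* a failed read is an error, not "locked" */

        if (locked) {
                pr_info("RAPL package %d domain %s locked by BIOS\n",
                        package_id, rd->name);
                rd->state |= DOMAIN_STATE_BIOS_LOCKED;
        }
        return 0;
}
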
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 8b3130f22b42..9e03d158f411 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -1478,6 +1478,8 @@ module_init(remoteproc_init);
1478 1478
1479static void __exit remoteproc_exit(void) 1479static void __exit remoteproc_exit(void)
1480{ 1480{
1481 ida_destroy(&rproc_dev_index);
1482
1481 rproc_exit_debugfs(); 1483 rproc_exit_debugfs();
1482} 1484}
1483module_exit(remoteproc_exit); 1485module_exit(remoteproc_exit);
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 9d30809bb407..916af5096f57 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -156,7 +156,7 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf,
156 char buf[10]; 156 char buf[10];
157 int ret; 157 int ret;
158 158
159 if (count > sizeof(buf)) 159 if (count < 1 || count > sizeof(buf))
160 return count; 160 return count;
161 161
162 ret = copy_from_user(buf, user_buf, count); 162 ret = copy_from_user(buf, user_buf, count);
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 284b587da65c..d6c853bbfa9f 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -483,24 +483,23 @@ static int da9063_rtc_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, rtc);
 
+	rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC,
+						&da9063_rtc_ops, THIS_MODULE);
+	if (IS_ERR(rtc->rtc_dev))
+		return PTR_ERR(rtc->rtc_dev);
+
+	da9063_data_to_tm(data, &rtc->alarm_time, rtc);
+	rtc->rtc_sync = false;
+
 	irq_alarm = platform_get_irq_byname(pdev, "ALARM");
 	ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
 					da9063_alarm_event,
 					IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 					"ALARM", rtc);
-	if (ret) {
+	if (ret)
 		dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
 			irq_alarm, ret);
-		return ret;
-	}
-
-	rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC,
-						&da9063_rtc_ops, THIS_MODULE);
-	if (IS_ERR(rtc->rtc_dev))
-		return PTR_ERR(rtc->rtc_dev);
 
-	da9063_data_to_tm(data, &rtc->alarm_time, rtc);
-	rtc->rtc_sync = false;
 	return ret;
 }
 
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 188006c55ce0..aa705bb4748c 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -15,9 +15,6 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/pm_wakeirq.h>
 #include <linux/rtc/ds1307.h>
 #include <linux/rtc.h>
 #include <linux/slab.h>
@@ -117,7 +114,6 @@ struct ds1307 {
 #define HAS_ALARM	1	/* bit 1 == irq claimed */
 	struct i2c_client	*client;
 	struct rtc_device	*rtc;
-	int			wakeirq;
 	s32 (*read_block_data)(const struct i2c_client *client, u8 command,
 			       u8 length, u8 *values);
 	s32 (*write_block_data)(const struct i2c_client *client, u8 command,
@@ -1138,7 +1134,10 @@ read_rtc:
 			bin2bcd(tmp));
 	}
 
-	device_set_wakeup_capable(&client->dev, want_irq);
+	if (want_irq) {
+		device_set_wakeup_capable(&client->dev, true);
+		set_bit(HAS_ALARM, &ds1307->flags);
+	}
 	ds1307->rtc = devm_rtc_device_register(&client->dev, client->name,
 				rtc_ops, THIS_MODULE);
 	if (IS_ERR(ds1307->rtc)) {
@@ -1146,43 +1145,19 @@ read_rtc:
 	}
 
 	if (want_irq) {
-		struct device_node *node = client->dev.of_node;
-
 		err = devm_request_threaded_irq(&client->dev,
 				client->irq, NULL, irq_handler,
 				IRQF_SHARED | IRQF_ONESHOT,
 				ds1307->rtc->name, client);
 		if (err) {
 			client->irq = 0;
+			device_set_wakeup_capable(&client->dev, false);
+			clear_bit(HAS_ALARM, &ds1307->flags);
 			dev_err(&client->dev, "unable to request IRQ!\n");
-			goto no_irq;
-		}
-
-		set_bit(HAS_ALARM, &ds1307->flags);
-		dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
-
-		/* Currently supported by OF code only! */
-		if (!node)
-			goto no_irq;
-
-		err = of_irq_get(node, 1);
-		if (err <= 0) {
-			if (err == -EPROBE_DEFER)
-				goto exit;
-			goto no_irq;
-		}
-		ds1307->wakeirq = err;
-
-		err = dev_pm_set_dedicated_wake_irq(&client->dev,
-						ds1307->wakeirq);
-		if (err) {
-			dev_err(&client->dev, "unable to setup wakeIRQ %d!\n",
-				err);
-			goto exit;
-		}
+		} else
+			dev_dbg(&client->dev, "got IRQ %d\n", client->irq);
 	}
 
-no_irq:
 	if (chip->nvram_size) {
 
 		ds1307->nvram = devm_kzalloc(&client->dev,
@@ -1226,9 +1201,6 @@ static int ds1307_remove(struct i2c_client *client)
 {
 	struct ds1307 *ds1307 = i2c_get_clientdata(client);
 
-	if (ds1307->wakeirq)
-		dev_pm_clear_wake_irq(&client->dev);
-
 	if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags))
 		sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram);
 
diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c
index 91ca0bc1b484..35c9aada07c8 100644
--- a/drivers/rtc/rtc-rk808.c
+++ b/drivers/rtc/rtc-rk808.c
@@ -56,6 +56,42 @@ struct rk808_rtc {
 	int irq;
 };
 
+/*
+ * The Rockchip calendar used by the RK808 counts November with 31 days. We use
+ * these translation functions to convert its dates to/from the Gregorian
+ * calendar used by the rest of the world. We arbitrarily define Jan 1st, 2016
+ * as the day when both calendars were in sync, and treat all other dates
+ * relative to that.
+ * NOTE: Other system software (e.g. firmware) that reads the same hardware must
+ * implement this exact same conversion algorithm, with the same anchor date.
+ */
+static time64_t nov2dec_transitions(struct rtc_time *tm)
+{
+	return (tm->tm_year + 1900) - 2016 + (tm->tm_mon + 1 > 11 ? 1 : 0);
+}
+
+static void rockchip_to_gregorian(struct rtc_time *tm)
+{
+	/* If it's Nov 31st, rtc_tm_to_time64() will count that like Dec 1st */
+	time64_t time = rtc_tm_to_time64(tm);
+	rtc_time64_to_tm(time + nov2dec_transitions(tm) * 86400, tm);
+}
+
+static void gregorian_to_rockchip(struct rtc_time *tm)
+{
+	time64_t extra_days = nov2dec_transitions(tm);
+	time64_t time = rtc_tm_to_time64(tm);
+	rtc_time64_to_tm(time - extra_days * 86400, tm);
+
+	/* Compensate if we went back over Nov 31st (will work up to 2381) */
+	if (nov2dec_transitions(tm) < extra_days) {
+		if (tm->tm_mon + 1 == 11)
+			tm->tm_mday++; /* This may result in 31! */
+		else
+			rtc_time64_to_tm(time - (extra_days - 1) * 86400, tm);
+	}
+}
+
 /* Read current time and date in RTC */
 static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm)
 {
@@ -101,9 +137,10 @@ static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm)
 	tm->tm_mon = (bcd2bin(rtc_data[4] & MONTHS_REG_MSK)) - 1;
 	tm->tm_year = (bcd2bin(rtc_data[5] & YEARS_REG_MSK)) + 100;
 	tm->tm_wday = bcd2bin(rtc_data[6] & WEEKS_REG_MSK);
+	rockchip_to_gregorian(tm);
 	dev_dbg(dev, "RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n",
 		1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
-		tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec);
+		tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec);
 
 	return ret;
 }
@@ -116,6 +153,10 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm)
 	u8 rtc_data[NUM_TIME_REGS];
 	int ret;
 
+	dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n",
+		1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
+		tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec);
+	gregorian_to_rockchip(tm);
 	rtc_data[0] = bin2bcd(tm->tm_sec);
 	rtc_data[1] = bin2bcd(tm->tm_min);
 	rtc_data[2] = bin2bcd(tm->tm_hour);
@@ -123,9 +164,6 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm)
 	rtc_data[4] = bin2bcd(tm->tm_mon + 1);
 	rtc_data[5] = bin2bcd(tm->tm_year - 100);
 	rtc_data[6] = bin2bcd(tm->tm_wday);
-	dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n",
-		1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
-		tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec);
 
 	/* Stop RTC while updating the RTC registers */
 	ret = regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG,
@@ -170,6 +208,7 @@ static int rk808_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
 	alrm->time.tm_mday = bcd2bin(alrm_data[3] & DAYS_REG_MSK);
 	alrm->time.tm_mon = (bcd2bin(alrm_data[4] & MONTHS_REG_MSK)) - 1;
 	alrm->time.tm_year = (bcd2bin(alrm_data[5] & YEARS_REG_MSK)) + 100;
+	rockchip_to_gregorian(&alrm->time);
 
 	ret = regmap_read(rk808->regmap, RK808_RTC_INT_REG, &int_reg);
 	if (ret) {
@@ -227,6 +266,7 @@ static int rk808_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 		alrm->time.tm_mday, alrm->time.tm_wday, alrm->time.tm_hour,
 		alrm->time.tm_min, alrm->time.tm_sec);
 
+	gregorian_to_rockchip(&alrm->time);
 	alrm_data[0] = bin2bcd(alrm->time.tm_sec);
 	alrm_data[1] = bin2bcd(alrm->time.tm_min);
 	alrm_data[2] = bin2bcd(alrm->time.tm_hour);
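
Reviewer note: the Rockchip-to-Gregorian conversion added above is easy to sanity-check by hand. The sketch below is not part of the patch; it reuses the driver's anchor date (Jan 1st, 2016) and the nov2dec_transitions() rule verbatim, while the date in main() is illustrative only.

#include <stdio.h>
#include <time.h>

/* Same rule as the driver: one extra day per year since the Jan 1st 2016
 * anchor, plus one more once December of the current year is reached. */
static long nov2dec_transitions(const struct tm *tm)
{
	return (tm->tm_year + 1900) - 2016 + (tm->tm_mon + 1 > 11 ? 1 : 0);
}

int main(void)
{
	/* Dec 31st, 2017: the Nov 31sts of 2016 and 2017 have both passed,
	 * so the hardware date trails the Gregorian date by two days. */
	struct tm tm = { .tm_year = 117, .tm_mon = 11, .tm_mday = 31 };

	printf("offset in days: %ld\n", nov2dec_transitions(&tm)); /* 2 */
	return 0;
}
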
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 548a18916a31..a831d18596a5 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1080,28 +1080,10 @@ void __init chsc_init_cleanup(void)
 	free_page((unsigned long)sei_page);
 }
 
-int chsc_enable_facility(int operation_code)
+int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
 {
-	unsigned long flags;
 	int ret;
-	struct {
-		struct chsc_header request;
-		u8 reserved1:4;
-		u8 format:4;
-		u8 reserved2;
-		u16 operation_code;
-		u32 reserved3;
-		u32 reserved4;
-		u32 operation_data_area[252];
-		struct chsc_header response;
-		u32 reserved5:4;
-		u32 format2:4;
-		u32 reserved6:24;
-	} __attribute__ ((packed)) *sda_area;
 
-	spin_lock_irqsave(&chsc_page_lock, flags);
-	memset(chsc_page, 0, PAGE_SIZE);
-	sda_area = chsc_page;
 	sda_area->request.length = 0x0400;
 	sda_area->request.code = 0x0031;
 	sda_area->operation_code = operation_code;
@@ -1119,10 +1101,25 @@ int chsc_enable_facility(int operation_code)
 	default:
 		ret = chsc_error_from_response(sda_area->response.code);
 	}
+out:
+	return ret;
+}
+
+int chsc_enable_facility(int operation_code)
+{
+	struct chsc_sda_area *sda_area;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	sda_area = chsc_page;
+
+	ret = __chsc_enable_facility(sda_area, operation_code);
 	if (ret != 0)
 		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
 			      operation_code, sda_area->response.code);
-out:
+
 	spin_unlock_irqrestore(&chsc_page_lock, flags);
 	return ret;
 }
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 76c9b50700b2..0de134c3a204 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -115,6 +115,20 @@ struct chsc_scpd {
 	u8 data[PAGE_SIZE - 20];
 } __attribute__ ((packed));
 
+struct chsc_sda_area {
+	struct chsc_header request;
+	u8 :4;
+	u8 format:4;
+	u8 :8;
+	u16 operation_code;
+	u32 :32;
+	u32 :32;
+	u32 operation_data_area[252];
+	struct chsc_header response;
+	u32 :4;
+	u32 format2:4;
+	u32 :24;
+} __packed __aligned(PAGE_SIZE);
 
 extern int chsc_get_ssd_info(struct subchannel_id schid,
 			     struct chsc_ssd_info *ssd);
@@ -122,6 +136,7 @@ extern int chsc_determine_css_characteristics(void);
 extern int chsc_init(void);
 extern void chsc_init_cleanup(void);
 
+int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code);
 extern int chsc_enable_facility(int);
 struct channel_subsystem;
 extern int chsc_secm(struct channel_subsystem *, int);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index b5620e818d6b..690b8547e828 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -925,18 +925,32 @@ void reipl_ccw_dev(struct ccw_dev_id *devid)
 
 int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
 {
+	static struct chsc_sda_area sda_area __initdata;
 	struct subchannel_id schid;
 	struct schib schib;
 
 	schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
 	if (!schid.one)
 		return -ENODEV;
+
+	if (schid.ssid) {
+		/*
+		 * Firmware should have already enabled MSS but whoever started
+		 * the kernel might have initiated a channel subsystem reset.
+		 * Ensure that MSS is enabled.
+		 */
+		memset(&sda_area, 0, sizeof(sda_area));
+		if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS))
+			return -ENODEV;
+	}
 	if (stsch_err(schid, &schib))
 		return -ENODEV;
 	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
 		return -ENODEV;
 	if (!schib.pmcw.dnv)
 		return -ENODEV;
+
+	iplinfo->ssid = schid.ssid;
 	iplinfo->devno = schib.pmcw.dev;
 	iplinfo->is_qdio = schib.pmcw.qf;
 	return 0;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 2ee3053bdc12..489e703dc82d 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -702,17 +702,12 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
 		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
 		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
 	} else {
-#ifdef CONFIG_SMP
 		css->global_pgid.pgid_high.cpu_addr = stap();
-#else
-		css->global_pgid.pgid_high.cpu_addr = 0;
-#endif
 	}
 	get_cpu_id(&cpu_id);
 	css->global_pgid.cpu_id = cpu_id.ident;
 	css->global_pgid.cpu_model = cpu_id.machine;
 	css->global_pgid.tod_high = tod_high;
-
 }
 
 static void
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 57f710b3c8a4..b8ab18676e69 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -3,6 +3,9 @@
 #
 
 ap-objs := ap_bus.o
-obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcixcc.o
-obj-$(CONFIG_ZCRYPT) += zcrypt_cex2a.o zcrypt_cex4.o
+# zcrypt_api depends on ap
+obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o
+# msgtype* depend on zcrypt_api
 obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
+# adapter drivers depend on ap, zcrypt_api and msgtype*
+obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9cb3dfbcaddb..61f768518a34 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
 static struct ap_config_info *ap_configuration;
 static DEFINE_SPINLOCK(ap_device_list_lock);
 static LIST_HEAD(ap_device_list);
+static bool initialised;
 
 /*
  * Workqueue timer for bus rescan.
@@ -1384,6 +1385,9 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
 {
 	struct device_driver *drv = &ap_drv->driver;
 
+	if (!initialised)
+		return -ENODEV;
+
 	drv->bus = &ap_bus_type;
 	drv->probe = ap_device_probe;
 	drv->remove = ap_device_remove;
@@ -1808,6 +1812,7 @@ int __init ap_module_init(void)
 		goto out_pm;
 
 	queue_work(system_long_wq, &ap_scan_work);
+	initialised = true;
 
 	return 0;
 
@@ -1837,6 +1842,7 @@ void ap_module_exit(void)
 {
 	int i;
 
+	initialised = false;
 	ap_reset_domain();
 	ap_poll_thread_stop();
 	del_timer_sync(&ap_config_timer);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index a9603ebbc1f8..9f8fa42c062c 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -317,11 +317,9 @@ EXPORT_SYMBOL(zcrypt_device_unregister);
 
 void zcrypt_msgtype_register(struct zcrypt_ops *zops)
 {
-	if (zops->owner) {
-		spin_lock_bh(&zcrypt_ops_list_lock);
-		list_add_tail(&zops->list, &zcrypt_ops_list);
-		spin_unlock_bh(&zcrypt_ops_list_lock);
-	}
+	spin_lock_bh(&zcrypt_ops_list_lock);
+	list_add_tail(&zops->list, &zcrypt_ops_list);
+	spin_unlock_bh(&zcrypt_ops_list_lock);
 }
 EXPORT_SYMBOL(zcrypt_msgtype_register);
 
@@ -342,7 +340,7 @@ struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
 	spin_lock_bh(&zcrypt_ops_list_lock);
 	list_for_each_entry(zops, &zcrypt_ops_list, list) {
 		if ((zops->variant == variant) &&
-		    (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) {
+		    (!strncmp(zops->name, name, sizeof(zops->name)))) {
 			found = 1;
 			break;
 		}
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 750876891931..38618f05ad92 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -96,6 +96,7 @@ struct zcrypt_ops {
 	struct list_head list;		/* zcrypt ops list. */
 	struct module *owner;
 	int variant;
+	char name[128];
 };
 
 struct zcrypt_device {
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 71ceee9137a8..74edf2934e7c 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -513,6 +513,7 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = {
 	.rsa_modexpo = zcrypt_cex2a_modexpo,
 	.rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
 	.owner = THIS_MODULE,
+	.name = MSGTYPE50_NAME,
 	.variant = MSGTYPE50_VARIANT_DEFAULT,
 };
 
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 74762214193b..9a2dd472c1cc 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -1119,6 +1119,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev,
  */
 static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
 	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
 	.variant = MSGTYPE06_VARIANT_NORNG,
 	.rsa_modexpo = zcrypt_msgtype6_modexpo,
 	.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
@@ -1127,6 +1128,7 @@ static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
 
 static struct zcrypt_ops zcrypt_msgtype6_ops = {
 	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
 	.variant = MSGTYPE06_VARIANT_DEFAULT,
 	.rsa_modexpo = zcrypt_msgtype6_modexpo,
 	.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
@@ -1136,6 +1138,7 @@ static struct zcrypt_ops zcrypt_msgtype6_ops = {
 
 static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
 	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
 	.variant = MSGTYPE06_VARIANT_EP11,
 	.rsa_modexpo = NULL,
 	.rsa_modexpo_crt = NULL,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 5f692ae40749..64eed87d34a8 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -364,6 +364,7 @@ config SCSI_HPSA
 	tristate "HP Smart Array SCSI driver"
 	depends on PCI && SCSI
 	select CHECK_SIGNATURE
+	select SCSI_SAS_ATTRS
 	help
 	  This driver supports HP Smart Array Controllers (circa 2009).
 	  It is a SCSI alternative to the cciss driver, which is a block
@@ -499,6 +500,7 @@ config SCSI_ADVANSYS
 	tristate "AdvanSys SCSI support"
 	depends on SCSI
 	depends on ISA || EISA || PCI
+	depends on ISA_DMA_API || !ISA
 	help
 	  This is a driver for all SCSI host adapters manufactured by
 	  AdvanSys. It is documented in the kernel source in
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 519f9a4b3dad..febbd83e2ecd 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7803,7 +7803,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
 		return ASC_BUSY;
 	}
 	scsiqp->sense_addr = cpu_to_le32(sense_addr);
-	scsiqp->sense_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
+	scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE;
 
 	/* Build ADV_SCSI_REQ_Q */
 
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 323982fd00c3..82ac1cd818ac 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -333,6 +333,17 @@ static void scsi_host_dev_release(struct device *dev)
 		kfree(queuedata);
 	}
 
+	if (shost->shost_state == SHOST_CREATED) {
+		/*
+		 * Free the shost_dev device name here if scsi_host_alloc()
+		 * and scsi_host_put() have been called but neither
+		 * scsi_host_add() nor scsi_host_remove() has been called.
+		 * This avoids that the memory allocated for the shost_dev
+		 * name is leaked.
+		 */
+		kfree(dev_name(&shost->shost_dev));
+	}
+
 	scsi_destroy_command_freelist(shost);
 	if (shost_use_blk_mq(shost)) {
 		if (shost->tag_set.tags)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 6a8f95808ee0..a3860367b568 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -8671,7 +8671,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
 	if ((rc != 0) || (c->err_info->CommandStatus != 0))
 		goto errout;
 
-	if (*options && HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
+	if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
 		goto out;
 
 errout:
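
Reviewer note: the hpsa fix above swaps a logical && for a bitwise &. A tiny stand-alone contrast (the flag value here is illustrative, not copied from hpsa.h) shows why the old test fired whenever *options was non-zero at all:

#include <stdio.h>

#define DISABLE_RLD_CACHING 0x40000000	/* illustrative flag bit */

int main(void)
{
	unsigned int options = 0x1;	/* some unrelated bit set */

	/* old test: non-zero && non-zero constant -> always 1 here */
	printf("&&: %d\n", options && DISABLE_RLD_CACHING);
	/* fixed test: isolates the flag bit -> 0 unless it is really set */
	printf("&:  %d\n", !!(options & DISABLE_RLD_CACHING));
	return 0;
}
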
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
index 29061467cc17..b736dbc80485 100644
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -71,3 +71,12 @@ config SCSI_MPT3SAS_MAX_SGE
 	  MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this
 	  can be 256. However, it may decreased down to 16. Decreasing this
 	  parameter will reduce memory requirements on a per controller instance.
+
+config SCSI_MPT2SAS
+	tristate "Legacy MPT2SAS config option"
+	default n
+	select SCSI_MPT3SAS
+	depends on PCI && SCSI
+	---help---
+	Dummy config option for backwards compatiblity: configure the MPT3SAS
+	driver instead.
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index d95206b7e116..9ab77b06434d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3905,8 +3905,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 	 * We do not expose raid functionality to upper layer for warpdrive.
 	 */
 	if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev)
-	    && (sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
-	    scmd->cmd_len != 32)
+	    && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
 		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
 
 	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 90fdf0e859e3..675e7fab0796 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -758,7 +758,7 @@ mvs_store_interrupt_coalescing(struct device *cdev,
 			 struct device_attribute *attr,
 			 const char *buffer, size_t size)
 {
-	int val = 0;
+	unsigned int val = 0;
 	struct mvs_info *mvi = NULL;
 	struct Scsi_Host *shost = class_to_shost(cdev);
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -766,7 +766,7 @@ mvs_store_interrupt_coalescing(struct device *cdev,
 	if (buffer == NULL)
 		return size;
 
-	if (sscanf(buffer, "%d", &val) != 1)
+	if (sscanf(buffer, "%u", &val) != 1)
 		return -EINVAL;
 
 	if (val >= 0x10000) {
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index eb0cc5475c45..b6b4cfdd7620 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -433,7 +433,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
 	if (off_in < QLA82XX_PCI_CRBSPACE)
 		return -1;
 
-	*off_out = (void __iomem *)(off_in - QLA82XX_PCI_CRBSPACE);
+	off_in -= QLA82XX_PCI_CRBSPACE;
 
 	/* Try direct map */
 	m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)];
@@ -443,6 +443,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
 		return 0;
 	}
 	/* Not in direct map, use crb window */
+	*off_out = (void __iomem *)off_in;
 	return 1;
 }
 
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3ba2e9564b9a..81af294f15a7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -902,7 +902,7 @@ static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item,
 	return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
 }
 
-CONFIGFS_ATTR_WO(tcm_qla2xxx_tpg_, enable);
+CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable);
 CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions);
 CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type);
 
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index dfcc45bb03b1..d09d60293c27 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -465,8 +465,9 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
 	    0} },
 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
 	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
-	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
-	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
+	    {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
+	     0, 0, 0, 0, 0, 0} },
 	{1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
 	    vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
 		0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
@@ -477,8 +478,8 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
 	    {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
 	     0} },
 /* 20 */
-	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
-	    {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+	{0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
+	    {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 	{0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
 	    {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 	{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index e4b799837948..459abe1dcc87 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -219,13 +219,13 @@ static int sdev_runtime_suspend(struct device *dev)
 	struct scsi_device *sdev = to_scsi_device(dev);
 	int err = 0;
 
-	if (pm && pm->runtime_suspend) {
-		err = blk_pre_runtime_suspend(sdev->request_queue);
-		if (err)
-			return err;
+	err = blk_pre_runtime_suspend(sdev->request_queue);
+	if (err)
+		return err;
+	if (pm && pm->runtime_suspend)
 		err = pm->runtime_suspend(dev);
 	blk_post_runtime_suspend(sdev->request_queue, err);
-	}
+
 	return err;
 }
 
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int err = 0;
 
-	if (pm && pm->runtime_resume) {
-		blk_pre_runtime_resume(sdev->request_queue);
+	blk_pre_runtime_resume(sdev->request_queue);
+	if (pm && pm->runtime_resume)
 		err = pm->runtime_resume(dev);
 	blk_post_runtime_resume(sdev->request_queue, err);
-	}
+
 	return err;
 }
 
258 258
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 83245391e956..054923e3393c 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -701,9 +701,12 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
 	 * strings.
 	 */
 	if (sdev->inquiry_len < 36) {
-		sdev_printk(KERN_INFO, sdev,
-			    "scsi scan: INQUIRY result too short (%d),"
-			    " using 36\n", sdev->inquiry_len);
+		if (!sdev->host->short_inquiry) {
+			shost_printk(KERN_INFO, sdev->host,
+				     "scsi scan: INQUIRY result too short (%d),"
+				     " using 36\n", sdev->inquiry_len);
+			sdev->host->short_inquiry = 1;
+		}
 		sdev->inquiry_len = 36;
 	}
 
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 8d2312239ae0..21930c9ac9cd 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1102,6 +1102,14 @@ void __scsi_remove_device(struct scsi_device *sdev)
 {
 	struct device *dev = &sdev->sdev_gendev;
 
+	/*
+	 * This cleanup path is not reentrant and while it is impossible
+	 * to get a new reference with scsi_device_get() someone can still
+	 * hold a previously acquired one.
+	 */
+	if (sdev->sdev_state == SDEV_DEL)
+		return;
+
 	if (sdev->is_visible) {
 		if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
 			return;
@@ -1110,7 +1118,9 @@ void __scsi_remove_device(struct scsi_device *sdev)
 		device_unregister(&sdev->sdev_dev);
 		transport_remove_device(dev);
 		scsi_dh_remove_device(sdev);
-	}
+		device_del(dev);
+	} else
+		put_device(&sdev->sdev_dev);
 
 	/*
 	 * Stop accepting new requests and wait until all queuecommand() and
@@ -1121,16 +1131,6 @@ void __scsi_remove_device(struct scsi_device *sdev)
 	blk_cleanup_queue(sdev->request_queue);
 	cancel_work_sync(&sdev->requeue_work);
 
-	/*
-	 * Remove the device after blk_cleanup_queue() has been called such
-	 * a possible bdi_register() call with the same name occurs after
-	 * blk_cleanup_queue() has called bdi_destroy().
-	 */
-	if (sdev->is_visible)
-		device_del(dev);
-	else
-		put_device(&sdev->sdev_dev);
-
 	if (sdev->host->hostt->slave_destroy)
 		sdev->host->hostt->slave_destroy(sdev);
 	transport_destroy_device(dev);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 54519804c46a..3d22fc3e3c1a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -638,11 +638,24 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 	unsigned int max_blocks = 0;
 
 	q->limits.discard_zeroes_data = 0;
-	q->limits.discard_alignment = sdkp->unmap_alignment *
-		logical_block_size;
-	q->limits.discard_granularity =
-		max(sdkp->physical_block_size,
-		    sdkp->unmap_granularity * logical_block_size);
+
+	/*
+	 * When LBPRZ is reported, discard alignment and granularity
+	 * must be fixed to the logical block size. Otherwise the block
+	 * layer will drop misaligned portions of the request which can
+	 * lead to data corruption. If LBPRZ is not set, we honor the
+	 * device preference.
+	 */
+	if (sdkp->lbprz) {
+		q->limits.discard_alignment = 0;
+		q->limits.discard_granularity = 1;
+	} else {
+		q->limits.discard_alignment = sdkp->unmap_alignment *
+			logical_block_size;
+		q->limits.discard_granularity =
+			max(sdkp->physical_block_size,
+			    sdkp->unmap_granularity * logical_block_size);
+	}
 
 	sdkp->provisioning_mode = mode;
 
@@ -2321,11 +2334,8 @@ got_data:
 		}
 	}
 
-	if (sdkp->capacity > 0xffffffff) {
+	if (sdkp->capacity > 0xffffffff)
 		sdp->use_16_for_rw = 1;
-		sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS;
-	} else
-		sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS;
 
 	/* Rescale capacity to 512-byte units */
 	if (sector_size == 4096)
@@ -2642,7 +2652,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
 	unsigned int sector_sz = sdkp->device->sector_size;
 	const int vpd_len = 64;
-	u32 max_xfer_length;
 	unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
 
 	if (!buffer ||
@@ -2650,14 +2659,11 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 	    scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
 		goto out;
 
-	max_xfer_length = get_unaligned_be32(&buffer[8]);
-	if (max_xfer_length)
-		sdkp->max_xfer_blocks = max_xfer_length;
-
 	blk_queue_io_min(sdkp->disk->queue,
 			 get_unaligned_be16(&buffer[6]) * sector_sz);
-	blk_queue_io_opt(sdkp->disk->queue,
-			 get_unaligned_be32(&buffer[12]) * sector_sz);
+
+	sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
+	sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
 
 	if (buffer[3] == 0x3c) {
 		unsigned int lba_count, desc_count;
@@ -2806,6 +2812,11 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp)
 	return 0;
 }
 
+static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
+{
+	return blocks << (ilog2(sdev->sector_size) - 9);
+}
+
 /**
  *	sd_revalidate_disk - called the first time a new disk is seen,
  *	performs disk spin up, read_capacity, etc.
@@ -2815,8 +2826,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
 {
 	struct scsi_disk *sdkp = scsi_disk(disk);
 	struct scsi_device *sdp = sdkp->device;
+	struct request_queue *q = sdkp->disk->queue;
 	unsigned char *buffer;
-	unsigned int max_xfer;
+	unsigned int dev_max, rw_max;
 
 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
 				      "sd_revalidate_disk\n"));
@@ -2864,11 +2876,26 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	 */
 	sd_set_flush_flag(sdkp);
 
-	max_xfer = sdkp->max_xfer_blocks;
-	max_xfer <<= ilog2(sdp->sector_size) - 9;
+	/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
+	dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
+
+	/* Some devices report a maximum block count for READ/WRITE requests. */
+	dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
+	q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
+
+	/*
+	 * Use the device's preferred I/O size for reads and writes
+	 * unless the reported value is unreasonably large (or garbage).
+	 */
+	if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max &&
+	    sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS)
+		rw_max = q->limits.io_opt =
+			logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+	else
+		rw_max = BLK_DEF_MAX_SECTORS;
 
-	sdkp->disk->queue->limits.max_sectors =
-		min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+	/* Combine with controller limits */
+	q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
 
 	set_capacity(disk, sdkp->capacity);
 	sd_config_write_same(sdkp);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 63ba5ca7f9a1..5f2a84aff29f 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -67,6 +67,7 @@ struct scsi_disk {
 	atomic_t	openers;
 	sector_t	capacity;	/* size in 512-byte sectors */
 	u32		max_xfer_blocks;
+	u32		opt_xfer_blocks;
 	u32		max_ws_blocks;
 	u32		max_unmap_blocks;
 	u32		unmap_granularity;
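
Reviewer note: logical_to_sectors() introduced above is just a shift: one logical block of sector_size bytes spans sector_size/512 of the block layer's 512-byte sectors. A minimal user-space sketch of the same arithmetic, where ilog2_u32() is a stand-in for the kernel's ilog2() and the values in main() are illustrative:

#include <stdio.h>

/* stand-in for the kernel's ilog2() on power-of-two inputs */
static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* same formula as the driver's logical_to_sectors() */
static unsigned int logical_to_sectors(unsigned int sector_size,
				       unsigned int blocks)
{
	return blocks << (ilog2_u32(sector_size) - 9);
}

int main(void)
{
	printf("%u\n", logical_to_sectors(4096, 65535));	/* 524280 */
	printf("%u\n", logical_to_sectors(512, 65535));	/* 65535: shift is 0 */
	return 0;
}
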
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index dcb0d76d7312..044d06410d4c 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -84,6 +84,7 @@ static void init_device_slot_control(unsigned char *dest_desc,
 static int ses_recv_diag(struct scsi_device *sdev, int page_code,
 			 void *buf, int bufflen)
 {
+	int ret;
 	unsigned char cmd[] = {
 		RECEIVE_DIAGNOSTIC,
 		1,		/* Set PCV bit */
@@ -92,9 +93,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
 		bufflen & 0xff,
 		0
 	};
+	unsigned char recv_page_code;
 
-	return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
+	ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
 			       NULL, SES_TIMEOUT, SES_RETRIES, NULL);
+	if (unlikely(!ret))
+		return ret;
+
+	recv_page_code = ((unsigned char *)buf)[0];
+
+	if (likely(recv_page_code == page_code))
+		return ret;
+
+	/* successful diagnostic but wrong page code.  This happens to some
+	 * USB devices, just print a message and pretend there was an error */
+
+	sdev_printk(KERN_ERR, sdev,
+		    "Wrong diagnostic page; asked for %d got %u\n",
+		    page_code, recv_page_code);
+
+	return -EINVAL;
 }
 
 static int ses_send_diag(struct scsi_device *sdev, int page_code,
@@ -541,7 +559,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
 			if (desc_ptr)
 				desc_ptr += len;
 
-			if (addl_desc_ptr)
+			if (addl_desc_ptr &&
+			    /* only find additional descriptions for specific devices */
+			    (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
+			     type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE ||
+			     type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER ||
+			     /* these elements are optional */
+			     type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
+			     type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
+			     type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
 				addl_desc_ptr += addl_desc_ptr[1] + 2;
 
 		}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index e0a1e52a04e7..2e522951b619 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4083,6 +4083,7 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
 	}
 	cdev->owner = THIS_MODULE;
 	cdev->ops = &st_fops;
+	STm->cdevs[rew] = cdev;
 
 	error = cdev_add(cdev, cdev_devno, 1);
 	if (error) {
4088 if (error) { 4089 if (error) {
@@ -4091,7 +4092,6 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
4091 pr_err("st%d: Device not attached.\n", dev_num); 4092 pr_err("st%d: Device not attached.\n", dev_num);
4092 goto out_free; 4093 goto out_free;
4093 } 4094 }
4094 STm->cdevs[rew] = cdev;
4095 4095
4096 i = mode << (4 - ST_NBR_MODE_BITS); 4096 i = mode << (4 - ST_NBR_MODE_BITS);
4097 snprintf(name, 10, "%s%s%s", rew ? "n" : "", 4097 snprintf(name, 10, "%s%s%s", rew ? "n" : "",
@@ -4110,8 +4110,9 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew)
 	return 0;
 out_free:
 	cdev_del(STm->cdevs[rew]);
-	STm->cdevs[rew] = NULL;
 out:
+	STm->cdevs[rew] = NULL;
+	STm->devs[rew] = NULL;
 	return error;
 }
 
4117 4118
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 25abd4eb7d10..91a003011acf 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
 
 static int __init sh_pm_runtime_init(void)
 {
-	if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
+	if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
 		if (!of_find_compatible_node(NULL, NULL,
 					     "renesas,cpg-mstp-clocks"))
 			return 0;
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 9d5068248aa0..0a4ea809a61b 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -23,6 +23,7 @@ config MTK_PMIC_WRAP
 config MTK_SCPSYS
 	bool "MediaTek SCPSYS Support"
 	depends on ARCH_MEDIATEK || COMPILE_TEST
+	default ARM64 && ARCH_MEDIATEK
 	select REGMAP
 	select MTK_INFRACFG
 	select PM_GENERIC_DOMAINS if PM
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index f3a0b6a4b54e..8c03a80b482d 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1179,7 +1179,7 @@ static int knav_queue_setup_link_ram(struct knav_device *kdev)
 
 		block++;
 		if (!block->size)
-			return 0;
+			continue;
 
 		dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n",
 			block->phys, block->virt, block->size);
@@ -1519,9 +1519,9 @@ static int knav_queue_load_pdsp(struct knav_device *kdev,
 
 	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
 		if (knav_acc_firmwares[i]) {
-			ret = request_firmware(&fw,
+			ret = request_firmware_direct(&fw,
 					       knav_acc_firmwares[i],
 					       kdev->dev);
 			if (!ret) {
 				found = true;
 				break;
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 06858e04ec59..bf9a610e5b89 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -562,8 +562,8 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
 		goto out_clk_disable;
 	}
 
-	dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d)\n",
-		 r->start, irq, bs->fifo_size);
+	dev_info(dev, "at %pr (irq %d, FIFOs size %d)\n",
+		 r, irq, bs->fifo_size);
 
 	return 0;
 
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 59a11437db70..39412c9097c6 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -167,7 +167,7 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
 {
 	unsigned int val;
 
-	regmap_read(dspi->regmap, SPI_CTAR(dspi->cs), &val);
+	regmap_read(dspi->regmap, SPI_CTAR(0), &val);
 
 	return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
 }
@@ -257,7 +257,7 @@ static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word)
 
 	return	SPI_PUSHR_TXDATA(d16) |
 		SPI_PUSHR_PCS(dspi->cs) |
-		SPI_PUSHR_CTAS(dspi->cs) |
+		SPI_PUSHR_CTAS(0) |
 		SPI_PUSHR_CONT;
 }
 
@@ -290,7 +290,7 @@ static int dspi_eoq_write(struct fsl_dspi *dspi)
 	 */
 	if (tx_word && (dspi->len == 1)) {
 		dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
-		regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+		regmap_update_bits(dspi->regmap, SPI_CTAR(0),
 				   SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
 		tx_word = 0;
 	}
@@ -339,7 +339,7 @@ static int dspi_tcfq_write(struct fsl_dspi *dspi)
 
 	if (tx_word && (dspi->len == 1)) {
 		dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
-		regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+		regmap_update_bits(dspi->regmap, SPI_CTAR(0),
 				   SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
 		tx_word = 0;
 	}
@@ -407,7 +407,7 @@ static int dspi_transfer_one_message(struct spi_master *master,
 		regmap_update_bits(dspi->regmap, SPI_MCR,
 				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
 				   SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
-		regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
+		regmap_write(dspi->regmap, SPI_CTAR(0),
 			     dspi->cur_chip->ctar_val);
 
 		trans_mode = dspi->devtype_data->trans_mode;
@@ -566,7 +566,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
 	if (!dspi->len) {
 		if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) {
 			regmap_update_bits(dspi->regmap,
-					   SPI_CTAR(dspi->cs),
+					   SPI_CTAR(0),
 					   SPI_FRAME_BITS_MASK,
 					   SPI_FRAME_BITS(16));
 			dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM;
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 563954a61424..7840067062a8 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -410,7 +410,7 @@ static int mtk_spi_setup(struct spi_device *spi)
410 if (!spi->controller_data) 410 if (!spi->controller_data)
411 spi->controller_data = (void *)&mtk_default_chip_info; 411 spi->controller_data = (void *)&mtk_default_chip_info;
412 412
413 if (mdata->dev_comp->need_pad_sel) 413 if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
414 gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); 414 gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
415 415
416 return 0; 416 return 0;
@@ -632,13 +632,23 @@ static int mtk_spi_probe(struct platform_device *pdev)
 			goto err_put_master;
 		}
 
-		for (i = 0; i < master->num_chipselect; i++) {
-			ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
-						dev_name(&pdev->dev));
-			if (ret) {
-				dev_err(&pdev->dev,
-					"can't get CS GPIO %i\n", i);
-				goto err_put_master;
+		if (!master->cs_gpios && master->num_chipselect > 1) {
+			dev_err(&pdev->dev,
+				"cs_gpios not specified and num_chipselect > 1\n");
+			ret = -EINVAL;
+			goto err_put_master;
+		}
+
+		if (master->cs_gpios) {
+			for (i = 0; i < master->num_chipselect; i++) {
+				ret = devm_gpio_request(&pdev->dev,
+							master->cs_gpios[i],
+							dev_name(&pdev->dev));
+				if (ret) {
+					dev_err(&pdev->dev,
+						"can't get CS GPIO %i\n", i);
+					goto err_put_master;
+				}
 			}
 		}
 	}
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 94af80676684..5e5fd77e2711 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1171,19 +1171,31 @@ err_no_rxchan:
1171static int pl022_dma_autoprobe(struct pl022 *pl022) 1171static int pl022_dma_autoprobe(struct pl022 *pl022)
1172{ 1172{
1173 struct device *dev = &pl022->adev->dev; 1173 struct device *dev = &pl022->adev->dev;
1174 struct dma_chan *chan;
1175 int err;
1174 1176
1175 /* automatically configure DMA channels from platform, normally using DT */ 1177 /* automatically configure DMA channels from platform, normally using DT */
1176 pl022->dma_rx_channel = dma_request_slave_channel(dev, "rx"); 1178 chan = dma_request_slave_channel_reason(dev, "rx");
1177 if (!pl022->dma_rx_channel) 1179 if (IS_ERR(chan)) {
1180 err = PTR_ERR(chan);
1178 goto err_no_rxchan; 1181 goto err_no_rxchan;
1182 }
1183
1184 pl022->dma_rx_channel = chan;
1179 1185
1180 pl022->dma_tx_channel = dma_request_slave_channel(dev, "tx"); 1186 chan = dma_request_slave_channel_reason(dev, "tx");
1181 if (!pl022->dma_tx_channel) 1187 if (IS_ERR(chan)) {
1188 err = PTR_ERR(chan);
1182 goto err_no_txchan; 1189 goto err_no_txchan;
1190 }
1191
1192 pl022->dma_tx_channel = chan;
1183 1193
1184 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); 1194 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
1185 if (!pl022->dummypage) 1195 if (!pl022->dummypage) {
1196 err = -ENOMEM;
1186 goto err_no_dummypage; 1197 goto err_no_dummypage;
1198 }
1187 1199
1188 return 0; 1200 return 0;
1189 1201
@@ -1194,7 +1206,7 @@ err_no_txchan:
 	dma_release_channel(pl022->dma_rx_channel);
 	pl022->dma_rx_channel = NULL;
 err_no_rxchan:
-	return -ENODEV;
+	return err;
 }
 
 static void terminate_dma(struct pl022 *pl022)
@@ -2236,6 +2248,10 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
 
 	/* Get DMA channels, try autoconfiguration first */
 	status = pl022_dma_autoprobe(pl022);
+	if (status == -EPROBE_DEFER) {
+		dev_dbg(dev, "deferring probe to get DMA channel\n");
+		goto err_no_irq;
+	}
 
 	/* If that failed, use channels from platform_info */
 	if (status == 0)
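The switch to dma_request_slave_channel_reason() is what enables this: the _reason variant returns an ERR_PTR() rather than NULL, so -EPROBE_DEFER from a DMA provider that has not probed yet can be told apart from a genuine failure. A minimal sketch of the consuming pattern (foo_probe is hypothetical):

    static int foo_probe(struct platform_device *pdev)
    {
        struct dma_chan *chan;

        chan = dma_request_slave_channel_reason(&pdev->dev, "rx");
        if (IS_ERR(chan)) {
            /* the driver core re-runs probe() later on -EPROBE_DEFER */
            return PTR_ERR(chan);
        }
        /* ... use chan; fall back to platform data on other errors ... */
        return 0;
    }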
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index e2415be209d5..dee1cb87d24f 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -376,6 +376,7 @@ static void spi_drv_shutdown(struct device *dev)
 
 /**
  * __spi_register_driver - register a SPI driver
+ * @owner: owner module of the driver to register
  * @sdrv: the driver to register
  * Context: can sleep
  *
@@ -1704,7 +1705,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
 	master->bus_num = -1;
 	master->num_chipselect = 1;
 	master->dev.class = &spi_master_class;
-	master->dev.parent = get_device(dev);
+	master->dev.parent = dev;
 	spi_master_set_devdata(master, &master[1]);
 
 	return master;
@@ -2130,6 +2131,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
 	 * Set transfer tx_nbits and rx_nbits as single transfer default
 	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
 	 */
+	message->frame_length = 0;
 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
 		message->frame_length += xfer->len;
 		if (!xfer->bits_per_word)
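__spi_validate() runs on every submission and only ever added to frame_length, so resubmitting the same spi_message inflated the count. A sketch of the caller behaviour the reset protects (buf and spi are placeholders):

    struct spi_transfer xfer = { .tx_buf = buf, .len = 4 };
    struct spi_message msg;

    spi_message_init(&msg);
    spi_message_add_tail(&xfer, &msg);
    spi_sync(spi, &msg);    /* frame_length == 4 */
    spi_sync(spi, &msg);    /* without the reset it would reach 8 */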
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 91a0fcd72423..d0e7dfc647cf 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -651,11 +651,11 @@ static int spidev_release(struct inode *inode, struct file *filp)
 	kfree(spidev->rx_buffer);
 	spidev->rx_buffer = NULL;
 
+	spin_lock_irq(&spidev->spi_lock);
 	if (spidev->spi)
 		spidev->speed_hz = spidev->spi->max_speed_hz;
 
 	/* ... after we unbound from the underlying device? */
-	spin_lock_irq(&spidev->spi_lock);
 	dofree = (spidev->spi == NULL);
 	spin_unlock_irq(&spidev->spi_lock);
 
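The lock now covers the spidev->spi dereference because the unbind path clears that pointer under the same lock; testing the pointer first and locking only afterwards left a window. The writer side, roughly as spidev_remove() of this era does it:

    spin_lock_irq(&spidev->spi_lock);
    spidev->spi = NULL;    /* release() must observe this atomically */
    spin_unlock_irq(&spidev->spi_lock);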
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 195c41d7bd53..0813163f962f 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
 err:
 	sg = table->sgl;
 	for (i -= 1; i >= 0; i--) {
-		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
+		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
 			      sg->length);
 		sg = sg_next(sg);
 	}
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 			     DMA_BIDIRECTIONAL);
 
 	for_each_sg(table->sgl, sg, table->nents, i) {
-		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
+		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
 			      sg->length);
 	}
 	chunk_heap->allocated -= allocated_size;
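The point of both hunks: gen_pool_free() must be handed back exactly the address gen_pool_alloc() returned. Since sg_phys(sg) is page_to_phys(sg_page(sg)) + sg->offset, deriving the address from sg_page() removes any dependence on the offset/PAGE_MASK interplay. A round-trip sketch (pool, sg, chunk_size are placeholders):

    unsigned long paddr = gen_pool_alloc(pool, chunk_size);

    sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)), chunk_size, 0);
    /* ... later: free what was allocated, not a masked variant ... */
    gen_pool_free(pool, page_to_phys(sg_page(sg)), sg->length);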
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 6d5b38d69578..9d7f0004d2d7 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -18,7 +18,8 @@ source "drivers/staging/iio/resolver/Kconfig"
 source "drivers/staging/iio/trigger/Kconfig"
 
 config IIO_DUMMY_EVGEN
 	tristate
+	select IRQ_WORK
 
 config IIO_SIMPLE_DUMMY
 	tristate "An example driver with no hardware requirements"
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c
index d11c54b72186..b51f237cd817 100644
--- a/drivers/staging/iio/adc/lpc32xx_adc.c
+++ b/drivers/staging/iio/adc/lpc32xx_adc.c
@@ -76,7 +76,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
 
 	if (mask == IIO_CHAN_INFO_RAW) {
 		mutex_lock(&indio_dev->mlock);
-		clk_enable(info->clk);
+		clk_prepare_enable(info->clk);
 		/* Measurement setup */
 		__raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm,
 			     LPC32XX_ADC_SELECT(info->adc_base));
@@ -84,7 +84,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev,
 		__raw_writel(AD_PDN_CTRL | AD_STROBE,
 			     LPC32XX_ADC_CTRL(info->adc_base));
 		wait_for_completion(&info->completion); /* set by ISR */
-		clk_disable(info->clk);
+		clk_disable_unprepare(info->clk);
 		*val = info->value;
 		mutex_unlock(&indio_dev->mlock);
 
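Under the common clock framework clk_enable() is only valid on a clock that has been prepared; clk_prepare() may sleep, so in a sleepable path like this read the paired helpers keep the two steps together. The contract, in miniature (the read path above ignores the return value, but checking it is the usual form):

    ret = clk_prepare_enable(info->clk);  /* prepare (may sleep) + enable */
    if (ret)
        return ret;
    /* ... talk to the ADC ... */
    clk_disable_unprepare(info->clk);     /* undo both steps */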
diff --git a/drivers/staging/iio/iio_simple_dummy_events.c b/drivers/staging/iio/iio_simple_dummy_events.c
index bfbf1c56bd22..6eb600ff7056 100644
--- a/drivers/staging/iio/iio_simple_dummy_events.c
+++ b/drivers/staging/iio/iio_simple_dummy_events.c
@@ -159,7 +159,7 @@ static irqreturn_t iio_simple_dummy_get_timestamp(int irq, void *private)
 	struct iio_dummy_state *st = iio_priv(indio_dev);
 
 	st->event_timestamp = iio_get_time_ns();
-	return IRQ_HANDLED;
+	return IRQ_WAKE_THREAD;
 }
 
 /**
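Returning IRQ_WAKE_THREAD from the hard handler is what actually schedules the threaded half registered with request_threaded_irq(); returning IRQ_HANDLED ends processing there, which is why the dummy event generator never delivered its events. The split, sketched with hypothetical foo_ handlers:

    static irqreturn_t foo_hard_handler(int irq, void *dev_id)
    {
        /* hardirq context: latch the timestamp, keep it short */
        return IRQ_WAKE_THREAD;            /* run foo_thread_fn next */
    }

    static irqreturn_t foo_thread_fn(int irq, void *dev_id)
    {
        /* process context: push the event to its consumers */
        return IRQ_HANDLED;
    }

    ret = request_threaded_irq(irq, foo_hard_handler, foo_thread_fn,
                               IRQF_ONESHOT, "foo", st);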
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index f5d741f25ffd..485ab2670918 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -110,7 +110,6 @@ struct libcfs_ioctl_handler {
 #define IOC_LIBCFS_CLEAR_DEBUG	_IOWR('e', 31, long)
 #define IOC_LIBCFS_MARK_DEBUG	_IOWR('e', 32, long)
 #define IOC_LIBCFS_MEMHOG	_IOWR('e', 36, long)
-#define IOC_LIBCFS_PING_TEST	_IOWR('e', 37, long)
 /* lnet ioctls */
 #define IOC_LIBCFS_GET_NI	_IOWR('e', 50, long)
 #define IOC_LIBCFS_FAIL_NID	_IOWR('e', 51, long)
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
index 07a68594c279..e7c2b26156b9 100644
--- a/drivers/staging/lustre/lustre/libcfs/module.c
+++ b/drivers/staging/lustre/lustre/libcfs/module.c
@@ -274,23 +274,6 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd,
 		}
 		break;
 
-	case IOC_LIBCFS_PING_TEST: {
-		extern void (kping_client)(struct libcfs_ioctl_data *);
-		void (*ping)(struct libcfs_ioctl_data *);
-
-		CDEBUG(D_IOCTL, "doing %d pings to nid %s (%s)\n",
-		       data->ioc_count, libcfs_nid2str(data->ioc_nid),
-		       libcfs_nid2str(data->ioc_nid));
-		ping = symbol_get(kping_client);
-		if (!ping)
-			CERROR("symbol_get failed\n");
-		else {
-			ping(data);
-			symbol_put(kping_client);
-		}
-		return 0;
-	}
-
 	default: {
 		struct libcfs_ioctl_handler *hand;
 
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index f61ef669644c..a4a9a763ff02 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -1270,6 +1270,7 @@ static int
 echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
 {
 	struct lov_stripe_md *ulsm = _ulsm;
+	struct lov_oinfo **p;
 	int nob, i;
 
 	nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
@@ -1279,9 +1280,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
 	if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
 		return -EFAULT;
 
-	for (i = 0; i < lsm->lsm_stripe_count; i++) {
-		if (copy_to_user(ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
-				 sizeof(lsm->lsm_oinfo[0])))
+	for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
+		struct lov_oinfo __user *up;
+		if (get_user(up, ulsm->lsm_oinfo + i) ||
+		    copy_to_user(up, *p, sizeof(struct lov_oinfo)))
 			return -EFAULT;
 	}
 	return 0;
@@ -1289,9 +1291,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
 
 static int
 echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
-		void *ulsm, int ulsm_nob)
+		struct lov_stripe_md __user *ulsm, int ulsm_nob)
 {
 	struct echo_client_obd *ec = ed->ed_ec;
+	struct lov_oinfo **p;
 	int i;
 
 	if (ulsm_nob < sizeof(*lsm))
@@ -1306,11 +1309,10 @@ echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
 	    ((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL))
 		return -EINVAL;
 
-	for (i = 0; i < lsm->lsm_stripe_count; i++) {
-		if (copy_from_user(lsm->lsm_oinfo[i],
-				   ((struct lov_stripe_md *)ulsm)-> \
-				   lsm_oinfo[i],
-				   sizeof(lsm->lsm_oinfo[0])))
+	for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
+		struct lov_oinfo __user *up;
+		if (get_user(up, ulsm->lsm_oinfo + i) ||
+		    copy_from_user(*p, up, sizeof(struct lov_oinfo)))
 			return -EFAULT;
 	}
 	return 0;
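Both loops had been dereferencing ulsm->lsm_oinfo[i] directly, i.e. reading a pointer out of user memory without get_user() and then trusting it. The corrected pattern, reduced to its two steps:

    struct lov_oinfo __user *up;

    if (get_user(up, ulsm->lsm_oinfo + i))   /* 1: fetch the user pointer */
        return -EFAULT;
    if (copy_to_user(up, lsm->lsm_oinfo[i],  /* 2: only then copy through it */
                     sizeof(struct lov_oinfo)))
        return -EFAULT;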
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index e10c6ffa698a..9568bdb6319b 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -13,12 +13,8 @@
 #include "wilc_wlan.h"
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/etherdevice.h>
 #define TAG_PARAM_OFFSET	(MAC_HDR_LEN + TIME_STAMP_LEN + \
 				 BEACON_INTERVAL_LEN + CAP_INFO_LEN)
-#define ADDR1 4
-#define ADDR2 10
-#define ADDR3 16
 
 /* Basic Frame Type Codes (2-bit) */
 enum basic_frame_type {
@@ -175,32 +171,38 @@ static inline u8 get_from_ds(u8 *header)
 	return ((header[1] & 0x02) >> 1);
 }
 
+/* This function extracts the MAC Address in 'address1' field of the MAC */
+/* header and updates the MAC Address in the allocated 'addr' variable.  */
+static inline void get_address1(u8 *pu8msa, u8 *addr)
+{
+	memcpy(addr, pu8msa + 4, 6);
+}
+
+/* This function extracts the MAC Address in 'address2' field of the MAC */
+/* header and updates the MAC Address in the allocated 'addr' variable.  */
+static inline void get_address2(u8 *pu8msa, u8 *addr)
+{
+	memcpy(addr, pu8msa + 10, 6);
+}
+
+/* This function extracts the MAC Address in 'address3' field of the MAC */
+/* header and updates the MAC Address in the allocated 'addr' variable.  */
+static inline void get_address3(u8 *pu8msa, u8 *addr)
+{
+	memcpy(addr, pu8msa + 16, 6);
+}
+
 /* This function extracts the BSSID from the incoming WLAN packet based on */
-/* the 'from ds' bit, and updates the MAC Address in the allocated 'data'  */
+/* the 'from ds' bit, and updates the MAC Address in the allocated 'addr'  */
 /* variable.                                                               */
 static inline void get_BSSID(u8 *data, u8 *bssid)
 {
 	if (get_from_ds(data) == 1)
-		/*
-		 * Extract the MAC Address in 'address2' field of the MAC
-		 * header and update the MAC Address in the allocated 'data'
-		 * variable.
-		 */
-		ether_addr_copy(data, bssid + ADDR2);
+		get_address2(data, bssid);
 	else if (get_to_ds(data) == 1)
-		/*
-		 * Extract the MAC Address in 'address1' field of the MAC
-		 * header and update the MAC Address in the allocated 'data'
-		 * variable.
-		 */
-		ether_addr_copy(data, bssid + ADDR1);
+		get_address1(data, bssid);
 	else
-		/*
-		 * Extract the MAC Address in 'address3' field of the MAC
-		 * header and update the MAC Address in the allocated 'data'
-		 * variable.
-		 */
-		ether_addr_copy(data, bssid + ADDR3);
+		get_address3(data, bssid);
 }
 
 /* This function extracts the SSID from a beacon/probe response frame */
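For reference, the fixed offsets come straight from the IEEE 802.11 MAC header layout, and the revert also undoes an ether_addr_copy() conversion whose source and destination were swapped. The layout the helpers assume:

    /*
     * bytes 0-1  Frame Control        bytes 10-15  Address 2
     * bytes 2-3  Duration/ID          bytes 16-21  Address 3
     * bytes 4-9  Address 1
     *
     * Which of addr1/addr2/addr3 holds the BSSID depends on the
     * To DS / From DS bits, as get_BSSID() above selects.
     */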
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 342a07c58d89..72204fbf2bb1 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4074,6 +4074,17 @@ reject:
 	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 }
 
+static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
+{
+	bool ret;
+
+	spin_lock_bh(&conn->state_lock);
+	ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
+	spin_unlock_bh(&conn->state_lock);
+
+	return ret;
+}
+
 int iscsi_target_rx_thread(void *arg)
 {
 	int ret, rc;
@@ -4091,7 +4102,7 @@ int iscsi_target_rx_thread(void *arg)
 	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
 	 */
 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
-	if (rc < 0)
+	if (rc < 0 || iscsi_target_check_conn_state(conn))
 		return 0;
 
 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 5c964c09c89f..9fc9117d0f22 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -388,6 +388,7 @@ err:
 	if (login->login_complete) {
 		if (conn->rx_thread && conn->rx_thread_active) {
 			send_sig(SIGINT, conn->rx_thread, 1);
+			complete(&conn->rx_login_comp);
 			kthread_stop(conn->rx_thread);
 		}
 		if (conn->tx_thread && conn->tx_thread_active) {
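Together with the iscsi_target_check_conn_state() helper added above, this complete() is what lets kthread_stop() make progress: the rx thread parks in wait_for_completion_interruptible() until login finishes, so a failed login must wake it and leave it a state it can see is dead. The ordering, sketched (the foo_ helpers are illustrative stand-ins for the conn-state accessors):

    /* rx thread */
    wait_for_completion_interruptible(&conn->rx_login_comp);
    if (foo_conn_is_dead(conn))      /* state was published before complete() */
        return 0;

    /* failed-login teardown */
    foo_mark_conn_dead(conn);        /* 1: publish the state      */
    complete(&conn->rx_login_comp);  /* 2: wake the waiter        */
    kthread_stop(conn->rx_thread);   /* 3: now guaranteed to exit */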
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 51d1734d5390..2cbea2af7cd0 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -208,7 +208,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
 	if (!pl) {
 		pr_err("Unable to allocate memory for"
 			" struct iscsi_param_list.\n");
-		return -1 ;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&pl->param_list);
 	INIT_LIST_HEAD(&pl->extra_response_list);
@@ -578,7 +578,7 @@ int iscsi_copy_param_list(
 	param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
 	if (!param_list) {
 		pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
-		return -1;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&param_list->param_list);
 	INIT_LIST_HEAD(&param_list->extra_response_list);
@@ -629,7 +629,7 @@ int iscsi_copy_param_list(
 
 err_out:
 	iscsi_release_param_list(param_list);
-	return -1;
+	return -ENOMEM;
 }
 
 static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
@@ -729,7 +729,7 @@ static int iscsi_add_notunderstood_response(
 	if (!extra_response) {
 		pr_err("Unable to allocate memory for"
			" struct iscsi_extra_response.\n");
-		return -1;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&extra_response->er_list);
 
@@ -1370,7 +1370,7 @@ int iscsi_decode_text_input(
 	tmpbuf = kzalloc(length + 1, GFP_KERNEL);
 	if (!tmpbuf) {
 		pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
-		return -1;
+		return -ENOMEM;
 	}
 
 	memcpy(tmpbuf, textbuf, length);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 0b4b2a67d9f9..98698d875742 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -371,7 +371,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
 	return 0;
 }
 
-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
+					   int *post_ret)
 {
 	unsigned char *buf, *addr;
 	struct scatterlist *sg;
@@ -437,7 +438,8 @@ sbc_execute_rw(struct se_cmd *cmd)
 			       cmd->data_direction);
 }
 
-static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+					     int *post_ret)
 {
 	struct se_device *dev = cmd->se_dev;
 
@@ -447,8 +449,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
 	 * sent to the backend driver.
 	 */
 	spin_lock_irq(&cmd->t_state_lock);
-	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
+	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
 		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+		*post_ret = 1;
+	}
 	spin_unlock_irq(&cmd->t_state_lock);
 
 	/*
@@ -460,7 +464,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
 	return TCM_NO_SENSE;
 }
 
-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
+						 int *post_ret)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct scatterlist *write_sg = NULL, *sg;
@@ -556,11 +561,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
 
 		if (block_size < PAGE_SIZE) {
 			sg_set_page(&write_sg[i], m.page, block_size,
-				    block_size);
+				    m.piter.sg->offset + block_size);
 		} else {
 			sg_miter_next(&m);
 			sg_set_page(&write_sg[i], m.page, block_size,
-				    0);
+				    m.piter.sg->offset);
 		}
 		len -= block_size;
 		i++;
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 273c72b2b83d..81a6b3e07687 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -246,7 +246,7 @@ static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page)
 	char str[sizeof(dev->t10_wwn.model)+1];
 
 	/* scsiLuProductId */
-	for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+	for (i = 0; i < sizeof(dev->t10_wwn.model); i++)
 		str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
 			dev->t10_wwn.model[i] : ' ';
 	str[i] = '\0';
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 5b2820312310..28fb3016370f 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -130,6 +130,9 @@ void core_tmr_abort_task(
 		if (tmr->ref_task_tag != ref_tag)
 			continue;
 
+		if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+			continue;
+
 		printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
 			se_cmd->se_tfo->get_fabric_name(), ref_tag);
 
@@ -139,13 +142,15 @@ void core_tmr_abort_task(
 				" skipping\n", ref_tag);
 			spin_unlock(&se_cmd->t_state_lock);
 			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+			target_put_sess_cmd(se_cmd);
+
 			goto out;
 		}
 		se_cmd->transport_state |= CMD_T_ABORTED;
 		spin_unlock(&se_cmd->t_state_lock);
 
 		list_del_init(&se_cmd->se_cmd_list);
-		kref_get(&se_cmd->cmd_kref);
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
 		cancel_work_sync(&se_cmd->work);
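Taking the reference with kref_get_unless_zero() at lookup time, before the command is inspected, closes the window in which a final target_put_sess_cmd() elsewhere could free se_cmd while the abort path still held a pointer found on the session list. The generic lookup idiom (field names are illustrative):

    spin_lock_irqsave(&sess->cmd_lock, flags);
    list_for_each_entry(cmd, &sess->cmd_list, node) {
        if (cmd->tag != wanted_tag)
            continue;
        if (!kref_get_unless_zero(&cmd->kref))
            continue;   /* refcount already hit zero: object is dying */
        found = cmd;
        break;
    }
    spin_unlock_irqrestore(&sess->cmd_lock, flags);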
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 5bacc7b5ed6d..4fdcee2006d1 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1658,7 +1658,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
 void transport_generic_request_failure(struct se_cmd *cmd,
 		sense_reason_t sense_reason)
 {
-	int ret = 0;
+	int ret = 0, post_ret = 0;
 
 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
 		" CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
@@ -1680,7 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	 */
 	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
 	     cmd->transport_complete_callback)
-		cmd->transport_complete_callback(cmd, false);
+		cmd->transport_complete_callback(cmd, false, &post_ret);
 
 	switch (sense_reason) {
 	case TCM_NON_EXISTENT_LUN:
@@ -2068,11 +2068,13 @@ static void target_complete_ok_work(struct work_struct *work)
 	 */
 	if (cmd->transport_complete_callback) {
 		sense_reason_t rc;
+		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
+		bool zero_dl = !(cmd->data_length);
+		int post_ret = 0;
 
-		rc = cmd->transport_complete_callback(cmd, true);
-		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
-			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-			    !cmd->data_length)
+		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
+		if (!rc && !post_ret) {
+			if (caw && zero_dl)
 				goto queue_rsp;
 
 			return;
@@ -2507,23 +2509,24 @@ out:
 EXPORT_SYMBOL(target_get_sess_cmd);
 
 static void target_release_cmd_kref(struct kref *kref)
-		__releases(&se_cmd->se_sess->sess_cmd_lock)
 {
 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
 	struct se_session *se_sess = se_cmd->se_sess;
+	unsigned long flags;
 
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	if (list_empty(&se_cmd->se_cmd_list)) {
-		spin_unlock(&se_sess->sess_cmd_lock);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return;
 	}
 	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
-		spin_unlock(&se_sess->sess_cmd_lock);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		complete(&se_cmd->cmd_wait_comp);
 		return;
 	}
 	list_del(&se_cmd->se_cmd_list);
-	spin_unlock(&se_sess->sess_cmd_lock);
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
 	se_cmd->se_tfo->release_cmd(se_cmd);
 }
@@ -2539,8 +2542,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return 1;
 	}
-	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
-			&se_sess->sess_cmd_lock);
+	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
 
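kref_put_spinlock_irqsave() existed so the release callback could run with sess_cmd_lock already held; once target_release_cmd_kref() takes and drops the lock itself, a plain kref_put() suffices and the special-purpose helper goes away. The resulting shape, condensed into a hypothetical foo object:

    static void foo_release(struct kref *kref)
    {
        struct foo *obj = container_of(kref, struct foo, kref);
        unsigned long flags;

        spin_lock_irqsave(&obj->owner->lock, flags);
        list_del(&obj->node);
        spin_unlock_irqrestore(&obj->owner->lock, flags);
        kfree(obj);
    }

    kref_put(&obj->kref, foo_release);   /* no lock handed in */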
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 937cebf76633..5e6d6cb348fc 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -638,7 +638,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
 		return 0;
 
-	if (!time_after(cmd->deadline, jiffies))
+	if (!time_after(jiffies, cmd->deadline))
 		return 0;
 
 	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
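time_after(a, b) (include/linux/jiffies.h) is true when a is later than b, with jiffies wraparound handled. The old test, !time_after(cmd->deadline, jiffies), was therefore inverted: commands still inside their deadline were expired and genuinely late ones were skipped. The corrected reading:

    if (!time_after(jiffies, cmd->deadline))
        return 0;   /* "now" has not yet passed the deadline */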
@@ -1101,8 +1101,6 @@ tcmu_parse_cdb(struct se_cmd *cmd)
 
 static const struct target_backend_ops tcmu_ops = {
 	.name			= "user",
-	.inquiry_prod		= "USER",
-	.inquiry_rev		= TCMU_VERSION,
 	.owner			= THIS_MODULE,
 	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
 	.attach_hba		= tcmu_attach_hba,
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c463c89b90ef..8cc4ac64a91c 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -382,7 +382,7 @@ endmenu
 
 config QCOM_SPMI_TEMP_ALARM
 	tristate "Qualcomm SPMI PMIC Temperature Alarm"
-	depends on OF && (SPMI || COMPILE_TEST) && IIO
+	depends on OF && SPMI && IIO
 	select REGMAP_SPMI
 	help
 	  This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP)
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index c8fe3cac2e0e..c5547bd711db 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -55,6 +55,7 @@
 #define TEMPSENSE2_PANIC_VALUE_SHIFT	16
 #define TEMPSENSE2_PANIC_VALUE_MASK	0xfff0000
 
+#define OCOTP_MEM0			0x0480
 #define OCOTP_ANA1			0x04e0
 
 /* The driver supports 1 passive trip point and 1 critical trip point */
@@ -64,12 +65,6 @@ enum imx_thermal_trip {
 	IMX_TRIP_NUM,
 };
 
-/*
- * It defines the temperature in millicelsius for passive trip point
- * that will trigger cooling action when crossed.
- */
-#define IMX_TEMP_PASSIVE		85000
-
 #define IMX_POLLING_DELAY		2000 /* millisecond */
 #define IMX_PASSIVE_DELAY		1000
 
@@ -100,12 +95,14 @@ struct imx_thermal_data {
 	u32 c1, c2; /* See formula in imx_get_sensor_data() */
 	int temp_passive;
 	int temp_critical;
+	int temp_max;
 	int alarm_temp;
 	int last_temp;
 	bool irq_enabled;
 	int irq;
 	struct clk *thermal_clk;
 	const struct thermal_soc_data *socdata;
+	const char *temp_grade;
 };
 
 static void imx_set_panic_temp(struct imx_thermal_data *data,
@@ -285,10 +282,12 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip,
 {
 	struct imx_thermal_data *data = tz->devdata;
 
+	/* do not allow changing critical threshold */
 	if (trip == IMX_TRIP_CRITICAL)
 		return -EPERM;
 
-	if (temp < 0 || temp > IMX_TEMP_PASSIVE)
+	/* do not allow passive to be set higher than critical */
+	if (temp < 0 || temp > data->temp_critical)
 		return -EINVAL;
 
 	data->temp_passive = temp;
@@ -404,17 +403,39 @@ static int imx_get_sensor_data(struct platform_device *pdev)
 	data->c1 = temp64;
 	data->c2 = n1 * data->c1 + 1000 * t1;
 
-	/*
-	 * Set the default passive cooling trip point,
-	 * can be changed from userspace.
-	 */
-	data->temp_passive = IMX_TEMP_PASSIVE;
+	/* use OTP for thermal grade */
+	ret = regmap_read(map, OCOTP_MEM0, &val);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to read temp grade: %d\n", ret);
+		return ret;
+	}
+
+	/* The maximum die temp is specified by the Temperature Grade */
+	switch ((val >> 6) & 0x3) {
+	case 0: /* Commercial (0 to 95C) */
+		data->temp_grade = "Commercial";
+		data->temp_max = 95000;
+		break;
+	case 1: /* Extended Commercial (-20 to 105C) */
+		data->temp_grade = "Extended Commercial";
+		data->temp_max = 105000;
+		break;
+	case 2: /* Industrial (-40 to 105C) */
+		data->temp_grade = "Industrial";
+		data->temp_max = 105000;
+		break;
+	case 3: /* Automotive (-40 to 125C) */
+		data->temp_grade = "Automotive";
+		data->temp_max = 125000;
+		break;
+	}
 
 	/*
-	 * The maximum die temperature set to 20 C higher than
-	 * IMX_TEMP_PASSIVE.
+	 * Set the critical trip point at 5C under max
+	 * Set the passive trip point at 10C under max (can change via sysfs)
 	 */
-	data->temp_critical = 1000 * 20 + data->temp_passive;
+	data->temp_critical = data->temp_max - (1000 * 5);
+	data->temp_passive = data->temp_max - (1000 * 10);
 
 	return 0;
 }
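The trip points are now derived from the part's fused temperature grade rather than the former one-size 85C constant. The decode above reduces to a two-bit field and a lookup; a compact restatement (the array is illustrative, the bit positions follow the switch above, and val is the OCOTP_MEM0 word just read):

    static const int grade_max_mc[4] = {
        95000, 105000, 105000, 125000   /* commercial ... automotive */
    };
    int temp_max      = grade_max_mc[(val >> 6) & 0x3];
    int temp_critical = temp_max - 5000;    /* 5C of headroom  */
    int temp_passive  = temp_max - 10000;   /* throttle sooner */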
@@ -551,6 +572,11 @@ static int imx_thermal_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC"
+		 " critical:%dC passive:%dC\n", data->temp_grade,
+		 data->temp_max / 1000, data->temp_critical / 1000,
+		 data->temp_passive / 1000);
+
 	/* Enable measurements at ~ 10 Hz */
 	regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
 	measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 42b7d4253b94..be4eedcb839a 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -964,7 +964,7 @@ void of_thermal_destroy_zones(void)
 
 	np = of_find_node_by_name(NULL, "thermal-zones");
 	if (!np) {
-		pr_err("unable to find thermal zones\n");
+		pr_debug("unable to find thermal zones\n");
 		return;
 	}
 
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index f0fbea386869..1246aa6fcab0 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -174,7 +174,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
 /**
  * pid_controller() - PID controller
  * @tz:	thermal zone we are operating in
- * @current_temp:	the current temperature in millicelsius
  * @control_temp:	the target temperature in millicelsius
  * @max_allocatable_power:	maximum allocatable power for this thermal zone
 *
@@ -191,7 +190,6 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
 * Return: The power budget for the next period.
 */
static u32 pid_controller(struct thermal_zone_device *tz,
-			  int current_temp,
			  int control_temp,
			  u32 max_allocatable_power)
{
@@ -211,7 +209,7 @@ static u32 pid_controller(struct thermal_zone_device *tz,
				       true);
	}

-	err = control_temp - current_temp;
+	err = control_temp - tz->temperature;
	err = int_to_frac(err);

	/* Calculate the proportional term */
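The governor now feeds the PID loop from tz->temperature, the value cached by the last thermal_zone_device_update(), instead of re-reading the sensor mid-throttle; every consumer within one polling cycle then works from the same sample. The error term it computes is the plain proportional input:

    /* millicelsius; positive err means we are below the target */
    s64 err = int_to_frac(control_temp - tz->temperature);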
@@ -332,7 +330,6 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
 }
 
 static int allocate_power(struct thermal_zone_device *tz,
-			  int current_temp,
			  int control_temp)
 {
 	struct thermal_instance *instance;
@@ -418,8 +415,7 @@ static int allocate_power(struct thermal_zone_device *tz,
 		i++;
 	}
 
-	power_range = pid_controller(tz, current_temp, control_temp,
-				     max_allocatable_power);
+	power_range = pid_controller(tz, control_temp, max_allocatable_power);
 
 	divvy_up_power(weighted_req_power, max_power, num_actors,
 		       total_weighted_req_power, power_range, granted_power,
@@ -444,8 +440,8 @@ static int allocate_power(struct thermal_zone_device *tz,
 	trace_thermal_power_allocator(tz, req_power, total_req_power,
 				      granted_power, total_granted_power,
 				      num_actors, power_range,
-				      max_allocatable_power, current_temp,
-				      control_temp - current_temp);
+				      max_allocatable_power, tz->temperature,
+				      control_temp - tz->temperature);
 
 	kfree(req_power);
 unlock:
@@ -612,7 +608,7 @@ static void power_allocator_unbind(struct thermal_zone_device *tz)
 static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
 {
 	int ret;
-	int switch_on_temp, control_temp, current_temp;
+	int switch_on_temp, control_temp;
 	struct power_allocator_params *params = tz->governor_data;
 
 	/*
@@ -622,15 +618,9 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
 	if (trip != params->trip_max_desired_temperature)
 		return 0;
 
-	ret = thermal_zone_get_temp(tz, &current_temp);
-	if (ret) {
-		dev_warn(&tz->device, "Failed to get temperature: %d\n", ret);
-		return ret;
-	}
-
 	ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
 				     &switch_on_temp);
-	if (!ret && (current_temp < switch_on_temp)) {
+	if (!ret && (tz->temperature < switch_on_temp)) {
 		tz->passive = 0;
 		reset_pid_controller(params);
 		allow_maximum_power(tz);
@@ -648,7 +638,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
 		return ret;
 	}
 
-	return allocate_power(tz, current_temp, control_temp);
+	return allocate_power(tz, control_temp);
 }
 
 static struct thermal_governor thermal_gov_power_allocator = {
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 5d4ae7d705e0..13d01edc7a04 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -361,6 +361,24 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data)
 /*
  *		platform functions
  */
+static int rcar_thermal_remove(struct platform_device *pdev)
+{
+	struct rcar_thermal_common *common = platform_get_drvdata(pdev);
+	struct device *dev = &pdev->dev;
+	struct rcar_thermal_priv *priv;
+
+	rcar_thermal_for_each_priv(priv, common) {
+		if (rcar_has_irq_support(priv))
+			rcar_thermal_irq_disable(priv);
+		thermal_zone_device_unregister(priv->zone);
+	}
+
+	pm_runtime_put(dev);
+	pm_runtime_disable(dev);
+
+	return 0;
+}
+
 static int rcar_thermal_probe(struct platform_device *pdev)
 {
 	struct rcar_thermal_common *common;
@@ -377,6 +395,8 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 	if (!common)
 		return -ENOMEM;
 
+	platform_set_drvdata(pdev, common);
+
 	INIT_LIST_HEAD(&common->head);
 	spin_lock_init(&common->lock);
 	common->dev = dev;
@@ -454,43 +474,16 @@ static int rcar_thermal_probe(struct platform_device *pdev)
 		rcar_thermal_common_write(common, ENR, enr_bits);
 	}
 
-	platform_set_drvdata(pdev, common);
-
 	dev_info(dev, "%d sensor probed\n", i);
 
 	return 0;
 
 error_unregister:
-	rcar_thermal_for_each_priv(priv, common) {
-		if (rcar_has_irq_support(priv))
-			rcar_thermal_irq_disable(priv);
-		thermal_zone_device_unregister(priv->zone);
-	}
-
-	pm_runtime_put(dev);
-	pm_runtime_disable(dev);
+	rcar_thermal_remove(pdev);
 
 	return ret;
 }
 
-static int rcar_thermal_remove(struct platform_device *pdev)
-{
-	struct rcar_thermal_common *common = platform_get_drvdata(pdev);
-	struct device *dev = &pdev->dev;
-	struct rcar_thermal_priv *priv;
-
-	rcar_thermal_for_each_priv(priv, common) {
-		if (rcar_has_irq_support(priv))
-			rcar_thermal_irq_disable(priv);
-		thermal_zone_device_unregister(priv->zone);
-	}
-
-	pm_runtime_put(dev);
-	pm_runtime_disable(dev);
-
-	return 0;
-}
-
 static const struct of_device_id rcar_thermal_dt_ids[] = {
 	{ .compatible = "renesas,rcar-thermal", },
 	{},
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 9787e8aa509f..e845841ab036 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -1,6 +1,9 @@
 /*
  * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
  *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ * Caesar Wang <wxt@rock-chips.com>
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * version 2, as published by the Free Software Foundation.
@@ -45,17 +48,50 @@ enum tshut_polarity {
 };
 
 /**
- * The system has three Temperature Sensors.  channel 0 is reserved,
- * channel 1 is for CPU, and channel 2 is for GPU.
+ * The system has two Temperature Sensors.
+ * sensor0 is for CPU, and sensor1 is for GPU.
  */
 enum sensor_id {
-	SENSOR_CPU = 1,
+	SENSOR_CPU = 0,
 	SENSOR_GPU,
 };
 
+/**
+ * The conversion table holds adc value/temperature pairs.
+ * ADC_DECREMENT: the adc value decreases through the table (e.g. v2_code_table)
+ * ADC_INCREMENT: the adc value increases through the table (e.g. v3_code_table)
+ */
+enum adc_sort_mode {
+	ADC_DECREMENT = 0,
+	ADC_INCREMENT,
+};
+
+/**
+ * The max sensors is two in rockchip SoCs:
+ * one CPU sensor and one GPU sensor.
+ */
+#define SOC_MAX_SENSORS	2
+
+struct chip_tsadc_table {
+	const struct tsadc_table *id;
+
+	/* the array table size */
+	unsigned int length;
+
+	/* the mask applied to raw adc data */
+	u32 data_mask;
+
+	/* whether adc values increment or decrement through the table */
+	enum adc_sort_mode mode;
+};
+
 struct rockchip_tsadc_chip {
+	/* The sensor id of chip correspond to the ADC channel */
+	int chn_id[SOC_MAX_SENSORS];
+	int chn_num;
+
 	/* The hardware-controlled tshut property */
-	long tshut_temp;
+	int tshut_temp;
 	enum tshut_mode tshut_mode;
 	enum tshut_polarity tshut_polarity;
 
@@ -65,37 +101,40 @@ struct rockchip_tsadc_chip {
 	void (*control)(void __iomem *reg, bool on);
 
 	/* Per-sensor methods */
-	int (*get_temp)(int chn, void __iomem *reg, int *temp);
-	void (*set_tshut_temp)(int chn, void __iomem *reg, long temp);
+	int (*get_temp)(struct chip_tsadc_table table,
+			int chn, void __iomem *reg, int *temp);
+	void (*set_tshut_temp)(struct chip_tsadc_table table,
+			       int chn, void __iomem *reg, int temp);
 	void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
+
+	/* Per-table methods */
+	struct chip_tsadc_table table;
 };
 
 struct rockchip_thermal_sensor {
 	struct rockchip_thermal_data *thermal;
 	struct thermal_zone_device *tzd;
-	enum sensor_id id;
+	int id;
 };
 
-#define NUM_SENSORS	2 /* Ignore unused sensor 0 */
-
 struct rockchip_thermal_data {
 	const struct rockchip_tsadc_chip *chip;
 	struct platform_device *pdev;
 	struct reset_control *reset;
 
-	struct rockchip_thermal_sensor sensors[NUM_SENSORS];
+	struct rockchip_thermal_sensor sensors[SOC_MAX_SENSORS];
 
 	struct clk *clk;
 	struct clk *pclk;
 
 	void __iomem *regs;
 
-	long tshut_temp;
+	int tshut_temp;
 	enum tshut_mode tshut_mode;
 	enum tshut_polarity tshut_polarity;
 };
 
-/* TSADC V2 Sensor info define: */
+/* TSADC Sensor info define: */
 #define TSADCV2_AUTO_CON			0x04
 #define TSADCV2_INT_EN				0x08
 #define TSADCV2_INT_PD				0x0c
@@ -117,6 +156,8 @@ struct rockchip_thermal_data {
 #define TSADCV2_INT_PD_CLEAR_MASK		~BIT(8)
 
 #define TSADCV2_DATA_MASK			0xfff
+#define TSADCV3_DATA_MASK			0x3ff
+
 #define TSADCV2_HIGHT_INT_DEBOUNCE_COUNT	4
 #define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT	4
 #define TSADCV2_AUTO_PERIOD_TIME		250 /* msec */
@@ -124,7 +165,7 @@ struct rockchip_thermal_data {
 
 struct tsadc_table {
 	u32 code;
-	long temp;
+	int temp;
 };
 
 static const struct tsadc_table v2_code_table[] = {
@@ -165,21 +206,61 @@ static const struct tsadc_table v2_code_table[] = {
 	{3421, 125000},
 };
 
-static u32 rk_tsadcv2_temp_to_code(long temp)
+static const struct tsadc_table v3_code_table[] = {
+	{0, -40000},
+	{106, -40000},
+	{108, -35000},
+	{110, -30000},
+	{112, -25000},
+	{114, -20000},
+	{116, -15000},
+	{118, -10000},
+	{120, -5000},
+	{122, 0},
+	{124, 5000},
+	{126, 10000},
+	{128, 15000},
+	{130, 20000},
+	{132, 25000},
+	{134, 30000},
+	{136, 35000},
+	{138, 40000},
+	{140, 45000},
+	{142, 50000},
+	{144, 55000},
+	{146, 60000},
+	{148, 65000},
+	{150, 70000},
+	{152, 75000},
+	{154, 80000},
+	{156, 85000},
+	{158, 90000},
+	{160, 95000},
+	{162, 100000},
+	{163, 105000},
+	{165, 110000},
+	{167, 115000},
+	{169, 120000},
+	{171, 125000},
+	{TSADCV3_DATA_MASK, 125000},
+};
+
+static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
+				   int temp)
 {
 	int high, low, mid;
 
 	low = 0;
-	high = ARRAY_SIZE(v2_code_table) - 1;
+	high = table.length - 1;
 	mid = (high + low) / 2;
 
-	if (temp < v2_code_table[low].temp || temp > v2_code_table[high].temp)
+	if (temp < table.id[low].temp || temp > table.id[high].temp)
 		return 0;
 
 	while (low <= high) {
-		if (temp == v2_code_table[mid].temp)
-			return v2_code_table[mid].code;
-		else if (temp < v2_code_table[mid].temp)
+		if (temp == table.id[mid].temp)
+			return table.id[mid].code;
+		else if (temp < table.id[mid].temp)
 			high = mid - 1;
 		else
 			low = mid + 1;
@@ -189,29 +270,54 @@
 	return 0;
 }
 
-static int rk_tsadcv2_code_to_temp(u32 code, int *temp)
+static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
+				   int *temp)
 {
 	unsigned int low = 1;
-	unsigned int high = ARRAY_SIZE(v2_code_table) - 1;
+	unsigned int high = table.length - 1;
 	unsigned int mid = (low + high) / 2;
 	unsigned int num;
 	unsigned long denom;
 
-	BUILD_BUG_ON(ARRAY_SIZE(v2_code_table) < 2);
+	WARN_ON(table.length < 2);
 
-	code &= TSADCV2_DATA_MASK;
-	if (code < v2_code_table[high].code)
-		return -EAGAIN;		/* Incorrect reading */
-
-	while (low <= high) {
-		if (code >= v2_code_table[mid].code &&
-		    code < v2_code_table[mid - 1].code)
-			break;
-		else if (code < v2_code_table[mid].code)
-			low = mid + 1;
-		else
-			high = mid - 1;
-		mid = (low + high) / 2;
+	switch (table.mode) {
+	case ADC_DECREMENT:
+		code &= table.data_mask;
+		if (code < table.id[high].code)
+			return -EAGAIN;		/* Incorrect reading */
+
+		while (low <= high) {
+			if (code >= table.id[mid].code &&
+			    code < table.id[mid - 1].code)
+				break;
+			else if (code < table.id[mid].code)
+				low = mid + 1;
+			else
+				high = mid - 1;
+
+			mid = (low + high) / 2;
+		}
+		break;
+	case ADC_INCREMENT:
+		code &= table.data_mask;
+		if (code < table.id[low].code)
+			return -EAGAIN;		/* Incorrect reading */
+
+		while (low <= high) {
+			if (code >= table.id[mid - 1].code &&
+			    code < table.id[mid].code)
+				break;
+			else if (code > table.id[mid].code)
+				low = mid + 1;
+			else
+				high = mid - 1;
+
+			mid = (low + high) / 2;
+		}
+		break;
+	default:
+		pr_err("Invalid conversion table\n");
 	}
 
 	/*
@@ -220,24 +326,28 @@ static int rk_tsadcv2_code_to_temp(u32 code, int *temp)
 	 * temperature between 2 table entries is linear and interpolate
 	 * to produce less granular result.
 	 */
-	num = v2_code_table[mid].temp - v2_code_table[mid - 1].temp;
-	num *= v2_code_table[mid - 1].code - code;
-	denom = v2_code_table[mid - 1].code - v2_code_table[mid].code;
-	*temp = v2_code_table[mid - 1].temp + (num / denom);
+	num = table.id[mid].temp - v2_code_table[mid - 1].temp;
+	num *= abs(table.id[mid - 1].code - code);
+	denom = abs(table.id[mid - 1].code - table.id[mid].code);
+	*temp = table.id[mid - 1].temp + (num / denom);
 
 	return 0;
 }
 
 /**
- * rk_tsadcv2_initialize - initialize TASDC Controller
- * (1) Set TSADCV2_AUTO_PERIOD, configure the interleave between
- * every two accessing of TSADC in normal operation.
- * (2) Set TSADCV2_AUTO_PERIOD_HT, configure the interleave between
- * every two accessing of TSADC after the temperature is higher
- * than COM_SHUT or COM_INT.
- * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE,
- * if the temperature is higher than COMP_INT or COMP_SHUT for
- * "debounce" times, TSADC controller will generate interrupt or TSHUT.
+ * rk_tsadcv2_initialize - initialize TASDC Controller.
+ *
+ * (1) Set TSADC_V2_AUTO_PERIOD:
+ *     Configure the interleave between every two accessing of
+ *     TSADC in normal operation.
+ *
+ * (2) Set TSADCV2_AUTO_PERIOD_HT:
+ *     Configure the interleave between every two accessing of
+ *     TSADC after the temperature is higher than COM_SHUT or COM_INT.
+ *
+ * (3) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE:
+ *     If the temperature is higher than COMP_INT or COMP_SHUT for
+ *     "debounce" times, TSADC controller will generate interrupt or TSHUT.
  */
 static void rk_tsadcv2_initialize(void __iomem *regs,
 				  enum tshut_polarity tshut_polarity)
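A worked pass through the interpolation, using two adjacent v3_code_table entries from above ({150, 70000} and {152, 75000}, increment mode) for a raw reading of code 151:

    /*
     * num   = (75000 - 70000) * |150 - 151| = 5000
     * denom = |150 - 152|                   = 2
     * temp  = 70000 + 5000 / 2              = 72500 millicelsius
     *
     * Note the merged first line still reads v2_code_table[mid - 1].temp;
     * that only yields the intended value for the v2 table and looks like
     * a leftover from the table-generic conversion.
     */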
@@ -279,20 +389,22 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable)
 	writel_relaxed(val, regs + TSADCV2_AUTO_CON);
 }
 
-static int rk_tsadcv2_get_temp(int chn, void __iomem *regs, int *temp)
+static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
+			       int chn, void __iomem *regs, int *temp)
 {
 	u32 val;
 
 	val = readl_relaxed(regs + TSADCV2_DATA(chn));
 
-	return rk_tsadcv2_code_to_temp(val, temp);
+	return rk_tsadcv2_code_to_temp(table, val, temp);
 }
 
-static void rk_tsadcv2_tshut_temp(int chn, void __iomem *regs, long temp)
+static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table,
+				  int chn, void __iomem *regs, int temp)
 {
 	u32 tshut_value, val;
 
-	tshut_value = rk_tsadcv2_temp_to_code(temp);
+	tshut_value = rk_tsadcv2_temp_to_code(table, temp);
 	writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
 
 	/* TSHUT will be valid */
@@ -318,6 +430,10 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
 }
 
 static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
+	.chn_id[SENSOR_CPU] = 1, /* cpu sensor is channel 1 */
+	.chn_id[SENSOR_GPU] = 2, /* gpu sensor is channel 2 */
+	.chn_num = 2, /* two channels for tsadc */
+
 	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
 	.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
 	.tshut_temp = 95000,
@@ -328,6 +444,37 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
328 .get_temp = rk_tsadcv2_get_temp, 444 .get_temp = rk_tsadcv2_get_temp,
329 .set_tshut_temp = rk_tsadcv2_tshut_temp, 445 .set_tshut_temp = rk_tsadcv2_tshut_temp,
330 .set_tshut_mode = rk_tsadcv2_tshut_mode, 446 .set_tshut_mode = rk_tsadcv2_tshut_mode,
447
448 .table = {
449 .id = v2_code_table,
450 .length = ARRAY_SIZE(v2_code_table),
451 .data_mask = TSADCV2_DATA_MASK,
452 .mode = ADC_DECREMENT,
453 },
454};
455
456static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
457 .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
458 .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
459 .chn_num = 2, /* two channels for tsadc */
460
 461 .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO to PMIC */
462 .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
463 .tshut_temp = 95000,
464
465 .initialize = rk_tsadcv2_initialize,
466 .irq_ack = rk_tsadcv2_irq_ack,
467 .control = rk_tsadcv2_control,
468 .get_temp = rk_tsadcv2_get_temp,
469 .set_tshut_temp = rk_tsadcv2_tshut_temp,
470 .set_tshut_mode = rk_tsadcv2_tshut_mode,
471
472 .table = {
473 .id = v3_code_table,
474 .length = ARRAY_SIZE(v3_code_table),
475 .data_mask = TSADCV3_DATA_MASK,
476 .mode = ADC_INCREMENT,
477 },
331}; 478};
332 479
333static const struct of_device_id of_rockchip_thermal_match[] = { 480static const struct of_device_id of_rockchip_thermal_match[] = {
@@ -335,6 +482,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = {
335 .compatible = "rockchip,rk3288-tsadc", 482 .compatible = "rockchip,rk3288-tsadc",
336 .data = (void *)&rk3288_tsadc_data, 483 .data = (void *)&rk3288_tsadc_data,
337 }, 484 },
485 {
486 .compatible = "rockchip,rk3368-tsadc",
487 .data = (void *)&rk3368_tsadc_data,
488 },
338 { /* end */ }, 489 { /* end */ },
339}; 490};
340MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match); 491MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match);
@@ -357,7 +508,7 @@ static irqreturn_t rockchip_thermal_alarm_irq_thread(int irq, void *dev)
357 508
358 thermal->chip->irq_ack(thermal->regs); 509 thermal->chip->irq_ack(thermal->regs);
359 510
360 for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) 511 for (i = 0; i < thermal->chip->chn_num; i++)
361 thermal_zone_device_update(thermal->sensors[i].tzd); 512 thermal_zone_device_update(thermal->sensors[i].tzd);
362 513
363 return IRQ_HANDLED; 514 return IRQ_HANDLED;
@@ -370,7 +521,8 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
370 const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip; 521 const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
371 int retval; 522 int retval;
372 523
373 retval = tsadc->get_temp(sensor->id, thermal->regs, out_temp); 524 retval = tsadc->get_temp(tsadc->table,
525 sensor->id, thermal->regs, out_temp);
374 dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n", 526 dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
375 sensor->id, *out_temp, retval); 527 sensor->id, *out_temp, retval);
376 528
@@ -389,7 +541,7 @@ static int rockchip_configure_from_dt(struct device *dev,
389 541
390 if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) { 542 if (of_property_read_u32(np, "rockchip,hw-tshut-temp", &shut_temp)) {
391 dev_warn(dev, 543 dev_warn(dev,
392 "Missing tshut temp property, using default %ld\n", 544 "Missing tshut temp property, using default %d\n",
393 thermal->chip->tshut_temp); 545 thermal->chip->tshut_temp);
394 thermal->tshut_temp = thermal->chip->tshut_temp; 546 thermal->tshut_temp = thermal->chip->tshut_temp;
395 } else { 547 } else {
@@ -397,7 +549,7 @@ static int rockchip_configure_from_dt(struct device *dev,
397 } 549 }
398 550
399 if (thermal->tshut_temp > INT_MAX) { 551 if (thermal->tshut_temp > INT_MAX) {
400 dev_err(dev, "Invalid tshut temperature specified: %ld\n", 552 dev_err(dev, "Invalid tshut temperature specified: %d\n",
401 thermal->tshut_temp); 553 thermal->tshut_temp);
402 return -ERANGE; 554 return -ERANGE;
403 } 555 }
@@ -442,13 +594,14 @@ static int
442rockchip_thermal_register_sensor(struct platform_device *pdev, 594rockchip_thermal_register_sensor(struct platform_device *pdev,
443 struct rockchip_thermal_data *thermal, 595 struct rockchip_thermal_data *thermal,
444 struct rockchip_thermal_sensor *sensor, 596 struct rockchip_thermal_sensor *sensor,
445 enum sensor_id id) 597 int id)
446{ 598{
447 const struct rockchip_tsadc_chip *tsadc = thermal->chip; 599 const struct rockchip_tsadc_chip *tsadc = thermal->chip;
448 int error; 600 int error;
449 601
450 tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode); 602 tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
451 tsadc->set_tshut_temp(id, thermal->regs, thermal->tshut_temp); 603 tsadc->set_tshut_temp(tsadc->table, id, thermal->regs,
604 thermal->tshut_temp);
452 605
453 sensor->thermal = thermal; 606 sensor->thermal = thermal;
454 sensor->id = id; 607 sensor->id = id;
@@ -481,7 +634,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
481 const struct of_device_id *match; 634 const struct of_device_id *match;
482 struct resource *res; 635 struct resource *res;
483 int irq; 636 int irq;
484 int i; 637 int i, j;
485 int error; 638 int error;
486 639
487 match = of_match_node(of_rockchip_thermal_match, np); 640 match = of_match_node(of_rockchip_thermal_match, np);
@@ -556,22 +709,19 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
556 709
557 thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); 710 thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
558 711
559 error = rockchip_thermal_register_sensor(pdev, thermal, 712 for (i = 0; i < thermal->chip->chn_num; i++) {
560 &thermal->sensors[0], 713 error = rockchip_thermal_register_sensor(pdev, thermal,
561 SENSOR_CPU); 714 &thermal->sensors[i],
562 if (error) { 715 thermal->chip->chn_id[i]);
563 dev_err(&pdev->dev, 716 if (error) {
564 "failed to register CPU thermal sensor: %d\n", error); 717 dev_err(&pdev->dev,
565 goto err_disable_pclk; 718 "failed to register sensor[%d] : error = %d\n",
566 } 719 i, error);
567 720 for (j = 0; j < i; j++)
568 error = rockchip_thermal_register_sensor(pdev, thermal, 721 thermal_zone_of_sensor_unregister(&pdev->dev,
569 &thermal->sensors[1], 722 thermal->sensors[j].tzd);
570 SENSOR_GPU); 723 goto err_disable_pclk;
571 if (error) { 724 }
572 dev_err(&pdev->dev,
573 "failed to register GPU thermal sensor: %d\n", error);
574 goto err_unregister_cpu_sensor;
575 } 725 }
576 726
577 error = devm_request_threaded_irq(&pdev->dev, irq, NULL, 727 error = devm_request_threaded_irq(&pdev->dev, irq, NULL,
@@ -581,22 +731,23 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
581 if (error) { 731 if (error) {
582 dev_err(&pdev->dev, 732 dev_err(&pdev->dev,
583 "failed to request tsadc irq: %d\n", error); 733 "failed to request tsadc irq: %d\n", error);
584 goto err_unregister_gpu_sensor; 734 goto err_unregister_sensor;
585 } 735 }
586 736
587 thermal->chip->control(thermal->regs, true); 737 thermal->chip->control(thermal->regs, true);
588 738
589 for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) 739 for (i = 0; i < thermal->chip->chn_num; i++)
590 rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); 740 rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
591 741
592 platform_set_drvdata(pdev, thermal); 742 platform_set_drvdata(pdev, thermal);
593 743
594 return 0; 744 return 0;
595 745
596err_unregister_gpu_sensor: 746err_unregister_sensor:
597 thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[1].tzd); 747 while (i--)
598err_unregister_cpu_sensor: 748 thermal_zone_of_sensor_unregister(&pdev->dev,
599 thermal_zone_of_sensor_unregister(&pdev->dev, thermal->sensors[0].tzd); 749 thermal->sensors[i].tzd);
750
600err_disable_pclk: 751err_disable_pclk:
601 clk_disable_unprepare(thermal->pclk); 752 clk_disable_unprepare(thermal->pclk);
602err_disable_clk: 753err_disable_clk:
@@ -610,7 +761,7 @@ static int rockchip_thermal_remove(struct platform_device *pdev)
610 struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); 761 struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
611 int i; 762 int i;
612 763
613 for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) { 764 for (i = 0; i < thermal->chip->chn_num; i++) {
614 struct rockchip_thermal_sensor *sensor = &thermal->sensors[i]; 765 struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];
615 766
616 rockchip_thermal_toggle_sensor(sensor, false); 767 rockchip_thermal_toggle_sensor(sensor, false);
@@ -631,7 +782,7 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev)
631 struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev); 782 struct rockchip_thermal_data *thermal = platform_get_drvdata(pdev);
632 int i; 783 int i;
633 784
634 for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) 785 for (i = 0; i < thermal->chip->chn_num; i++)
635 rockchip_thermal_toggle_sensor(&thermal->sensors[i], false); 786 rockchip_thermal_toggle_sensor(&thermal->sensors[i], false);
636 787
637 thermal->chip->control(thermal->regs, false); 788 thermal->chip->control(thermal->regs, false);
@@ -663,18 +814,19 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
663 814
664 thermal->chip->initialize(thermal->regs, thermal->tshut_polarity); 815 thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
665 816
666 for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) { 817 for (i = 0; i < thermal->chip->chn_num; i++) {
667 enum sensor_id id = thermal->sensors[i].id; 818 int id = thermal->sensors[i].id;
668 819
669 thermal->chip->set_tshut_mode(id, thermal->regs, 820 thermal->chip->set_tshut_mode(id, thermal->regs,
670 thermal->tshut_mode); 821 thermal->tshut_mode);
671 thermal->chip->set_tshut_temp(id, thermal->regs, 822 thermal->chip->set_tshut_temp(thermal->chip->table,
823 id, thermal->regs,
672 thermal->tshut_temp); 824 thermal->tshut_temp);
673 } 825 }
674 826
675 thermal->chip->control(thermal->regs, true); 827 thermal->chip->control(thermal->regs, true);
676 828
677 for (i = 0; i < ARRAY_SIZE(thermal->sensors); i++) 829 for (i = 0; i < thermal->chip->chn_num; i++)
678 rockchip_thermal_toggle_sensor(&thermal->sensors[i], true); 830 rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
679 831
680 pinctrl_pm_select_default_state(dev); 832 pinctrl_pm_select_default_state(dev);
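The conversion code above is a plain linear interpolation between neighbouring entries of the per-chip code table, walked in whichever direction table.mode selects. A minimal sketch, not part of the patch: a userspace model of that lookup with invented calibration points (the real rk3288/rk3368 tables differ).

    #include <stdio.h>
    #include <stdlib.h>

    enum adc_mode { ADC_DECREMENT, ADC_INCREMENT };

    struct tsadc_entry { unsigned int code; int temp; /* millicelsius */ };

    /* invented calibration points, ordered by temperature */
    static const struct tsadc_entry table[] = {
        { 3800, -40000 }, { 3400, 0 }, { 3000, 40000 }, { 2600, 85000 },
    };

    static int code_to_temp(enum adc_mode mode, unsigned int code, int *temp)
    {
        size_t n = sizeof(table) / sizeof(table[0]), i;

        for (i = 1; i < n; i++) {
            int hit = (mode == ADC_DECREMENT)
                ? (code <= table[i - 1].code && code > table[i].code)
                : (code >= table[i - 1].code && code < table[i].code);
            if (hit) {
                /* interpolate linearly between the two neighbours,
                 * mirroring the num/denom step in the driver */
                long num = (long)(table[i].temp - table[i - 1].temp) *
                           abs((int)code - (int)table[i - 1].code);
                long denom = abs((int)table[i - 1].code - (int)table[i].code);
                *temp = table[i - 1].temp + (int)(num / denom);
                return 0;
            }
        }
        return -1; /* out of range; the driver returns an error here */
    }

    int main(void)
    {
        int temp;
        if (!code_to_temp(ADC_DECREMENT, 3200, &temp))
            printf("temp = %d millicelsius\n", temp); /* prints 20000 */
        return 0;
    }

Keeping the table pointer, length, data mask and direction together in one struct chip_tsadc_table is what lets rk3288 (decrementing codes) and rk3368 (incrementing codes) share rk_tsadcv2_get_temp() and rk_tsadcv2_tshut_temp().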
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 13844261cd5f..e49c2bce551d 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -169,7 +169,7 @@ static inline int tty_copy_to_user(struct tty_struct *tty,
169{ 169{
170 struct n_tty_data *ldata = tty->disc_data; 170 struct n_tty_data *ldata = tty->disc_data;
171 171
172 tty_audit_add_data(tty, to, n, ldata->icanon); 172 tty_audit_add_data(tty, from, n, ldata->icanon);
173 return copy_to_user(to, from, n); 173 return copy_to_user(to, from, n);
174} 174}
175 175
@@ -2054,13 +2054,13 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
2054 size_t eol; 2054 size_t eol;
2055 size_t tail; 2055 size_t tail;
2056 int ret, found = 0; 2056 int ret, found = 0;
2057 bool eof_push = 0;
2058 2057
2059 /* N.B. avoid overrun if nr == 0 */ 2058 /* N.B. avoid overrun if nr == 0 */
2060 n = min(*nr, smp_load_acquire(&ldata->canon_head) - ldata->read_tail); 2059 if (!*nr)
2061 if (!n)
2062 return 0; 2060 return 0;
2063 2061
2062 n = min(*nr + 1, smp_load_acquire(&ldata->canon_head) - ldata->read_tail);
2063
2064 tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); 2064 tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
2065 size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); 2065 size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
2066 2066
@@ -2081,12 +2081,11 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
2081 n = eol - tail; 2081 n = eol - tail;
2082 if (n > N_TTY_BUF_SIZE) 2082 if (n > N_TTY_BUF_SIZE)
2083 n += N_TTY_BUF_SIZE; 2083 n += N_TTY_BUF_SIZE;
2084 n += found; 2084 c = n + found;
2085 c = n;
2086 2085
2087 if (found && !ldata->push && read_buf(ldata, eol) == __DISABLED_CHAR) { 2086 if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
2088 n--; 2087 c = min(*nr, c);
2089 eof_push = !n && ldata->read_tail != ldata->line_start; 2088 n = c;
2090 } 2089 }
2091 2090
2092 n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n", 2091 n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n",
@@ -2116,7 +2115,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
2116 ldata->push = 0; 2115 ldata->push = 0;
2117 tty_audit_push(tty); 2116 tty_audit_push(tty);
2118 } 2117 }
2119 return eof_push ? -EAGAIN : 0; 2118 return 0;
2120} 2119}
2121 2120
2122extern ssize_t redirected_tty_write(struct file *, const char __user *, 2121extern ssize_t redirected_tty_write(struct file *, const char __user *,
@@ -2273,10 +2272,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2273 2272
2274 if (ldata->icanon && !L_EXTPROC(tty)) { 2273 if (ldata->icanon && !L_EXTPROC(tty)) {
2275 retval = canon_copy_from_read_buf(tty, &b, &nr); 2274 retval = canon_copy_from_read_buf(tty, &b, &nr);
2276 if (retval == -EAGAIN) { 2275 if (retval)
2277 retval = 0;
2278 continue;
2279 } else if (retval)
2280 break; 2276 break;
2281 } else { 2277 } else {
2282 int uncopied; 2278 int uncopied;
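The canon_copy_from_read_buf() rework above changes how a canonical read is sized: it scans up to *nr + 1 buffered bytes so the line terminator is still found when the user buffer is exactly one line long, consumes the terminator from the ring, but clamps the copy handed to the caller back to *nr. A minimal sketch, not part of the patch: a userspace model of the common (non-EOF) branch, with an invented buffer and line.

    #include <stdio.h>
    #include <stddef.h>

    #define BUF_SIZE 16 /* power of two, like N_TTY_BUF_SIZE */

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    int main(void)
    {
        const char buf[BUF_SIZE] = "hello\nworld\n";
        size_t read_tail = 0, canon_head = 12; /* two complete lines */
        size_t nr = 5;                         /* caller wants 5 bytes */

        /* scan up to nr + 1 bytes so the terminator is still in range */
        size_t n = min_sz(nr + 1, canon_head - read_tail);
        size_t eol = read_tail;
        int found = 0;

        while (eol < read_tail + n) {
            if (buf[eol & (BUF_SIZE - 1)] == '\n') { found = 1; break; }
            eol++;
        }

        /* consume the terminator from the ring, copy at most nr bytes */
        size_t c = (eol - read_tail) + found;
        size_t to_copy = min_sz(nr, c);

        printf("consume %zu bytes, copy %zu to the caller\n", c, to_copy);
        return 0;
    }

With nr = 5 and the line "hello\n", six bytes leave the ring but only five reach the caller, which is the case the removed eof_push bookkeeping used to juggle.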
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
index c0533a57ec53..910bfee5a88b 100644
--- a/drivers/tty/serial/8250/8250_fsl.c
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -60,3 +60,4 @@ int fsl8250_handle_irq(struct uart_port *port)
60 spin_unlock_irqrestore(&up->port.lock, flags); 60 spin_unlock_irqrestore(&up->port.lock, flags);
61 return 1; 61 return 1;
62} 62}
63EXPORT_SYMBOL_GPL(fsl8250_handle_irq);
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index d11621e2cf1d..245edbb68d4b 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -115,12 +115,16 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
115 */ 115 */
116static int uniphier_serial_dl_read(struct uart_8250_port *up) 116static int uniphier_serial_dl_read(struct uart_8250_port *up)
117{ 117{
118 return readl(up->port.membase + UNIPHIER_UART_DLR); 118 int offset = UNIPHIER_UART_DLR << up->port.regshift;
119
120 return readl(up->port.membase + offset);
119} 121}
120 122
121static void uniphier_serial_dl_write(struct uart_8250_port *up, int value) 123static void uniphier_serial_dl_write(struct uart_8250_port *up, int value)
122{ 124{
123 writel(value, up->port.membase + UNIPHIER_UART_DLR); 125 int offset = UNIPHIER_UART_DLR << up->port.regshift;
126
127 writel(value, up->port.membase + offset);
124} 128}
125 129
126static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port, 130static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port,
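The UniPhier fix above makes the divisor-latch accessors respect port.regshift, the standard 8250 convention for buses that space byte-indexed registers out on wider boundaries. A minimal sketch of the offset arithmetic; the register index below is invented, not the real UNIPHIER_UART_DLR value.

    #include <stdio.h>

    #define UART_DLR 9 /* hypothetical byte-indexed register */

    static unsigned int reg_offset(unsigned int reg, unsigned int regshift)
    {
        return reg << regshift; /* regshift = 2 -> 4-byte register stride */
    }

    int main(void)
    {
        printf("DLR at byte offset 0x%x\n", reg_offset(UART_DLR, 2));
        return 0;
    }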
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index e6f5e12a2d83..6412f1455beb 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -373,6 +373,7 @@ config SERIAL_8250_MID
373 depends on SERIAL_8250 && PCI 373 depends on SERIAL_8250 && PCI
374 select HSU_DMA if SERIAL_8250_DMA 374 select HSU_DMA if SERIAL_8250_DMA
375 select HSU_DMA_PCI if X86_INTEL_MID 375 select HSU_DMA_PCI if X86_INTEL_MID
376 select RATIONAL
376 help 377 help
377 Selecting this option will enable handling of the extra features 378 Selecting this option will enable handling of the extra features
378 present on the UART found on Intel Medfield SOC and various other 379 present on the UART found on Intel Medfield SOC and various other
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 1aec4404062d..f38beb28e7ae 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1539,7 +1539,6 @@ config SERIAL_FSL_LPUART
1539 tristate "Freescale lpuart serial port support" 1539 tristate "Freescale lpuart serial port support"
1540 depends on HAS_DMA 1540 depends on HAS_DMA
1541 select SERIAL_CORE 1541 select SERIAL_CORE
1542 select SERIAL_EARLYCON
1543 help 1542 help
1544 Support for the on-chip lpuart on some Freescale SOCs. 1543 Support for the on-chip lpuart on some Freescale SOCs.
1545 1544
@@ -1547,6 +1546,7 @@ config SERIAL_FSL_LPUART_CONSOLE
1547 bool "Console on Freescale lpuart serial port" 1546 bool "Console on Freescale lpuart serial port"
1548 depends on SERIAL_FSL_LPUART=y 1547 depends on SERIAL_FSL_LPUART=y
1549 select SERIAL_CORE_CONSOLE 1548 select SERIAL_CORE_CONSOLE
1549 select SERIAL_EARLYCON
1550 help 1550 help
1551 If you have enabled the lpuart serial port on the Freescale SoCs, 1551 If you have enabled the lpuart serial port on the Freescale SoCs,
1552 you can make it the console by answering Y to this option. 1552 you can make it the console by answering Y to this option.
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 681e0f3d5e0e..a1c0a89d9c7f 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -474,7 +474,7 @@ static int bcm_uart_startup(struct uart_port *port)
474 474
475 /* register irq and enable rx interrupts */ 475 /* register irq and enable rx interrupts */
476 ret = request_irq(port->irq, bcm_uart_interrupt, 0, 476 ret = request_irq(port->irq, bcm_uart_interrupt, 0,
477 bcm_uart_type(port), port); 477 dev_name(port->dev), port);
478 if (ret) 478 if (ret)
479 return ret; 479 return ret;
480 bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG); 480 bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG);
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index f09636083426..b5b2f2be6be7 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -115,6 +115,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
115 if (buf && !parse_options(&early_console_dev, buf)) 115 if (buf && !parse_options(&early_console_dev, buf))
116 buf = NULL; 116 buf = NULL;
117 117
118 spin_lock_init(&port->lock);
118 port->uartclk = BASE_BAUD * 16; 119 port->uartclk = BASE_BAUD * 16;
119 if (port->mapbase) 120 if (port->mapbase)
120 port->membase = earlycon_map(port->mapbase, 64); 121 port->membase = earlycon_map(port->mapbase, 64);
@@ -202,6 +203,7 @@ int __init of_setup_earlycon(unsigned long addr,
202 int err; 203 int err;
203 struct uart_port *port = &early_console_dev.port; 204 struct uart_port *port = &early_console_dev.port;
204 205
206 spin_lock_init(&port->lock);
205 port->iotype = UPIO_MEM; 207 port->iotype = UPIO_MEM;
206 port->mapbase = addr; 208 port->mapbase = addr;
207 port->uartclk = BASE_BAUD * 16; 209 port->uartclk = BASE_BAUD * 16;
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
index 6813e316e9ff..2f80bc7e44fb 100644
--- a/drivers/tty/serial/etraxfs-uart.c
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -894,7 +894,7 @@ static int etraxfs_uart_probe(struct platform_device *pdev)
894 up->regi_ser = of_iomap(np, 0); 894 up->regi_ser = of_iomap(np, 0);
895 up->port.dev = &pdev->dev; 895 up->port.dev = &pdev->dev;
896 896
897 up->gpios = mctrl_gpio_init(&pdev->dev, 0); 897 up->gpios = mctrl_gpio_init_noauto(&pdev->dev, 0);
898 if (IS_ERR(up->gpios)) 898 if (IS_ERR(up->gpios))
899 return PTR_ERR(up->gpios); 899 return PTR_ERR(up->gpios);
900 900
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 960e50a97558..51c7507b0444 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1437,7 +1437,7 @@ static void sci_request_dma(struct uart_port *port)
1437 sg_init_table(sg, 1); 1437 sg_init_table(sg, 1);
1438 s->rx_buf[i] = buf; 1438 s->rx_buf[i] = buf;
1439 sg_dma_address(sg) = dma; 1439 sg_dma_address(sg) = dma;
1440 sg->length = s->buf_len_rx; 1440 sg_dma_len(sg) = s->buf_len_rx;
1441 1441
1442 buf += s->buf_len_rx; 1442 buf += s->buf_len_rx;
1443 dma += s->buf_len_rx; 1443 dma += s->buf_len_rx;
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index 90ca082935f6..3d245cd3d8e6 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -265,7 +265,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
265 * 265 *
266 * Audit @data of @size from @tty, if necessary. 266 * Audit @data of @size from @tty, if necessary.
267 */ 267 */
268void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, 268void tty_audit_add_data(struct tty_struct *tty, const void *data,
269 size_t size, unsigned icanon) 269 size_t size, unsigned icanon)
270{ 270{
271 struct tty_audit_buf *buf; 271 struct tty_audit_buf *buf;
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 9a479e61791a..3cd31e0d4bd9 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -450,7 +450,7 @@ receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
450 count = disc->ops->receive_buf2(tty, p, f, count); 450 count = disc->ops->receive_buf2(tty, p, f, count);
451 else { 451 else {
452 count = min_t(int, count, tty->receive_room); 452 count = min_t(int, count, tty->receive_room);
453 if (count) 453 if (count && disc->ops->receive_buf)
454 disc->ops->receive_buf(tty, p, f, count); 454 disc->ops->receive_buf(tty, p, f, count);
455 } 455 }
456 return count; 456 return count;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 0c41dbcb90b8..bcc8e1e8bb72 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1282,18 +1282,22 @@ int tty_send_xchar(struct tty_struct *tty, char ch)
1282 int was_stopped = tty->stopped; 1282 int was_stopped = tty->stopped;
1283 1283
1284 if (tty->ops->send_xchar) { 1284 if (tty->ops->send_xchar) {
1285 down_read(&tty->termios_rwsem);
1285 tty->ops->send_xchar(tty, ch); 1286 tty->ops->send_xchar(tty, ch);
1287 up_read(&tty->termios_rwsem);
1286 return 0; 1288 return 0;
1287 } 1289 }
1288 1290
1289 if (tty_write_lock(tty, 0) < 0) 1291 if (tty_write_lock(tty, 0) < 0)
1290 return -ERESTARTSYS; 1292 return -ERESTARTSYS;
1291 1293
1294 down_read(&tty->termios_rwsem);
1292 if (was_stopped) 1295 if (was_stopped)
1293 start_tty(tty); 1296 start_tty(tty);
1294 tty->ops->write(tty, &ch, 1); 1297 tty->ops->write(tty, &ch, 1);
1295 if (was_stopped) 1298 if (was_stopped)
1296 stop_tty(tty); 1299 stop_tty(tty);
1300 up_read(&tty->termios_rwsem);
1297 tty_write_unlock(tty); 1301 tty_write_unlock(tty);
1298 return 0; 1302 return 0;
1299} 1303}
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 9c5aebfe7053..1445dd39aa62 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -1147,16 +1147,12 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
1147 spin_unlock_irq(&tty->flow_lock); 1147 spin_unlock_irq(&tty->flow_lock);
1148 break; 1148 break;
1149 case TCIOFF: 1149 case TCIOFF:
1150 down_read(&tty->termios_rwsem);
1151 if (STOP_CHAR(tty) != __DISABLED_CHAR) 1150 if (STOP_CHAR(tty) != __DISABLED_CHAR)
1152 retval = tty_send_xchar(tty, STOP_CHAR(tty)); 1151 retval = tty_send_xchar(tty, STOP_CHAR(tty));
1153 up_read(&tty->termios_rwsem);
1154 break; 1152 break;
1155 case TCION: 1153 case TCION:
1156 down_read(&tty->termios_rwsem);
1157 if (START_CHAR(tty) != __DISABLED_CHAR) 1154 if (START_CHAR(tty) != __DISABLED_CHAR)
1158 retval = tty_send_xchar(tty, START_CHAR(tty)); 1155 retval = tty_send_xchar(tty, START_CHAR(tty));
1159 up_read(&tty->termios_rwsem);
1160 break; 1156 break;
1161 default: 1157 default:
1162 return -EINVAL; 1158 return -EINVAL;
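Taken together, the tty_io.c and tty_ioctl.c hunks above push the termios_rwsem acquisition down into tty_send_xchar(), so the read lock is now taken inside the write lock rather than around it in the TCIOFF/TCION paths. A minimal sketch, not part of the patch, of the resulting lock order, with pthread locks standing in for the kernel primitives.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_rwlock_t termios_rwsem = PTHREAD_RWLOCK_INITIALIZER;

    static void send_xchar(char ch)
    {
        pthread_mutex_lock(&write_lock);       /* outer: ~tty_write_lock() */
        pthread_rwlock_rdlock(&termios_rwsem); /* inner: taken here now */
        printf("sent %c under both locks\n", ch);
        pthread_rwlock_unlock(&termios_rwsem);
        pthread_mutex_unlock(&write_lock);
    }

    int main(void)
    {
        /* the old TCIOFF path took termios_rwsem first and then blocked
         * on the write lock, inverting this order */
        send_xchar('S');
        return 0;
    }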
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 5af8f1874c1a..629e3c865072 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -592,7 +592,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
592 592
593 /* Restart the work queue in case no characters kick it off. Safe if 593 /* Restart the work queue in case no characters kick it off. Safe if
594 already running */ 594 already running */
595 schedule_work(&tty->port->buf.work); 595 tty_buffer_restart_work(tty->port);
596 596
597 tty_unlock(tty); 597 tty_unlock(tty);
598 return retval; 598 return retval;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 6ccbf60cdd5c..5a048b7b92e8 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -84,6 +84,12 @@ struct ci_hdrc_imx_data {
84 struct imx_usbmisc_data *usbmisc_data; 84 struct imx_usbmisc_data *usbmisc_data;
85 bool supports_runtime_pm; 85 bool supports_runtime_pm;
86 bool in_lpm; 86 bool in_lpm;
 87 /* SoCs before i.MX6 (except i.MX23/i.MX28) need three clocks */
88 bool need_three_clks;
89 struct clk *clk_ipg;
90 struct clk *clk_ahb;
91 struct clk *clk_per;
92 /* --------------------------------- */
87}; 93};
88 94
89/* Common functions shared by usbmisc drivers */ 95/* Common functions shared by usbmisc drivers */
@@ -135,6 +141,102 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
135} 141}
136 142
 137/* End of common functions shared by usbmisc drivers */ 143/* End of common functions shared by usbmisc drivers */
144static int imx_get_clks(struct device *dev)
145{
146 struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
147 int ret = 0;
148
149 data->clk_ipg = devm_clk_get(dev, "ipg");
150 if (IS_ERR(data->clk_ipg)) {
 151 /* If the platform only needs one clock */
152 data->clk = devm_clk_get(dev, NULL);
153 if (IS_ERR(data->clk)) {
154 ret = PTR_ERR(data->clk);
155 dev_err(dev,
156 "Failed to get clks, err=%ld,%ld\n",
157 PTR_ERR(data->clk), PTR_ERR(data->clk_ipg));
158 return ret;
159 }
160 return ret;
161 }
162
163 data->clk_ahb = devm_clk_get(dev, "ahb");
164 if (IS_ERR(data->clk_ahb)) {
165 ret = PTR_ERR(data->clk_ahb);
166 dev_err(dev,
167 "Failed to get ahb clock, err=%d\n", ret);
168 return ret;
169 }
170
171 data->clk_per = devm_clk_get(dev, "per");
172 if (IS_ERR(data->clk_per)) {
173 ret = PTR_ERR(data->clk_per);
174 dev_err(dev,
175 "Failed to get per clock, err=%d\n", ret);
176 return ret;
177 }
178
179 data->need_three_clks = true;
180 return ret;
181}
182
183static int imx_prepare_enable_clks(struct device *dev)
184{
185 struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
186 int ret = 0;
187
188 if (data->need_three_clks) {
189 ret = clk_prepare_enable(data->clk_ipg);
190 if (ret) {
191 dev_err(dev,
192 "Failed to prepare/enable ipg clk, err=%d\n",
193 ret);
194 return ret;
195 }
196
197 ret = clk_prepare_enable(data->clk_ahb);
198 if (ret) {
199 dev_err(dev,
200 "Failed to prepare/enable ahb clk, err=%d\n",
201 ret);
202 clk_disable_unprepare(data->clk_ipg);
203 return ret;
204 }
205
206 ret = clk_prepare_enable(data->clk_per);
207 if (ret) {
208 dev_err(dev,
209 "Failed to prepare/enable per clk, err=%d\n",
210 ret);
211 clk_disable_unprepare(data->clk_ahb);
212 clk_disable_unprepare(data->clk_ipg);
213 return ret;
214 }
215 } else {
216 ret = clk_prepare_enable(data->clk);
217 if (ret) {
218 dev_err(dev,
219 "Failed to prepare/enable clk, err=%d\n",
220 ret);
221 return ret;
222 }
223 }
224
225 return ret;
226}
227
228static void imx_disable_unprepare_clks(struct device *dev)
229{
230 struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
231
232 if (data->need_three_clks) {
233 clk_disable_unprepare(data->clk_per);
234 clk_disable_unprepare(data->clk_ahb);
235 clk_disable_unprepare(data->clk_ipg);
236 } else {
237 clk_disable_unprepare(data->clk);
238 }
239}
138 240
139static int ci_hdrc_imx_probe(struct platform_device *pdev) 241static int ci_hdrc_imx_probe(struct platform_device *pdev)
140{ 242{
@@ -145,31 +247,31 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
145 .flags = CI_HDRC_SET_NON_ZERO_TTHA, 247 .flags = CI_HDRC_SET_NON_ZERO_TTHA,
146 }; 248 };
147 int ret; 249 int ret;
148 const struct of_device_id *of_id = 250 const struct of_device_id *of_id;
149 of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev); 251 const struct ci_hdrc_imx_platform_flag *imx_platform_flag;
150 const struct ci_hdrc_imx_platform_flag *imx_platform_flag = of_id->data; 252
253 of_id = of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev);
254 if (!of_id)
255 return -ENODEV;
256
257 imx_platform_flag = of_id->data;
151 258
152 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 259 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
153 if (!data) 260 if (!data)
154 return -ENOMEM; 261 return -ENOMEM;
155 262
263 platform_set_drvdata(pdev, data);
156 data->usbmisc_data = usbmisc_get_init_data(&pdev->dev); 264 data->usbmisc_data = usbmisc_get_init_data(&pdev->dev);
157 if (IS_ERR(data->usbmisc_data)) 265 if (IS_ERR(data->usbmisc_data))
158 return PTR_ERR(data->usbmisc_data); 266 return PTR_ERR(data->usbmisc_data);
159 267
160 data->clk = devm_clk_get(&pdev->dev, NULL); 268 ret = imx_get_clks(&pdev->dev);
161 if (IS_ERR(data->clk)) { 269 if (ret)
162 dev_err(&pdev->dev, 270 return ret;
163 "Failed to get clock, err=%ld\n", PTR_ERR(data->clk));
164 return PTR_ERR(data->clk);
165 }
166 271
167 ret = clk_prepare_enable(data->clk); 272 ret = imx_prepare_enable_clks(&pdev->dev);
168 if (ret) { 273 if (ret)
169 dev_err(&pdev->dev,
170 "Failed to prepare or enable clock, err=%d\n", ret);
171 return ret; 274 return ret;
172 }
173 275
174 data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0); 276 data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
175 if (IS_ERR(data->phy)) { 277 if (IS_ERR(data->phy)) {
@@ -212,8 +314,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
212 goto disable_device; 314 goto disable_device;
213 } 315 }
214 316
215 platform_set_drvdata(pdev, data);
216
217 if (data->supports_runtime_pm) { 317 if (data->supports_runtime_pm) {
218 pm_runtime_set_active(&pdev->dev); 318 pm_runtime_set_active(&pdev->dev);
219 pm_runtime_enable(&pdev->dev); 319 pm_runtime_enable(&pdev->dev);
@@ -226,7 +326,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
226disable_device: 326disable_device:
227 ci_hdrc_remove_device(data->ci_pdev); 327 ci_hdrc_remove_device(data->ci_pdev);
228err_clk: 328err_clk:
229 clk_disable_unprepare(data->clk); 329 imx_disable_unprepare_clks(&pdev->dev);
230 return ret; 330 return ret;
231} 331}
232 332
@@ -240,7 +340,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
240 pm_runtime_put_noidle(&pdev->dev); 340 pm_runtime_put_noidle(&pdev->dev);
241 } 341 }
242 ci_hdrc_remove_device(data->ci_pdev); 342 ci_hdrc_remove_device(data->ci_pdev);
243 clk_disable_unprepare(data->clk); 343 imx_disable_unprepare_clks(&pdev->dev);
244 344
245 return 0; 345 return 0;
246} 346}
@@ -252,7 +352,7 @@ static int imx_controller_suspend(struct device *dev)
252 352
253 dev_dbg(dev, "at %s\n", __func__); 353 dev_dbg(dev, "at %s\n", __func__);
254 354
255 clk_disable_unprepare(data->clk); 355 imx_disable_unprepare_clks(dev);
256 data->in_lpm = true; 356 data->in_lpm = true;
257 357
258 return 0; 358 return 0;
@@ -270,7 +370,7 @@ static int imx_controller_resume(struct device *dev)
270 return 0; 370 return 0;
271 } 371 }
272 372
273 ret = clk_prepare_enable(data->clk); 373 ret = imx_prepare_enable_clks(dev);
274 if (ret) 374 if (ret)
275 return ret; 375 return ret;
276 376
@@ -285,7 +385,7 @@ static int imx_controller_resume(struct device *dev)
285 return 0; 385 return 0;
286 386
287clk_disable: 387clk_disable:
288 clk_disable_unprepare(data->clk); 388 imx_disable_unprepare_clks(dev);
289 return ret; 389 return ret;
290} 390}
291 391
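imx_get_clks() above probes for the named "ipg" clock and, when it is absent, falls back to the single anonymous clock older platforms expose, recording the result in need_three_clks so that enable and disable stay symmetric. A minimal sketch of that try-named-then-fall-back shape; get_clk() is an invented stand-in for devm_clk_get().

    #include <stdio.h>

    struct clk { const char *name; };

    static struct clk *get_clk(const char *name)
    {
        static struct clk anon = { "per-device clk" };

        /* pretend this platform only exposes the unnamed clock */
        return name == NULL ? &anon : NULL;
    }

    int main(void)
    {
        struct clk *ipg = get_clk("ipg");

        if (!ipg) {
            struct clk *clk = get_clk(NULL);

            if (!clk) {
                fprintf(stderr, "no usable clock\n");
                return 1;
            }
            printf("one-clock platform: %s\n", clk->name);
            return 0;
        }
        /* three-clock platform: would also fetch "ahb" and "per",
         * then enable ipg -> ahb -> per, unwinding in reverse on error */
        printf("three-clock platform\n");
        return 0;
    }

On the three-clock path the driver enables ipg, ahb and per in that order and unwinds in reverse on failure, which imx_disable_unprepare_clks() mirrors.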
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 080b7be3daf0..58c8485a0715 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -322,8 +322,10 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf,
322 return -EINVAL; 322 return -EINVAL;
323 323
324 pm_runtime_get_sync(ci->dev); 324 pm_runtime_get_sync(ci->dev);
325 disable_irq(ci->irq);
325 ci_role_stop(ci); 326 ci_role_stop(ci);
326 ret = ci_role_start(ci, role); 327 ret = ci_role_start(ci, role);
328 enable_irq(ci->irq);
327 pm_runtime_put_sync(ci->dev); 329 pm_runtime_put_sync(ci->dev);
328 330
329 return ret ? ret : count; 331 return ret ? ret : count;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 8223fe73ea85..391a1225b0ba 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1751,6 +1751,22 @@ static int ci_udc_start(struct usb_gadget *gadget,
1751 return retval; 1751 return retval;
1752} 1752}
1753 1753
1754static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
1755{
1756 if (!ci_otg_is_fsm_mode(ci))
1757 return;
1758
1759 mutex_lock(&ci->fsm.lock);
1760 if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
1761 ci->fsm.a_bidl_adis_tmout = 1;
1762 ci_hdrc_otg_fsm_start(ci);
1763 } else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
1764 ci->fsm.protocol = PROTO_UNDEF;
1765 ci->fsm.otg->state = OTG_STATE_UNDEFINED;
1766 }
1767 mutex_unlock(&ci->fsm.lock);
1768}
1769
1754/** 1770/**
1755 * ci_udc_stop: unregister a gadget driver 1771 * ci_udc_stop: unregister a gadget driver
1756 */ 1772 */
@@ -1775,6 +1791,7 @@ static int ci_udc_stop(struct usb_gadget *gadget)
1775 ci->driver = NULL; 1791 ci->driver = NULL;
1776 spin_unlock_irqrestore(&ci->lock, flags); 1792 spin_unlock_irqrestore(&ci->lock, flags);
1777 1793
1794 ci_udc_stop_for_otg_fsm(ci);
1778 return 0; 1795 return 0;
1779} 1796}
1780 1797
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index fcea4eb36eee..ab8b027e8cc8 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -500,7 +500,11 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
500{ 500{
501 struct resource *res; 501 struct resource *res;
502 struct imx_usbmisc *data; 502 struct imx_usbmisc *data;
503 struct of_device_id *tmp_dev; 503 const struct of_device_id *of_id;
504
505 of_id = of_match_device(usbmisc_imx_dt_ids, &pdev->dev);
506 if (!of_id)
507 return -ENODEV;
504 508
505 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 509 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
506 if (!data) 510 if (!data)
@@ -513,9 +517,7 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
513 if (IS_ERR(data->base)) 517 if (IS_ERR(data->base))
514 return PTR_ERR(data->base); 518 return PTR_ERR(data->base);
515 519
516 tmp_dev = (struct of_device_id *) 520 data->ops = (const struct usbmisc_ops *)of_id->data;
517 of_match_device(usbmisc_imx_dt_ids, &pdev->dev);
518 data->ops = (const struct usbmisc_ops *)tmp_dev->data;
519 platform_set_drvdata(pdev, data); 521 platform_set_drvdata(pdev, data);
520 522
521 return 0; 523 return 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index b30e7423549b..26ca4f910cb0 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1838,6 +1838,11 @@ static const struct usb_device_id acm_ids[] = {
1838 }, 1838 },
1839#endif 1839#endif
1840 1840
1841 /* Exclude Infineon Flash Loader utility */
1842 { USB_DEVICE(0x058b, 0x0041),
1843 .driver_info = IGNORE_DEVICE,
1844 },
1845
1841 /* control interfaces without any protocol set */ 1846 /* control interfaces without any protocol set */
1842 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1847 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1843 USB_CDC_PROTO_NONE) }, 1848 USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 433bbc34a8a4..071964c7847f 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -884,11 +884,11 @@ static int usblp_wwait(struct usblp *usblp, int nonblock)
884 884
885 add_wait_queue(&usblp->wwait, &waita); 885 add_wait_queue(&usblp->wwait, &waita);
886 for (;;) { 886 for (;;) {
887 set_current_state(TASK_INTERRUPTIBLE);
888 if (mutex_lock_interruptible(&usblp->mut)) { 887 if (mutex_lock_interruptible(&usblp->mut)) {
889 rc = -EINTR; 888 rc = -EINTR;
890 break; 889 break;
891 } 890 }
891 set_current_state(TASK_INTERRUPTIBLE);
892 rc = usblp_wtest(usblp, nonblock); 892 rc = usblp_wtest(usblp, nonblock);
893 mutex_unlock(&usblp->mut); 893 mutex_unlock(&usblp->mut);
894 if (rc <= 0) 894 if (rc <= 0)
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index a99c89e78126..dd280108758f 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -77,8 +77,7 @@ config USB_OTG_BLACKLIST_HUB
77 77
78config USB_OTG_FSM 78config USB_OTG_FSM
79 tristate "USB 2.0 OTG FSM implementation" 79 tristate "USB 2.0 OTG FSM implementation"
80 depends on USB 80 depends on USB && USB_OTG
81 select USB_OTG
82 select USB_PHY 81 select USB_PHY
83 help 82 help
84 Implements OTG Finite State Machine as specified in On-The-Go 83 Implements OTG Finite State Machine as specified in On-The-Go
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 7caff020106e..5050760f5e17 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -115,7 +115,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
115 USB_SS_MULT(desc->bmAttributes) > 3) { 115 USB_SS_MULT(desc->bmAttributes) > 3) {
116 dev_warn(ddev, "Isoc endpoint has Mult of %d in " 116 dev_warn(ddev, "Isoc endpoint has Mult of %d in "
117 "config %d interface %d altsetting %d ep %d: " 117 "config %d interface %d altsetting %d ep %d: "
118 "setting to 3\n", desc->bmAttributes + 1, 118 "setting to 3\n",
119 USB_SS_MULT(desc->bmAttributes),
119 cfgno, inum, asnum, ep->desc.bEndpointAddress); 120 cfgno, inum, asnum, ep->desc.bEndpointAddress);
120 ep->ss_ep_comp.bmAttributes = 2; 121 ep->ss_ep_comp.bmAttributes = 2;
121 } 122 }
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index bdeadc112d29..ddbf32d599cb 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -124,6 +124,10 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
124 124
125int usb_device_supports_lpm(struct usb_device *udev) 125int usb_device_supports_lpm(struct usb_device *udev)
126{ 126{
127 /* Some devices have trouble with LPM */
128 if (udev->quirks & USB_QUIRK_NO_LPM)
129 return 0;
130
127 /* USB 2.1 (and greater) devices indicate LPM support through 131 /* USB 2.1 (and greater) devices indicate LPM support through
128 * their USB 2.0 Extended Capabilities BOS descriptor. 132 * their USB 2.0 Extended Capabilities BOS descriptor.
129 */ 133 */
@@ -1031,10 +1035,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1031 unsigned delay; 1035 unsigned delay;
1032 1036
1033 /* Continue a partial initialization */ 1037 /* Continue a partial initialization */
1034 if (type == HUB_INIT2) 1038 if (type == HUB_INIT2 || type == HUB_INIT3) {
1035 goto init2; 1039 device_lock(hub->intfdev);
1036 if (type == HUB_INIT3) 1040
1041 /* Was the hub disconnected while we were waiting? */
1042 if (hub->disconnected) {
1043 device_unlock(hub->intfdev);
1044 kref_put(&hub->kref, hub_release);
1045 return;
1046 }
1047 if (type == HUB_INIT2)
1048 goto init2;
1037 goto init3; 1049 goto init3;
1050 }
1051 kref_get(&hub->kref);
1038 1052
1039 /* The superspeed hub except for root hub has to use Hub Depth 1053 /* The superspeed hub except for root hub has to use Hub Depth
1040 * value as an offset into the route string to locate the bits 1054 * value as an offset into the route string to locate the bits
@@ -1232,6 +1246,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1232 queue_delayed_work(system_power_efficient_wq, 1246 queue_delayed_work(system_power_efficient_wq,
1233 &hub->init_work, 1247 &hub->init_work,
1234 msecs_to_jiffies(delay)); 1248 msecs_to_jiffies(delay));
1249 device_unlock(hub->intfdev);
1235 return; /* Continues at init3: below */ 1250 return; /* Continues at init3: below */
1236 } else { 1251 } else {
1237 msleep(delay); 1252 msleep(delay);
@@ -1253,6 +1268,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1253 /* Allow autosuspend if it was suppressed */ 1268 /* Allow autosuspend if it was suppressed */
1254 if (type <= HUB_INIT3) 1269 if (type <= HUB_INIT3)
1255 usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); 1270 usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
1271
1272 if (type == HUB_INIT2 || type == HUB_INIT3)
1273 device_unlock(hub->intfdev);
1274
1275 kref_put(&hub->kref, hub_release);
1256} 1276}
1257 1277
1258/* Implement the continuations for the delays above */ 1278/* Implement the continuations for the delays above */
@@ -4512,6 +4532,8 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
4512 goto fail; 4532 goto fail;
4513 } 4533 }
4514 4534
4535 usb_detect_quirks(udev);
4536
4515 if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) { 4537 if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
4516 retval = usb_get_bos_descriptor(udev); 4538 retval = usb_get_bos_descriptor(udev);
4517 if (!retval) { 4539 if (!retval) {
@@ -4710,7 +4732,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
4710 if (status < 0) 4732 if (status < 0)
4711 goto loop; 4733 goto loop;
4712 4734
4713 usb_detect_quirks(udev);
4714 if (udev->quirks & USB_QUIRK_DELAY_INIT) 4735 if (udev->quirks & USB_QUIRK_DELAY_INIT)
4715 msleep(1000); 4736 msleep(1000);
4716 4737
@@ -5326,9 +5347,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
5326 if (udev->usb2_hw_lpm_enabled == 1) 5347 if (udev->usb2_hw_lpm_enabled == 1)
5327 usb_set_usb2_hardware_lpm(udev, 0); 5348 usb_set_usb2_hardware_lpm(udev, 0);
5328 5349
5329 bos = udev->bos;
5330 udev->bos = NULL;
5331
5332 /* Disable LPM and LTM while we reset the device and reinstall the alt 5350 /* Disable LPM and LTM while we reset the device and reinstall the alt
5333 * settings. Device-initiated LPM settings, and system exit latency 5351 * settings. Device-initiated LPM settings, and system exit latency
5334 * settings are cleared when the device is reset, so we have to set 5352 * settings are cleared when the device is reset, so we have to set
@@ -5337,15 +5355,18 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
5337 ret = usb_unlocked_disable_lpm(udev); 5355 ret = usb_unlocked_disable_lpm(udev);
5338 if (ret) { 5356 if (ret) {
 5339 dev_err(&udev->dev, "%s Failed to disable LPM.\n", __func__); 5357 dev_err(&udev->dev, "%s Failed to disable LPM.\n", __func__);
5340 goto re_enumerate; 5358 goto re_enumerate_no_bos;
5341 } 5359 }
5342 ret = usb_disable_ltm(udev); 5360 ret = usb_disable_ltm(udev);
5343 if (ret) { 5361 if (ret) {
 5344 dev_err(&udev->dev, "%s Failed to disable LTM.\n", 5362 dev_err(&udev->dev, "%s Failed to disable LTM.\n",
5345 __func__); 5363 __func__);
5346 goto re_enumerate; 5364 goto re_enumerate_no_bos;
5347 } 5365 }
5348 5366
5367 bos = udev->bos;
5368 udev->bos = NULL;
5369
5349 for (i = 0; i < SET_CONFIG_TRIES; ++i) { 5370 for (i = 0; i < SET_CONFIG_TRIES; ++i) {
5350 5371
5351 /* ep0 maxpacket size may change; let the HCD know about it. 5372 /* ep0 maxpacket size may change; let the HCD know about it.
@@ -5442,10 +5463,11 @@ done:
5442 return 0; 5463 return 0;
5443 5464
5444re_enumerate: 5465re_enumerate:
5445 /* LPM state doesn't matter when we're about to destroy the device. */
5446 hub_port_logical_disconnect(parent_hub, port1);
5447 usb_release_bos_descriptor(udev); 5466 usb_release_bos_descriptor(udev);
5448 udev->bos = bos; 5467 udev->bos = bos;
5468re_enumerate_no_bos:
5469 /* LPM state doesn't matter when we're about to destroy the device. */
5470 hub_port_logical_disconnect(parent_hub, port1);
5449 return -ENODEV; 5471 return -ENODEV;
5450} 5472}
5451 5473
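The hub_activate() hunks above close a use-after-free window: the function now holds an extra kref across the delayed HUB_INIT2/HUB_INIT3 continuations and re-checks hub->disconnected under the interface device lock before touching the hub again. A minimal sketch, not part of the patch, of that refcounted-continuation pattern, with plain ints standing in for krefs and locks.

    #include <stdio.h>

    struct hub {
        int refs;
        int disconnected;
    };

    static void hub_put(struct hub *h)
    {
        if (--h->refs == 0)
            printf("hub freed\n");
    }

    static void activate_continuation(struct hub *h)
    {
        /* the real code takes device_lock(hub->intfdev) here */
        if (h->disconnected) {
            hub_put(h); /* bail out instead of touching dead state */
            return;
        }
        printf("continue HUB_INIT2/HUB_INIT3 work\n");
        hub_put(h);
    }

    int main(void)
    {
        struct hub h = { .refs = 1, .disconnected = 0 };

        h.refs++;                  /* kref_get() before queueing the work */
        activate_continuation(&h); /* runs later from the workqueue */
        hub_put(&h);
        return 0;
    }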
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 210618319f10..5487fe308f01 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -206,7 +206,7 @@ static int link_peers(struct usb_port *left, struct usb_port *right)
206 else 206 else
207 method = "default"; 207 method = "default";
208 208
209 pr_warn("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n", 209 pr_debug("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n",
210 dev_name(&left->dev), dev_name(&right->dev), method, 210 dev_name(&left->dev), dev_name(&right->dev), method,
211 dev_name(&left->dev), 211 dev_name(&left->dev),
212 lpeer ? dev_name(&lpeer->dev) : "none", 212 lpeer ? dev_name(&lpeer->dev) : "none",
@@ -265,7 +265,7 @@ static void link_peers_report(struct usb_port *left, struct usb_port *right)
265 if (rc == 0) { 265 if (rc == 0) {
266 dev_dbg(&left->dev, "peered to %s\n", dev_name(&right->dev)); 266 dev_dbg(&left->dev, "peered to %s\n", dev_name(&right->dev));
267 } else { 267 } else {
268 dev_warn(&left->dev, "failed to peer to %s (%d)\n", 268 dev_dbg(&left->dev, "failed to peer to %s (%d)\n",
269 dev_name(&right->dev), rc); 269 dev_name(&right->dev), rc);
270 pr_warn_once("usb: port power management may be unreliable\n"); 270 pr_warn_once("usb: port power management may be unreliable\n");
271 usb_port_block_power_off = 1; 271 usb_port_block_power_off = 1;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f5a381945db2..6dc810bce295 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -125,6 +125,9 @@ static const struct usb_device_id usb_quirk_list[] = {
125 { USB_DEVICE(0x04f3, 0x016f), .driver_info = 125 { USB_DEVICE(0x04f3, 0x016f), .driver_info =
126 USB_QUIRK_DEVICE_QUALIFIER }, 126 USB_QUIRK_DEVICE_QUALIFIER },
127 127
128 { USB_DEVICE(0x04f3, 0x21b8), .driver_info =
129 USB_QUIRK_DEVICE_QUALIFIER },
130
128 /* Roland SC-8820 */ 131 /* Roland SC-8820 */
129 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, 132 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
130 133
@@ -199,6 +202,12 @@ static const struct usb_device_id usb_quirk_list[] = {
199 { USB_DEVICE(0x1a0a, 0x0200), .driver_info = 202 { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
200 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, 203 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
201 204
205 /* Blackmagic Design Intensity Shuttle */
206 { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
207
208 /* Blackmagic Design UltraStudio SDI */
209 { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
210
202 { } /* terminating entry must be last */ 211 { } /* terminating entry must be last */
203}; 212};
204 213
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index e79baf73c234..571c21727ff9 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -324,12 +324,13 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg)
324 */ 324 */
325static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg) 325static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg)
326{ 326{
327 if (hsotg->lx_state == DWC2_L2) { 327 if (hsotg->bus_suspended) {
328 hsotg->flags.b.port_suspend_change = 1; 328 hsotg->flags.b.port_suspend_change = 1;
329 usb_hcd_resume_root_hub(hsotg->priv); 329 usb_hcd_resume_root_hub(hsotg->priv);
330 } else {
331 hsotg->flags.b.port_l1_change = 1;
332 } 330 }
331
332 if (hsotg->lx_state == DWC2_L1)
333 hsotg->flags.b.port_l1_change = 1;
333} 334}
334 335
335/** 336/**
@@ -1428,8 +1429,8 @@ static void dwc2_wakeup_detected(unsigned long data)
1428 dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n", 1429 dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n",
1429 dwc2_readl(hsotg->regs + HPRT0)); 1430 dwc2_readl(hsotg->regs + HPRT0));
1430 1431
1431 hsotg->bus_suspended = 0;
1432 dwc2_hcd_rem_wakeup(hsotg); 1432 dwc2_hcd_rem_wakeup(hsotg);
1433 hsotg->bus_suspended = 0;
1433 1434
1434 /* Change to L0 state */ 1435 /* Change to L0 state */
1435 hsotg->lx_state = DWC2_L0; 1436 hsotg->lx_state = DWC2_L0;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 5859b0fa19ee..39c1cbf0e75d 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -108,7 +108,8 @@ static const struct dwc2_core_params params_rk3066 = {
108 .host_ls_low_power_phy_clk = -1, 108 .host_ls_low_power_phy_clk = -1,
109 .ts_dline = -1, 109 .ts_dline = -1,
110 .reload_ctl = -1, 110 .reload_ctl = -1,
111 .ahbcfg = 0x7, /* INCR16 */ 111 .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
112 GAHBCFG_HBSTLEN_SHIFT,
112 .uframe_sched = -1, 113 .uframe_sched = -1,
113 .external_id_pin_ctl = -1, 114 .external_id_pin_ctl = -1,
114 .hibernation = -1, 115 .hibernation = -1,
@@ -124,9 +125,11 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
124 if (ret) 125 if (ret)
125 return ret; 126 return ret;
126 127
127 ret = clk_prepare_enable(hsotg->clk); 128 if (hsotg->clk) {
128 if (ret) 129 ret = clk_prepare_enable(hsotg->clk);
129 return ret; 130 if (ret)
131 return ret;
132 }
130 133
131 if (hsotg->uphy) 134 if (hsotg->uphy)
132 ret = usb_phy_init(hsotg->uphy); 135 ret = usb_phy_init(hsotg->uphy);
@@ -174,7 +177,8 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
174 if (ret) 177 if (ret)
175 return ret; 178 return ret;
176 179
177 clk_disable_unprepare(hsotg->clk); 180 if (hsotg->clk)
181 clk_disable_unprepare(hsotg->clk);
178 182
179 ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), 183 ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
180 hsotg->supplies); 184 hsotg->supplies);
@@ -211,14 +215,41 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
211 */ 215 */
212 hsotg->phy = devm_phy_get(hsotg->dev, "usb2-phy"); 216 hsotg->phy = devm_phy_get(hsotg->dev, "usb2-phy");
213 if (IS_ERR(hsotg->phy)) { 217 if (IS_ERR(hsotg->phy)) {
214 hsotg->phy = NULL; 218 ret = PTR_ERR(hsotg->phy);
219 switch (ret) {
220 case -ENODEV:
221 case -ENOSYS:
222 hsotg->phy = NULL;
223 break;
224 case -EPROBE_DEFER:
225 return ret;
226 default:
227 dev_err(hsotg->dev, "error getting phy %d\n", ret);
228 return ret;
229 }
230 }
231
232 if (!hsotg->phy) {
215 hsotg->uphy = devm_usb_get_phy(hsotg->dev, USB_PHY_TYPE_USB2); 233 hsotg->uphy = devm_usb_get_phy(hsotg->dev, USB_PHY_TYPE_USB2);
216 if (IS_ERR(hsotg->uphy)) 234 if (IS_ERR(hsotg->uphy)) {
217 hsotg->uphy = NULL; 235 ret = PTR_ERR(hsotg->uphy);
218 else 236 switch (ret) {
219 hsotg->plat = dev_get_platdata(hsotg->dev); 237 case -ENODEV:
238 case -ENXIO:
239 hsotg->uphy = NULL;
240 break;
241 case -EPROBE_DEFER:
242 return ret;
243 default:
244 dev_err(hsotg->dev, "error getting usb phy %d\n",
245 ret);
246 return ret;
247 }
248 }
220 } 249 }
221 250
251 hsotg->plat = dev_get_platdata(hsotg->dev);
252
222 if (hsotg->phy) { 253 if (hsotg->phy) {
223 /* 254 /*
224 * If using the generic PHY framework, check if the PHY bus 255 * If using the generic PHY framework, check if the PHY bus
@@ -228,11 +259,6 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
228 hsotg->phyif = GUSBCFG_PHYIF8; 259 hsotg->phyif = GUSBCFG_PHYIF8;
229 } 260 }
230 261
231 if (!hsotg->phy && !hsotg->uphy && !hsotg->plat) {
232 dev_err(hsotg->dev, "no platform data or transceiver defined\n");
233 return -EPROBE_DEFER;
234 }
235
236 /* Clock */ 262 /* Clock */
237 hsotg->clk = devm_clk_get(hsotg->dev, "otg"); 263 hsotg->clk = devm_clk_get(hsotg->dev, "otg");
238 if (IS_ERR(hsotg->clk)) { 264 if (IS_ERR(hsotg->clk)) {
@@ -341,20 +367,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
341 if (retval) 367 if (retval)
342 return retval; 368 return retval;
343 369
344 irq = platform_get_irq(dev, 0);
345 if (irq < 0) {
346 dev_err(&dev->dev, "missing IRQ resource\n");
347 return irq;
348 }
349
350 dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
351 irq);
352 retval = devm_request_irq(hsotg->dev, irq,
353 dwc2_handle_common_intr, IRQF_SHARED,
354 dev_name(hsotg->dev), hsotg);
355 if (retval)
356 return retval;
357
358 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 370 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
359 hsotg->regs = devm_ioremap_resource(&dev->dev, res); 371 hsotg->regs = devm_ioremap_resource(&dev->dev, res);
360 if (IS_ERR(hsotg->regs)) 372 if (IS_ERR(hsotg->regs))
@@ -389,6 +401,20 @@ static int dwc2_driver_probe(struct platform_device *dev)
389 401
390 dwc2_set_all_params(hsotg->core_params, -1); 402 dwc2_set_all_params(hsotg->core_params, -1);
391 403
404 irq = platform_get_irq(dev, 0);
405 if (irq < 0) {
406 dev_err(&dev->dev, "missing IRQ resource\n");
407 return irq;
408 }
409
410 dev_dbg(hsotg->dev, "registering common handler for irq%d\n",
411 irq);
412 retval = devm_request_irq(hsotg->dev, irq,
413 dwc2_handle_common_intr, IRQF_SHARED,
414 dev_name(hsotg->dev), hsotg);
415 if (retval)
416 return retval;
417
392 retval = dwc2_lowlevel_hw_enable(hsotg); 418 retval = dwc2_lowlevel_hw_enable(hsotg);
393 if (retval) 419 if (retval)
394 return retval; 420 return retval;
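The dwc2 probe rework above stops collapsing every devm_phy_get() failure into "no PHY": absence (-ENODEV/-ENOSYS) lets the probe continue, deferral propagates -EPROBE_DEFER so the driver core retries later, and anything else is a hard error. A minimal sketch of that triage; EPROBE_DEFER is kernel-private, so it is defined locally here, and get_phy() is an invented stand-in for devm_phy_get().

    #include <errno.h>
    #include <stdio.h>

    #define EPROBE_DEFER 517 /* value from the kernel's private errno range */

    static int get_phy(void) { return -ENODEV; } /* pretend no PHY node */

    int main(void)
    {
        int ret = get_phy();

        switch (ret) {
        case -ENODEV:
        case -ENOSYS:
            printf("optional PHY absent, probe continues without it\n");
            break;
        case -EPROBE_DEFER:
            printf("provider not ready, retry the probe later\n");
            return 1;
        default:
            if (ret < 0) {
                fprintf(stderr, "hard PHY error %d\n", ret);
                return 1;
            }
        }
        return 0;
    }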
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 77a622cb48ab..009d83048c8c 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -34,6 +34,8 @@
34#define PCI_DEVICE_ID_INTEL_BSW 0x22b7 34#define PCI_DEVICE_ID_INTEL_BSW 0x22b7
35#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 35#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
36#define PCI_DEVICE_ID_INTEL_SPTH 0xa130 36#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
37#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
38#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
37 39
38static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; 40static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
39static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; 41static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
@@ -210,6 +212,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
210 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, 212 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
211 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, 213 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
212 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, 214 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
215 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
216 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
213 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, 217 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
214 { } /* Terminating Entry */ 218 { } /* Terminating Entry */
215}; 219};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 55ba447fdf8b..a58376fd65fe 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1078,6 +1078,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1078 * little bit faster. 1078 * little bit faster.
1079 */ 1079 */
1080 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) && 1080 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1081 !usb_endpoint_xfer_int(dep->endpoint.desc) &&
1081 !(dep->flags & DWC3_EP_BUSY)) { 1082 !(dep->flags & DWC3_EP_BUSY)) {
1082 ret = __dwc3_gadget_kick_transfer(dep, 0, true); 1083 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1083 goto out; 1084 goto out;
@@ -2744,12 +2745,34 @@ int dwc3_gadget_init(struct dwc3 *dwc)
2744 } 2745 }
2745 2746
2746 dwc->gadget.ops = &dwc3_gadget_ops; 2747 dwc->gadget.ops = &dwc3_gadget_ops;
2747 dwc->gadget.max_speed = USB_SPEED_SUPER;
2748 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2748 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2749 dwc->gadget.sg_supported = true; 2749 dwc->gadget.sg_supported = true;
2750 dwc->gadget.name = "dwc3-gadget"; 2750 dwc->gadget.name = "dwc3-gadget";
2751 2751
2752 /* 2752 /*
 2753 * FIXME We might be setting max_speed to <SUPER; however, versions
2754 * <2.20a of dwc3 have an issue with metastability (documented
2755 * elsewhere in this driver) which tells us we can't set max speed to
2756 * anything lower than SUPER.
2757 *
2758 * Because gadget.max_speed is only used by composite.c and function
2759 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
2760 * to happen so we avoid sending SuperSpeed Capability descriptor
 2761 * together with our BOS descriptor as that could confuse the host into
2762 * thinking we can handle super speed.
2763 *
2764 * Note that, in fact, we won't even support GetBOS requests when speed
 2765 * is less than super speed because we don't yet have the means to tell
2766 * composite.c that we are USB 2.0 + LPM ECN.
2767 */
2768 if (dwc->revision < DWC3_REVISION_220A)
2769 dwc3_trace(trace_dwc3_gadget,
2770 "Changing max_speed on rev %08x\n",
2771 dwc->revision);
2772
2773 dwc->gadget.max_speed = dwc->maximum_speed;
2774
2775 /*
2753 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize 2776 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2754 * on ep out. 2777 * on ep out.
2755 */ 2778 */
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index adc6d52efa46..cf43e9e18368 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -423,7 +423,7 @@ static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
 	spin_unlock_irq(&ffs->ev.waitq.lock);
 	mutex_unlock(&ffs->mutex);
 
-	return unlikely(__copy_to_user(buf, events, size)) ? -EFAULT : size;
+	return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
 }
 
 static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
@@ -513,7 +513,7 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
 
 	/* unlocks spinlock */
 	ret = __ffs_ep0_queue_wait(ffs, data, len);
-	if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
+	if (likely(ret > 0) && unlikely(copy_to_user(buf, data, len)))
 		ret = -EFAULT;
 	goto done_mutex;
 
@@ -3493,7 +3493,7 @@ static char *ffs_prepare_buffer(const char __user *buf, size_t len)
 	if (unlikely(!data))
 		return ERR_PTR(-ENOMEM);
 
-	if (unlikely(__copy_from_user(data, buf, len))) {
+	if (unlikely(copy_from_user(data, buf, len))) {
 		kfree(data);
 		return ERR_PTR(-EFAULT);
 	}
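The three f_fs.c hunks above drop the double-underscore uaccess variants. Under the usual semantics, copy_to_user()/copy_from_user() validate the user pointer themselves (the __-prefixed forms assume a prior access_ok() check) and return the number of bytes left uncopied, so nonzero means fault. A sketch of the checked pattern:

	#include <linux/uaccess.h>

	static ssize_t example_read_events(char __user *buf,
					   const void *events, size_t size)
	{
		/* checked copy: no separate access_ok() required */
		if (copy_to_user(buf, events, size))
			return -EFAULT;
		return size;
	}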
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 23933bdf2d9d..ddc3aad886b7 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -329,7 +329,7 @@ static int alloc_requests(struct usb_composite_dev *cdev,
 	for (i = 0; i < loop->qlen && result == 0; i++) {
 		result = -ENOMEM;
 
-		in_req = usb_ep_alloc_request(loop->in_ep, GFP_KERNEL);
+		in_req = usb_ep_alloc_request(loop->in_ep, GFP_ATOMIC);
 		if (!in_req)
 			goto fail;
 
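A note on the flag swap above: GFP_ATOMIC allocations never sleep, which is what makes them legal under a spinlock or in interrupt context, at the price of failing more readily than GFP_KERNEL. A sketch of the call-site shape, names hypothetical:

	#include <linux/usb/gadget.h>

	/* must not sleep if the caller can hold a spinlock */
	static int example_alloc_req(struct usb_ep *ep, struct usb_request **out)
	{
		*out = usb_ep_alloc_request(ep, GFP_ATOMIC);
		return *out ? 0 : -ENOMEM;
	}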
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 42acb45e1ab4..898a570319f1 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -370,6 +370,7 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 		if (err) {
 			ERROR(midi, "%s queue req: %d\n",
 					midi->out_ep->name, err);
+			free_ep_req(midi->out_ep, req);
 		}
 	}
 
@@ -545,7 +546,7 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req)
 		}
 	}
 
-	if (req->length > 0) {
+	if (req->length > 0 && ep->enabled) {
 		int err;
 
 		err = usb_ep_queue(ep, req, GFP_ATOMIC);
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 289ebca316d3..ad8c9b05572d 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -20,7 +20,7 @@
 #define UVC_ATTR(prefix, cname, aname) \
 static struct configfs_attribute prefix##attr_##cname = { \
 	.ca_name = __stringify(aname), \
-	.ca_mode = S_IRUGO, \
+	.ca_mode = S_IRUGO | S_IWUGO, \
 	.ca_owner = THIS_MODULE, \
 	.show = prefix##cname##_show, \
 	.store = prefix##cname##_store, \
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index f0f2b066ac08..f92f5aff0dd5 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1633,7 +1633,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
 	spin_lock(&udc->lock);
 
 	int_enb = usba_int_enb_get(udc);
-	status = usba_readl(udc, INT_STA) & int_enb;
+	status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED);
 	DBG(DBG_INT, "irq, status=%#08x\n", status);
 
 	if (status & USBA_DET_SUSPEND) {
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 670ac0b12f00..001a3b74a993 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -2536,6 +2536,9 @@ static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
 	udc->pullup_resume = udc->pullup_on;
 	dplus_pullup(udc, 0);
 
+	if (udc->driver)
+		udc->driver->disconnect(&udc->gadget);
+
 	return 0;
 }
 
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 342ffd140122..8c6e15bd6ff0 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -473,6 +473,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
 	if (!pdata)
 		return -ENOMEM;
 
+	pdev->dev.platform_data = pdata;
+
 	if (!of_property_read_u32(np, "num-ports", &ports))
 		pdata->ports = ports;
 
@@ -483,6 +485,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
 		 */
 		if (i >= pdata->ports) {
 			pdata->vbus_pin[i] = -EINVAL;
+			pdata->overcurrent_pin[i] = -EINVAL;
 			continue;
 		}
 
@@ -513,10 +516,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
 	}
 
 	at91_for_each_port(i) {
-		if (i >= pdata->ports) {
-			pdata->overcurrent_pin[i] = -EINVAL;
-			continue;
-		}
+		if (i >= pdata->ports)
+			break;
 
 		pdata->overcurrent_pin[i] =
 			of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
@@ -552,8 +553,6 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
 		}
 	}
 
-	pdev->dev.platform_data = pdata;
-
 	device_init_wakeup(&pdev->dev, 1);
 	return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev);
 }
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index dc31c425ce01..9f1c0538b211 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -377,6 +377,10 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f
 	if (std->pl_virt == NULL)
 		return -ENOMEM;
 	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
+	if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
+		kfree(std->pl_virt);
+		return -EFAULT;
+	}
 
 	for (p = 0; p < std->num_pointers; p++) {
 		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
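The qset.c hunk adds the canonical check for a failed streaming mapping: dma_map_single() can return an invalid handle (for example when bounce buffers or IOMMU space run out), and the only portable test is dma_mapping_error(). A sketch of the pattern, names hypothetical:

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	static int example_map_pl(struct device *dev, void *pl_virt, size_t len,
				  dma_addr_t *dma)
	{
		*dma = dma_map_single(dev, pl_virt, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *dma)) {
			kfree(pl_virt);		/* unwind the allocation */
			return -EFAULT;
		}
		return 0;
	}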
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 5d2d7e954bd4..f980c239eded 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -733,8 +733,30 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
 		if ((raw_port_status & PORT_RESET) ||
 				!(raw_port_status & PORT_PE))
 			return 0xffffffff;
-		if (time_after_eq(jiffies,
-					bus_state->resume_done[wIndex])) {
+		/* did port event handler already start resume timing? */
+		if (!bus_state->resume_done[wIndex]) {
+			/* If not, maybe we are in a host initated resume? */
+			if (test_bit(wIndex, &bus_state->resuming_ports)) {
+				/* Host initated resume doesn't time the resume
+				 * signalling using resume_done[].
+				 * It manually sets RESUME state, sleeps 20ms
+				 * and sets U0 state. This should probably be
+				 * changed, but not right now.
+				 */
+			} else {
+				/* port resume was discovered now and here,
+				 * start resume timing
+				 */
+				unsigned long timeout = jiffies +
+					msecs_to_jiffies(USB_RESUME_TIMEOUT);
+
+				set_bit(wIndex, &bus_state->resuming_ports);
+				bus_state->resume_done[wIndex] = timeout;
+				mod_timer(&hcd->rh_timer, timeout);
+			}
+		/* Has resume been signalled for USB_RESUME_TIME yet? */
+		} else if (time_after_eq(jiffies,
+					 bus_state->resume_done[wIndex])) {
 			int time_left;
 
 			xhci_dbg(xhci, "Resume USB2 port %d\n",
@@ -775,19 +797,35 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
 		} else {
 			/*
 			 * The resume has been signaling for less than
-			 * 20ms. Report the port status as SUSPEND,
-			 * let the usbcore check port status again
-			 * and clear resume signaling later.
+			 * USB_RESUME_TIME. Report the port status as SUSPEND,
+			 * let the usbcore check port status again and clear
+			 * resume signaling later.
 			 */
 			status |= USB_PORT_STAT_SUSPEND;
 		}
 	}
-	if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0
-			&& (raw_port_status & PORT_POWER)
-			&& (bus_state->suspended_ports & (1 << wIndex))) {
-		bus_state->suspended_ports &= ~(1 << wIndex);
-		if (hcd->speed < HCD_USB3)
-			bus_state->port_c_suspend |= 1 << wIndex;
+	/*
+	 * Clear stale usb2 resume signalling variables in case port changed
+	 * state during resume signalling. For example on error
+	 */
+	if ((bus_state->resume_done[wIndex] ||
+	     test_bit(wIndex, &bus_state->resuming_ports)) &&
+	    (raw_port_status & PORT_PLS_MASK) != XDEV_U3 &&
+	    (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
+		bus_state->resume_done[wIndex] = 0;
+		clear_bit(wIndex, &bus_state->resuming_ports);
+	}
+
+
+	if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 &&
+	    (raw_port_status & PORT_POWER)) {
+		if (bus_state->suspended_ports & (1 << wIndex)) {
+			bus_state->suspended_ports &= ~(1 << wIndex);
+			if (hcd->speed < HCD_USB3)
+				bus_state->port_c_suspend |= 1 << wIndex;
+		}
+		bus_state->resume_done[wIndex] = 0;
+		clear_bit(wIndex, &bus_state->resuming_ports);
 	}
 	if (raw_port_status & PORT_CONNECT) {
 		status |= USB_PORT_STAT_CONNECTION;
@@ -1112,6 +1150,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		if ((temp & PORT_PE) == 0)
 			goto error;
 
+		set_bit(wIndex, &bus_state->resuming_ports);
 		xhci_set_link_state(xhci, port_array, wIndex,
 					XDEV_RESUME);
 		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1119,6 +1158,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		spin_lock_irqsave(&xhci->lock, flags);
 		xhci_set_link_state(xhci, port_array, wIndex,
 					XDEV_U0);
+		clear_bit(wIndex, &bus_state->resuming_ports);
 	}
 	bus_state->port_c_suspend |= 1 << wIndex;
 
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 17f6897acde2..c62109091d12 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -188,10 +188,14 @@ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
 		0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
 		0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
 	};
-	acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
+	union acpi_object *obj;
+
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1,
+				NULL);
+	ACPI_FREE(obj);
 }
 #else
-	static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
+static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
 #endif /* CONFIG_ACPI */
 
 /* called during probe() after chip reset completes */
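The xhci-pci.c hunk fixes a leak: acpi_evaluate_dsm() hands back a caller-owned union acpi_object that must be released even when the result is ignored. A sketch of the call pattern under the 4.4-era prototype (handle and uuid supplied by the caller):

	#include <linux/acpi.h>

	static void example_run_dsm(acpi_handle handle, const u8 *uuid)
	{
		union acpi_object *obj;

		/* evaluate _DSM revision 3, function 1, no arguments */
		obj = acpi_evaluate_dsm(handle, uuid, 3, 1, NULL);
		ACPI_FREE(obj);		/* kfree-backed; a NULL result is fine */
	}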
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index fa836251ca21..eeaa6c6bd540 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1583,7 +1583,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
 		 */
 		bogus_port_status = true;
 		goto cleanup;
-	} else {
+	} else if (!test_bit(faked_port_index,
+			     &bus_state->resuming_ports)) {
 		xhci_dbg(xhci, "resume HS port %d\n", port_id);
 		bus_state->resume_done[faked_port_index] = jiffies +
 			msecs_to_jiffies(USB_RESUME_TIMEOUT);
@@ -3896,28 +3897,6 @@ cleanup:
 	return ret;
 }
 
-static int ep_ring_is_processing(struct xhci_hcd *xhci,
-		int slot_id, unsigned int ep_index)
-{
-	struct xhci_virt_device *xdev;
-	struct xhci_ring *ep_ring;
-	struct xhci_ep_ctx *ep_ctx;
-	struct xhci_virt_ep *xep;
-	dma_addr_t hw_deq;
-
-	xdev = xhci->devs[slot_id];
-	xep = &xhci->devs[slot_id]->eps[ep_index];
-	ep_ring = xep->ring;
-	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-
-	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING)
-		return 0;
-
-	hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
-	return (hw_deq !=
-		xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue));
-}
-
 /*
  * Check transfer ring to guarantee there is enough room for the urb.
  * Update ISO URB start_frame and interval.
@@ -3983,10 +3962,12 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	}
 
 	/* Calculate the start frame and put it in urb->start_frame. */
-	if (HCC_CFC(xhci->hcc_params) &&
-			ep_ring_is_processing(xhci, slot_id, ep_index)) {
-		urb->start_frame = xep->next_frame_id;
-		goto skip_start_over;
+	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
+		if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
+				EP_STATE_RUNNING) {
+			urb->start_frame = xep->next_frame_id;
+			goto skip_start_over;
+		}
 	}
 
 	start_frame = readl(&xhci->run_regs->microframe_index);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6e7dc6f93978..3f912705dcef 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -175,6 +175,16 @@ int xhci_reset(struct xhci_hcd *xhci)
 	command |= CMD_RESET;
 	writel(command, &xhci->op_regs->command);
 
+	/* Existing Intel xHCI controllers require a delay of 1 mS,
+	 * after setting the CMD_RESET bit, and before accessing any
+	 * HC registers. This allows the HC to complete the
+	 * reset operation and be ready for HC register access.
+	 * Without this delay, the subsequent HC register access,
+	 * may result in a system hang very rarely.
+	 */
+	if (xhci->quirks & XHCI_INTEL_HOST)
+		udelay(1000);
+
 	ret = xhci_handshake(&xhci->op_regs->command,
 			CMD_RESET, 0, 10 * 1000 * 1000);
 	if (ret)
@@ -4768,8 +4778,16 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
 	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
 	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
+	/*
+	 * refer to section 6.2.2: MTT should be 0 for full speed hub,
+	 * but it may be already set to 1 when setup an xHCI virtual
+	 * device, so clear it anyway.
+	 */
 	if (tt->multi)
 		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
+	else if (hdev->speed == USB_SPEED_FULL)
+		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
+
 	if (xhci->hci_version > 0x95) {
 		xhci_dbg(xhci, "xHCI version %x needs hub "
 				"TT think time and number of ports\n",
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 1f2037bbeb0d..45c83baf675d 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -159,7 +159,7 @@ config USB_TI_CPPI_DMA
 
 config USB_TI_CPPI41_DMA
 	bool 'TI CPPI 4.1 (AM335x)'
-	depends on ARCH_OMAP
+	depends on ARCH_OMAP && DMADEVICES
 	select TI_CPPI41
 
 config USB_TUSB_OMAP_DMA
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index ba13529cbd52..ee9ff7028b92 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -132,7 +132,7 @@ static inline struct musb *dev_to_musb(struct device *dev)
 /*-------------------------------------------------------------------------*/
 
 #ifndef CONFIG_BLACKFIN
-static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
+static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
 {
 	void __iomem *addr = phy->io_priv;
 	int	i = 0;
@@ -151,7 +151,7 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
 	 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
 	 */
 
-	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
+	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
 			MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
 
@@ -176,7 +176,7 @@ out:
 	return ret;
 }
 
-static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
+static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
 {
 	void __iomem *addr = phy->io_priv;
 	int	i = 0;
@@ -191,8 +191,8 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
 	power &= ~MUSB_POWER_SUSPENDM;
 	musb_writeb(addr, MUSB_POWER, power);
 
-	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
-	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data);
+	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
+	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
 
 	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
@@ -1668,7 +1668,7 @@ EXPORT_SYMBOL_GPL(musb_interrupt);
 static bool use_dma = 1;
 
 /* "modprobe ... use_dma=0" etc */
-module_param(use_dma, bool, 0);
+module_param(use_dma, bool, 0644);
 MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
 
 void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
@@ -2017,7 +2017,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
 	/* We need musb_read/write functions initialized for PM */
 	pm_runtime_use_autosuspend(musb->controller);
 	pm_runtime_set_autosuspend_delay(musb->controller, 200);
-	pm_runtime_irq_safe(musb->controller);
 	pm_runtime_enable(musb->controller);
 
 	/* The musb_platform_init() call:
@@ -2095,6 +2094,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
 #ifndef CONFIG_MUSB_PIO_ONLY
 	if (!musb->ops->dma_init || !musb->ops->dma_exit) {
 		dev_err(dev, "DMA controller not set\n");
+		status = -ENODEV;
 		goto fail2;
 	}
 	musb_dma_controller_create = musb->ops->dma_init;
@@ -2218,6 +2218,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
 
 	pm_runtime_put(musb->controller);
 
+	/*
+	 * For why this is currently needed, see commit 3e43a0725637
+	 * ("usb: musb: core: add pm_runtime_irq_safe()")
+	 */
+	pm_runtime_irq_safe(musb->controller);
+
 	return 0;
 
 fail5:
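The -ENODEV hunk in musb_init_controller() is the classic goto-unwind pitfall: every failure branch has to set the status it wants the caller to see before jumping to the cleanup label, or a stale value leaks out. A reduced sketch, names hypothetical:

	static int example_init(struct device *dev)
	{
		int status = 0;

		if (!example_has_dma_ops(dev)) {	/* hypothetical check */
			status = -ENODEV;		/* set before the jump */
			goto fail;
		}
		return 0;

	fail:
		return status;
	}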
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 26c65e66cc0f..795a45b1b25b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -112,22 +112,32 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
 	struct musb	*musb = ep->musb;
 	void __iomem	*epio = ep->regs;
 	u16		csr;
-	u16		lastcsr = 0;
 	int		retries = 1000;
 
 	csr = musb_readw(epio, MUSB_TXCSR);
 	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
-		if (csr != lastcsr)
-			dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
-		lastcsr = csr;
 		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
 		musb_writew(epio, MUSB_TXCSR, csr);
 		csr = musb_readw(epio, MUSB_TXCSR);
-		if (WARN(retries-- < 1,
+
+		/*
+		 * FIXME: sometimes the tx fifo flush failed, it has been
+		 * observed during device disconnect on AM335x.
+		 *
+		 * To reproduce the issue, ensure tx urb(s) are queued when
+		 * unplug the usb device which is connected to AM335x usb
+		 * host port.
+		 *
+		 * I found using a usb-ethernet device and running iperf
+		 * (client on AM335x) has very high chance to trigger it.
+		 *
+		 * Better to turn on dev_dbg() in musb_cleanup_urb() with
+		 * CPPI enabled to see the issue when aborting the tx channel.
+		 */
+		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
 				"Could not flush host TX%d fifo: csr: %04x\n",
 				ep->epnum, csr))
 			return;
-		mdelay(1);
 	}
 }
 
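The musb_host.c rewrite trades a per-iteration dev_dbg() plus WARN() for dev_WARN_ONCE(), which evaluates its condition every time but emits the backtrace only on the first trigger, so a wedged FIFO cannot flood the log. A sketch of the shape, helper hypothetical:

	#include <linux/device.h>

	static void example_flush(struct device *dev)
	{
		int retries = 1000;

		while (example_fifo_not_empty(dev)) {	/* hypothetical */
			if (dev_WARN_ONCE(dev, retries-- < 1,
					  "could not flush FIFO\n"))
				return;	/* one backtrace, then give up */
		}
	}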
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 173132416170..22e8ecb6bfbd 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -21,7 +21,6 @@ config AB8500_USB
 config FSL_USB2_OTG
 	bool "Freescale USB OTG Transceiver Driver"
 	depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
-	select USB_OTG
 	select USB_PHY
 	help
 	  Enable this to support Freescale USB OTG transceiver.
@@ -168,8 +167,7 @@ config USB_QCOM_8X16_PHY
 
 config USB_MV_OTG
 	tristate "Marvell USB OTG support"
-	depends on USB_EHCI_MV && USB_MV_UDC && PM
-	select USB_OTG
+	depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG
 	select USB_PHY
 	help
 	  Say Y here if you want to build Marvell USB OTG transciever
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 80eb991c2506..0d19a6d61a71 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1506,7 +1506,6 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
 {
 	struct msm_otg_platform_data *pdata;
 	struct extcon_dev *ext_id, *ext_vbus;
-	const struct of_device_id *id;
 	struct device_node *node = pdev->dev.of_node;
 	struct property *prop;
 	int len, ret, words;
@@ -1518,8 +1517,9 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
 
 	motg->pdata = pdata;
 
-	id = of_match_device(msm_otg_dt_match, &pdev->dev);
-	pdata->phy_type = (enum msm_usb_phy_type) id->data;
+	pdata->phy_type = (enum msm_usb_phy_type)of_device_get_match_data(&pdev->dev);
+	if (!pdata->phy_type)
+		return 1;
 
 	motg->link_rst = devm_reset_control_get(&pdev->dev, "link");
 	if (IS_ERR(motg->link_rst))
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 4d863ebc117c..c2936dc48ca7 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -143,12 +143,17 @@ static const struct mxs_phy_data imx6sx_phy_data = {
 	.flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
 };
 
+static const struct mxs_phy_data imx6ul_phy_data = {
+	.flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS,
+};
+
 static const struct of_device_id mxs_phy_dt_ids[] = {
 	{ .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, },
 	{ .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, },
 	{ .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, },
 	{ .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, },
 	{ .compatible = "fsl,vf610-usbphy", .data = &vf610_phy_data, },
+	{ .compatible = "fsl,imx6ul-usbphy", .data = &imx6ul_phy_data, },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, mxs_phy_dt_ids);
@@ -452,10 +457,13 @@ static int mxs_phy_probe(struct platform_device *pdev)
 	struct clk *clk;
 	struct mxs_phy *mxs_phy;
 	int ret;
-	const struct of_device_id *of_id =
-			of_match_device(mxs_phy_dt_ids, &pdev->dev);
+	const struct of_device_id *of_id;
 	struct device_node *np = pdev->dev.of_node;
 
+	of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev);
+	if (!of_id)
+		return -ENODEV;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(base))
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index 1270906ccb95..c4bf2de6d14e 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -105,7 +105,6 @@ static int omap_otg_probe(struct platform_device *pdev)
 	extcon = extcon_get_extcon_dev(config->extcon);
 	if (!extcon)
 		return -EPROBE_DEFER;
-	otg_dev->extcon = extcon;
 
 	otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
 	if (!otg_dev)
@@ -115,6 +114,7 @@ static int omap_otg_probe(struct platform_device *pdev)
 	if (IS_ERR(otg_dev->base))
 		return PTR_ERR(otg_dev->base);
 
+	otg_dev->extcon = extcon;
 	otg_dev->id_nb.notifier_call = omap_otg_id_notifier;
 	otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier;
 
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index de4f97d84a82..8f7a78e70975 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -131,7 +131,8 @@ static void __usbhsg_queue_pop(struct usbhsg_uep *uep,
 	struct device *dev = usbhsg_gpriv_to_dev(gpriv);
 	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 
-	dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));
+	if (pipe)
+		dev_dbg(dev, "pipe %d : queue pop\n", usbhs_pipe_number(pipe));
 
 	ureq->req.status = status;
 	spin_unlock(usbhs_priv_to_lock(priv));
@@ -685,7 +686,13 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
 	struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
 	struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
 
-	usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
+	if (pipe)
+		usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
+
+	/*
+	 * To dequeue a request, this driver should call the usbhsg_queue_pop()
+	 * even if the pipe is NULL.
+	 */
 	usbhsg_queue_pop(uep, ureq, -ECONNRESET);
 
 	return 0;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index eac7ccaa3c85..7d4f51a32e66 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -132,7 +132,6 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
-	{ USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
 	{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
 	{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index f51a5d52c0ed..ec1b8f2c1183 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -531,7 +531,8 @@ static int ipaq_open(struct tty_struct *tty,
 	 * through. Since this has a reasonably high failure rate, we retry
 	 * several times.
 	 */
-	while (retries--) {
+	while (retries) {
+		retries--;
 		result = usb_control_msg(serial->dev,
 				usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
 				0x1, 0, NULL, 0, 100);
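The ipaq.c change looks cosmetic but is not: with while (retries--) the counter is decremented even on the final, failing test, so it ends at -1 and a later if (!retries) exhaustion check never fires. Decrementing inside the body leaves it at 0 after the last attempt. A reduced sketch, helper hypothetical:

	static int example_retry(void)
	{
		int retries = 3, result = -1;

		while (retries) {
			retries--;
			result = example_try_once();	/* hypothetical */
			if (result == 0)
				break;
		}
		if (!retries && result)
			return -EIO;	/* all attempts really failed */
		return 0;
	}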
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 685fef71d3d1..f2280606b73c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb);
 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED	0x9001
 #define NOVATELWIRELESS_PRODUCT_E362		0x9010
 #define NOVATELWIRELESS_PRODUCT_E371		0x9011
+#define NOVATELWIRELESS_PRODUCT_U620L		0x9022
 #define NOVATELWIRELESS_PRODUCT_G2		0xA010
 #define NOVATELWIRELESS_PRODUCT_MC551		0xB001
 
@@ -354,6 +355,7 @@ static void option_instat_callback(struct urb *urb);
 /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
  * It seems to contain a Qualcomm QSC6240/6290 chipset            */
 #define FOUR_G_SYSTEMS_PRODUCT_W14		0x9603
+#define FOUR_G_SYSTEMS_PRODUCT_W100		0x9b01
 
 /* iBall 3.5G connect wireless modem */
 #define IBALL_3_5G_CONNECT			0x9605
@@ -519,6 +521,11 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
 	.sendsetup = BIT(0) | BIT(1),
 };
 
+static const struct option_blacklist_info four_g_w100_blacklist = {
+	.sendsetup = BIT(1) | BIT(2),
+	.reserved = BIT(3),
+};
+
 static const struct option_blacklist_info alcatel_x200_blacklist = {
 	.sendsetup = BIT(0) | BIT(1),
 	.reserved = BIT(4),
@@ -1052,6 +1059,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) },
 
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -1641,6 +1649,9 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
 	  .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
 	},
+	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
+	  .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
+	},
 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 5022fcfa0260..9919d2a9faf2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -22,6 +22,8 @@
 #define DRIVER_AUTHOR "Qualcomm Inc"
 #define DRIVER_DESC "Qualcomm USB Serial driver"
 
+#define QUECTEL_EC20_PID	0x9215
+
 /* standard device layouts supported by this driver */
 enum qcserial_layouts {
 	QCSERIAL_G2K = 0,	/* Gobi 2000 */
@@ -171,6 +173,38 @@ static const struct usb_device_id id_table[] = {
 };
 MODULE_DEVICE_TABLE(usb, id_table);
 
+static int handle_quectel_ec20(struct device *dev, int ifnum)
+{
+	int altsetting = 0;
+
+	/*
+	 * Quectel EC20 Mini PCIe LTE module layout:
+	 * 0: DM/DIAG (use libqcdm from ModemManager for communication)
+	 * 1: NMEA
+	 * 2: AT-capable modem port
+	 * 3: Modem interface
+	 * 4: NDIS
+	 */
+	switch (ifnum) {
+	case 0:
+		dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n");
+		break;
+	case 1:
+		dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n");
+		break;
+	case 2:
+	case 3:
+		dev_dbg(dev, "Quectel EC20 Modem port found\n");
+		break;
+	case 4:
+		/* Don't claim the QMI/net interface */
+		altsetting = -1;
+		break;
+	}
+
+	return altsetting;
+}
+
 static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 {
 	struct usb_host_interface *intf = serial->interface->cur_altsetting;
@@ -181,6 +215,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 	int altsetting = -1;
 	bool sendsetup = false;
 
+	/* we only support vendor specific functions */
+	if (intf->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
+		goto done;
+
 	nintf = serial->dev->actconfig->desc.bNumInterfaces;
 	dev_dbg(dev, "Num Interfaces = %d\n", nintf);
 	ifnum = intf->desc.bInterfaceNumber;
@@ -240,6 +278,12 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 		altsetting = -1;
 		break;
 	case QCSERIAL_G2K:
+		/* handle non-standard layouts */
+		if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) {
+			altsetting = handle_quectel_ec20(dev, ifnum);
+			goto done;
+		}
+
 		/*
 		 * Gobi 2K+ USB layout:
 		 * 0: QMI/net
@@ -301,29 +345,39 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 		break;
 	case QCSERIAL_HWI:
 		/*
-		 * Huawei layout:
-		 * 0: AT-capable modem port
-		 * 1: DM/DIAG
-		 * 2: AT-capable modem port
-		 * 3: CCID-compatible PCSC interface
-		 * 4: QMI/net
-		 * 5: NMEA
+		 * Huawei devices map functions by subclass + protocol
+		 * instead of interface numbers. The protocol identify
+		 * a specific function, while the subclass indicate a
+		 * specific firmware source
+		 *
+		 * This is a blacklist of functions known to be
+		 * non-serial. The rest are assumed to be serial and
+		 * will be handled by this driver
 		 */
-		switch (ifnum) {
-		case 0:
-		case 2:
-			dev_dbg(dev, "Modem port found\n");
-			break;
-		case 1:
-			dev_dbg(dev, "DM/DIAG interface found\n");
-			break;
-		case 5:
-			dev_dbg(dev, "NMEA GPS interface found\n");
-			break;
-		default:
-			/* don't claim any unsupported interface */
+		switch (intf->desc.bInterfaceProtocol) {
+		/* QMI combined (qmi_wwan) */
+		case 0x07:
+		case 0x37:
+		case 0x67:
+		/* QMI data (qmi_wwan) */
+		case 0x08:
+		case 0x38:
+		case 0x68:
+		/* QMI control (qmi_wwan) */
+		case 0x09:
+		case 0x39:
+		case 0x69:
+		/* NCM like (huawei_cdc_ncm) */
+		case 0x16:
+		case 0x46:
+		case 0x76:
 			altsetting = -1;
 			break;
+		default:
+			dev_dbg(dev, "Huawei type serial port found (%02x/%02x/%02x)\n",
+				intf->desc.bInterfaceClass,
+				intf->desc.bInterfaceSubClass,
+				intf->desc.bInterfaceProtocol);
 		}
 		break;
 	default:
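The qcserial rework matches Huawei functions by bInterfaceProtocol and blacklists the known non-serial ones; everything else falls through to this driver. The general mechanism for declining an interface from a usb-serial probe() is an error return, which leaves it free for another driver (qmi_wwan, huawei_cdc_ncm). A sketch under that assumption, protocol values illustrative:

	static int example_probe(struct usb_serial *serial,
				 const struct usb_device_id *id)
	{
		u8 proto = serial->interface->cur_altsetting->desc.bInterfaceProtocol;

		/* mirror the blacklist idea: decline non-serial functions */
		if (proto == 0x07 || proto == 0x08 || proto == 0x09)
			return -ENODEV;	/* not ours; let qmi_wwan bind */

		return 0;
	}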
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index e9da41d9fe7f..2694df2f4559 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -159,6 +159,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
+	{ USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
 	{ }	/* terminator */
 };
 
@@ -191,6 +192,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
+	{ USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
 	{ }	/* terminator */
 };
 
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index 4a2423e84d55..98f35c656c02 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -56,6 +56,10 @@
 #define ABBOTT_PRODUCT_ID		ABBOTT_STEREO_PLUG_ID
 #define ABBOTT_STRIP_PORT_ID		0x3420
 
+/* Honeywell vendor and product IDs */
+#define HONEYWELL_VENDOR_ID		0x10ac
+#define HONEYWELL_HGI80_PRODUCT_ID	0x0102  /* Honeywell HGI80 */
+
 /* Commands */
 #define TI_GET_VERSION			0x01
 #define TI_GET_PORT_STATUS		0x02
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 3658662898fc..a204782ae530 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -53,6 +53,7 @@ DEVICE(funsoft, FUNSOFT_IDS);
 
 /* Infineon Flashloader driver */
 #define FLASHLOADER_IDS()		\
+	{ USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
 	{ USB_DEVICE(0x8087, 0x0716) }
 DEVICE(flashloader, FLASHLOADER_IDS);
 
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index e69151664436..5c66d3f7a6d0 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -796,6 +796,10 @@ static int uas_slave_configure(struct scsi_device *sdev)
 	if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
 		sdev->no_report_opcodes = 1;
 
+	/* A few buggy USB-ATA bridges don't understand FUA */
+	if (devinfo->flags & US_FL_BROKEN_FUA)
+		sdev->broken_fua = 1;
+
 	scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
 	return 0;
 }
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 6b2479123de7..7ffe4209067b 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1987,7 +1987,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
 		US_FL_IGNORE_RESIDUE ),
 
 /* Reported by Michael Büsch <m@bues.ch> */
-UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0114,
+UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0116,
 		"JMicron",
 		"USB to ATA/ATAPI Bridge",
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index c85ea530085f..ccc113e83d88 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -132,7 +132,7 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
132 "JMicron", 132 "JMicron",
133 "JMS567", 133 "JMS567",
134 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 134 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
135 US_FL_NO_REPORT_OPCODES), 135 US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
136 136
137/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 137/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
138UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, 138UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
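The last two storage hunks work as a pair: the unusual_uas entry tags the JMS567 with US_FL_BROKEN_FUA, and uas_slave_configure() translates that into the scsi_device knob the sd driver consults before issuing FUA writes. A sketch of the translation step, assuming the US_FL_BROKEN_FUA flag from linux/usb_usual.h:

	#include <linux/usb_usual.h>
	#include <scsi/scsi_device.h>

	static void example_apply_quirks(struct scsi_device *sdev, u64 flags)
	{
		if (flags & US_FL_BROKEN_FUA)	/* flag from the quirk table */
			sdev->broken_fua = 1;	/* sd then avoids FUA writes */
	}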
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index da6e2ce77495..850d86ca685b 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -31,21 +31,6 @@ menuconfig VFIO
 
 	  If you don't know what to do here, say N.
 
-menuconfig VFIO_NOIOMMU
-	bool "VFIO No-IOMMU support"
-	depends on VFIO
-	help
-	  VFIO is built on the ability to isolate devices using the IOMMU.
-	  Only with an IOMMU can userspace access to DMA capable devices be
-	  considered secure. VFIO No-IOMMU mode enables IOMMU groups for
-	  devices without IOMMU backing for the purpose of re-using the VFIO
-	  infrastructure in a non-secure mode. Use of this mode will result
-	  in an unsupportable kernel and will therefore taint the kernel.
-	  Device assignment to virtual machines is also not possible with
-	  this mode since there is no IOMMU to provide DMA translation.
-
-	  If you don't know what to do here, say N.
-
 source "drivers/vfio/pci/Kconfig"
 source "drivers/vfio/platform/Kconfig"
 source "virt/lib/Kconfig"
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 32b88bd2c82c..56bf6dbb93db 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -940,13 +940,13 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
 		return -EINVAL;
 
-	group = vfio_iommu_group_get(&pdev->dev);
+	group = iommu_group_get(&pdev->dev);
 	if (!group)
 		return -EINVAL;
 
 	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
 	if (!vdev) {
-		vfio_iommu_group_put(group, &pdev->dev);
+		iommu_group_put(group);
 		return -ENOMEM;
 	}
 
@@ -957,7 +957,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
 	if (ret) {
-		vfio_iommu_group_put(group, &pdev->dev);
+		iommu_group_put(group);
 		kfree(vdev);
 		return ret;
 	}
@@ -993,7 +993,7 @@ static void vfio_pci_remove(struct pci_dev *pdev)
 	if (!vdev)
 		return;
 
-	vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
+	iommu_group_put(pdev->dev.iommu_group);
 	kfree(vdev);
 
 	if (vfio_pci_is_vga(pdev)) {
@@ -1035,7 +1035,7 @@ static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
 	return PCI_ERS_RESULT_CAN_RECOVER;
 }
 
-static struct pci_error_handlers vfio_err_handlers = {
+static const struct pci_error_handlers vfio_err_handlers = {
 	.error_detected = vfio_pci_aer_err_detected,
 };
 
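With the no-IOMMU wrappers gone, vfio-pci returns to the plain reference-counted group API: take the group at probe, and drop it on every error path and again at remove. A reduced sketch of the balance, register helper hypothetical:

	#include <linux/iommu.h>

	static int example_probe(struct device *dev)
	{
		struct iommu_group *group = iommu_group_get(dev);
		int ret;

		if (!group)
			return -EINVAL;

		ret = example_register(dev);	/* hypothetical */
		if (ret)
			iommu_group_put(group);	/* balance on failure */
		return ret;			/* put again in remove() */
	}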
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index f1625dcfbb23..b1cc3a768784 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -92,7 +92,6 @@ static struct platform_driver vfio_platform_driver = {
 	.remove		= vfio_platform_remove,
 	.driver	= {
 		.name	= "vfio-platform",
-		.owner	= THIS_MODULE,
 	},
 };
 
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index a1c50d630792..418cdd9ba3f4 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -51,13 +51,10 @@ static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
 
 static void vfio_platform_get_reset(struct vfio_platform_device *vdev)
 {
-	char modname[256];
-
 	vdev->reset = vfio_platform_lookup_reset(vdev->compat,
 						&vdev->reset_module);
 	if (!vdev->reset) {
-		snprintf(modname, 256, "vfio-reset:%s", vdev->compat);
-		request_module(modname);
+		request_module("vfio-reset:%s", vdev->compat);
 		vdev->reset = vfio_platform_lookup_reset(vdev->compat,
 							&vdev->reset_module);
 	}
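The vfio_platform_common.c hunk leans on request_module() being printf-like, which removes the fixed 256-byte staging buffer, and any truncation worry, entirely. A sketch of the direct form:

	#include <linux/kmod.h>

	static void example_request_reset(const char *compat)
	{
		/* request_module() accepts a format string and arguments */
		request_module("vfio-reset:%s", compat);
	}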
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index de632da2e22f..6070b793cbcb 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -62,7 +62,6 @@ struct vfio_container {
 	struct rw_semaphore		group_lock;
 	struct vfio_iommu_driver	*iommu_driver;
 	void				*iommu_data;
-	bool				noiommu;
 };
 
 struct vfio_unbound_dev {
@@ -85,7 +84,6 @@ struct vfio_group {
 	struct list_head		unbound_list;
 	struct mutex			unbound_lock;
 	atomic_t			opened;
-	bool				noiommu;
 };
 
 struct vfio_device {
@@ -97,147 +95,6 @@ struct vfio_device {
97 void *device_data; 95 void *device_data;
98}; 96};
99 97
100#ifdef CONFIG_VFIO_NOIOMMU
101static bool noiommu __read_mostly;
102module_param_named(enable_unsafe_noiommu_support,
103 noiommu, bool, S_IRUGO | S_IWUSR);
104MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
105#endif
106
107/*
108 * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
109 * and remove functions, any use cases other than acquiring the first
110 * reference for the purpose of calling vfio_add_group_dev() or removing
111 * that symmetric reference after vfio_del_group_dev() should use the raw
112 * iommu_group_{get,put} functions. In particular, vfio_iommu_group_put()
113 * removes the device from the dummy group and cannot be nested.
114 */
115struct iommu_group *vfio_iommu_group_get(struct device *dev)
116{
117 struct iommu_group *group;
118 int __maybe_unused ret;
119
120 group = iommu_group_get(dev);
121
122#ifdef CONFIG_VFIO_NOIOMMU
123 /*
124 * With noiommu enabled, an IOMMU group will be created for a device
125 * that doesn't already have one and doesn't have an iommu_ops on their
126 * bus. We use iommu_present() again in the main code to detect these
127 * fake groups.
128 */
129 if (group || !noiommu || iommu_present(dev->bus))
130 return group;
131
132 group = iommu_group_alloc();
133 if (IS_ERR(group))
134 return NULL;
135
136 iommu_group_set_name(group, "vfio-noiommu");
137 ret = iommu_group_add_device(group, dev);
138 iommu_group_put(group);
139 if (ret)
140 return NULL;
141
142 /*
143 * Where to taint? At this point we've added an IOMMU group for a
144 * device that is not backed by iommu_ops, therefore any iommu_
145 * callback using iommu_ops can legitimately Oops. So, while we may
146 * be about to give a DMA capable device to a user without IOMMU
147 * protection, which is clearly taint-worthy, let's go ahead and do
148 * it here.
149 */
150 add_taint(TAINT_USER, LOCKDEP_STILL_OK);
151 dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
152#endif
153
154 return group;
155}
156EXPORT_SYMBOL_GPL(vfio_iommu_group_get);
157
158void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
159{
160#ifdef CONFIG_VFIO_NOIOMMU
161 if (!iommu_present(dev->bus))
162 iommu_group_remove_device(dev);
163#endif
164
165 iommu_group_put(group);
166}
167EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
168
169#ifdef CONFIG_VFIO_NOIOMMU
170static void *vfio_noiommu_open(unsigned long arg)
171{
172 if (arg != VFIO_NOIOMMU_IOMMU)
173 return ERR_PTR(-EINVAL);
174 if (!capable(CAP_SYS_RAWIO))
175 return ERR_PTR(-EPERM);
176
177 return NULL;
178}
179
180static void vfio_noiommu_release(void *iommu_data)
181{
182}
183
184static long vfio_noiommu_ioctl(void *iommu_data,
185 unsigned int cmd, unsigned long arg)
186{
187 if (cmd == VFIO_CHECK_EXTENSION)
188 return arg == VFIO_NOIOMMU_IOMMU ? 1 : 0;
189
190 return -ENOTTY;
191}
192
193static int vfio_iommu_present(struct device *dev, void *unused)
194{
195 return iommu_present(dev->bus) ? 1 : 0;
196}
197
198static int vfio_noiommu_attach_group(void *iommu_data,
199 struct iommu_group *iommu_group)
200{
201 return iommu_group_for_each_dev(iommu_group, NULL,
202 vfio_iommu_present) ? -EINVAL : 0;
203}
204
205static void vfio_noiommu_detach_group(void *iommu_data,
206 struct iommu_group *iommu_group)
207{
208}
209
210static struct vfio_iommu_driver_ops vfio_noiommu_ops = {
211 .name = "vfio-noiommu",
212 .owner = THIS_MODULE,
213 .open = vfio_noiommu_open,
214 .release = vfio_noiommu_release,
215 .ioctl = vfio_noiommu_ioctl,
216 .attach_group = vfio_noiommu_attach_group,
217 .detach_group = vfio_noiommu_detach_group,
218};
219
220static struct vfio_iommu_driver vfio_noiommu_driver = {
221 .ops = &vfio_noiommu_ops,
222};
223
224/*
225 * Wrap IOMMU drivers: the noiommu driver is the one and only driver for
226 * noiommu groups (and thus containers); it is not available for normal groups.
227 */
228#define vfio_for_each_iommu_driver(con, pos) \
229 for (pos = con->noiommu ? &vfio_noiommu_driver : \
230 list_first_entry(&vfio.iommu_drivers_list, \
231 struct vfio_iommu_driver, vfio_next); \
232 (con->noiommu ? pos != NULL : \
233 &pos->vfio_next != &vfio.iommu_drivers_list); \
234 pos = con->noiommu ? NULL : list_next_entry(pos, vfio_next))
235#else
236#define vfio_for_each_iommu_driver(con, pos) \
237 list_for_each_entry(pos, &vfio.iommu_drivers_list, vfio_next)
238#endif
239
240
241/** 98/**
242 * IOMMU driver registration 99 * IOMMU driver registration
243 */ 100 */
@@ -342,8 +199,7 @@ static void vfio_group_unlock_and_free(struct vfio_group *group)
342/** 199/**
343 * Group objects - create, release, get, put, search 200 * Group objects - create, release, get, put, search
344 */ 201 */
345static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group, 202static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
346 bool noiommu)
347{ 203{
348 struct vfio_group *group, *tmp; 204 struct vfio_group *group, *tmp;
349 struct device *dev; 205 struct device *dev;
@@ -361,7 +217,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
361 atomic_set(&group->container_users, 0); 217 atomic_set(&group->container_users, 0);
362 atomic_set(&group->opened, 0); 218 atomic_set(&group->opened, 0);
363 group->iommu_group = iommu_group; 219 group->iommu_group = iommu_group;
364 group->noiommu = noiommu;
365 220
366 group->nb.notifier_call = vfio_iommu_group_notifier; 221 group->nb.notifier_call = vfio_iommu_group_notifier;
367 222
@@ -397,8 +252,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
397 252
398 dev = device_create(vfio.class, NULL, 253 dev = device_create(vfio.class, NULL,
399 MKDEV(MAJOR(vfio.group_devt), minor), 254 MKDEV(MAJOR(vfio.group_devt), minor),
400 group, "%s%d", noiommu ? "noiommu-" : "", 255 group, "%d", iommu_group_id(iommu_group));
401 iommu_group_id(iommu_group));
402 if (IS_ERR(dev)) { 256 if (IS_ERR(dev)) {
403 vfio_free_group_minor(minor); 257 vfio_free_group_minor(minor);
404 vfio_group_unlock_and_free(group); 258 vfio_group_unlock_and_free(group);
@@ -682,7 +536,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
682 return 0; 536 return 0;
683 537
684 /* TODO Prevent device auto probing */ 538 /* TODO Prevent device auto probing */
685 WARN("Device %s added to live group %d!\n", dev_name(dev), 539 WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
686 iommu_group_id(group->iommu_group)); 540 iommu_group_id(group->iommu_group));
687 541
688 return 0; 542 return 0;
@@ -786,8 +640,7 @@ int vfio_add_group_dev(struct device *dev,
786 640
787 group = vfio_group_get_from_iommu(iommu_group); 641 group = vfio_group_get_from_iommu(iommu_group);
788 if (!group) { 642 if (!group) {
789 group = vfio_create_group(iommu_group, 643 group = vfio_create_group(iommu_group);
790 !iommu_present(dev->bus));
791 if (IS_ERR(group)) { 644 if (IS_ERR(group)) {
792 iommu_group_put(iommu_group); 645 iommu_group_put(iommu_group);
793 return PTR_ERR(group); 646 return PTR_ERR(group);
@@ -999,7 +852,8 @@ static long vfio_ioctl_check_extension(struct vfio_container *container,
999 */ 852 */
1000 if (!driver) { 853 if (!driver) {
1001 mutex_lock(&vfio.iommu_drivers_lock); 854 mutex_lock(&vfio.iommu_drivers_lock);
1002 vfio_for_each_iommu_driver(container, driver) { 855 list_for_each_entry(driver, &vfio.iommu_drivers_list,
856 vfio_next) {
1003 if (!try_module_get(driver->ops->owner)) 857 if (!try_module_get(driver->ops->owner))
1004 continue; 858 continue;
1005 859
@@ -1068,7 +922,7 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
1068 } 922 }
1069 923
1070 mutex_lock(&vfio.iommu_drivers_lock); 924 mutex_lock(&vfio.iommu_drivers_lock);
1071 vfio_for_each_iommu_driver(container, driver) { 925 list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
1072 void *data; 926 void *data;
1073 927
1074 if (!try_module_get(driver->ops->owner)) 928 if (!try_module_get(driver->ops->owner))
@@ -1333,9 +1187,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
1333 if (atomic_read(&group->container_users)) 1187 if (atomic_read(&group->container_users))
1334 return -EINVAL; 1188 return -EINVAL;
1335 1189
1336 if (group->noiommu && !capable(CAP_SYS_RAWIO))
1337 return -EPERM;
1338
1339 f = fdget(container_fd); 1190 f = fdget(container_fd);
1340 if (!f.file) 1191 if (!f.file)
1341 return -EBADF; 1192 return -EBADF;
@@ -1351,13 +1202,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
1351 1202
1352 down_write(&container->group_lock); 1203 down_write(&container->group_lock);
1353 1204
1354 /* Real groups and fake groups cannot mix */
1355 if (!list_empty(&container->group_list) &&
1356 container->noiommu != group->noiommu) {
1357 ret = -EPERM;
1358 goto unlock_out;
1359 }
1360
1361 driver = container->iommu_driver; 1205 driver = container->iommu_driver;
1362 if (driver) { 1206 if (driver) {
1363 ret = driver->ops->attach_group(container->iommu_data, 1207 ret = driver->ops->attach_group(container->iommu_data,
@@ -1367,7 +1211,6 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
1367 } 1211 }
1368 1212
1369 group->container = container; 1213 group->container = container;
1370 container->noiommu = group->noiommu;
1371 list_add(&group->container_next, &container->group_list); 1214 list_add(&group->container_next, &container->group_list);
1372 1215
1373 /* Get a reference on the container and mark a user within the group */ 1216 /* Get a reference on the container and mark a user within the group */
@@ -1398,9 +1241,6 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1398 !group->container->iommu_driver || !vfio_group_viable(group)) 1241 !group->container->iommu_driver || !vfio_group_viable(group))
1399 return -EINVAL; 1242 return -EINVAL;
1400 1243
1401 if (group->noiommu && !capable(CAP_SYS_RAWIO))
1402 return -EPERM;
1403
1404 device = vfio_device_get_from_name(group, buf); 1244 device = vfio_device_get_from_name(group, buf);
1405 if (!device) 1245 if (!device)
1406 return -ENODEV; 1246 return -ENODEV;
@@ -1443,10 +1283,6 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1443 1283
1444 fd_install(ret, filep); 1284 fd_install(ret, filep);
1445 1285
1446 if (group->noiommu)
1447 dev_warn(device->dev, "vfio-noiommu device opened by user "
1448 "(%s:%d)\n", current->comm, task_pid_nr(current));
1449
1450 return ret; 1286 return ret;
1451} 1287}
1452 1288
@@ -1535,11 +1371,6 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep)
1535 if (!group) 1371 if (!group)
1536 return -ENODEV; 1372 return -ENODEV;
1537 1373
1538 if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
1539 vfio_group_put(group);
1540 return -EPERM;
1541 }
1542
1543 /* Do we need multiple instances of the group open? Seems not. */ 1374 /* Do we need multiple instances of the group open? Seems not. */
1544 opened = atomic_cmpxchg(&group->opened, 0, 1); 1375 opened = atomic_cmpxchg(&group->opened, 0, 1);
1545 if (opened) { 1376 if (opened) {
@@ -1702,11 +1533,6 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep)
1702 if (!atomic_inc_not_zero(&group->container_users)) 1533 if (!atomic_inc_not_zero(&group->container_users))
1703 return ERR_PTR(-EINVAL); 1534 return ERR_PTR(-EINVAL);
1704 1535
1705 if (group->noiommu) {
1706 atomic_dec(&group->container_users);
1707 return ERR_PTR(-EPERM);
1708 }
1709
1710 if (!group->container->iommu_driver || 1536 if (!group->container->iommu_driver ||
1711 !vfio_group_viable(group)) { 1537 !vfio_group_viable(group)) {
1712 atomic_dec(&group->container_users); 1538 atomic_dec(&group->container_users);
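For context, the vfio_for_each_iommu_driver() macro removed above existed so that a noiommu container would only ever see the built-in noiommu driver, while normal containers walked the registered driver list. A sketch of the equivalent open-coded selection, using the vfio structures from the code above (try_driver() is a hypothetical stand-in for the per-driver probe logic at each call site):

	static void select_driver(struct vfio_container *con)
	{
		struct vfio_iommu_driver *pos;

		if (con->noiommu) {
			/* noiommu containers get exactly one candidate */
			try_driver(con, &vfio_noiommu_driver);
			return;
		}

		/* normal containers walk every registered driver */
		list_for_each_entry(pos, &vfio.iommu_drivers_list, vfio_next)
			try_driver(con, pos);
	}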
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index eec2f11809ff..ad2146a9ab2d 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -819,7 +819,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
819 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); 819 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
820 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) || 820 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
821 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) || 821 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
822 (a.log_guest_addr & (sizeof(u64) - 1))) { 822 (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
823 r = -EINVAL; 823 r = -EINVAL;
824 break; 824 break;
825 } 825 }
@@ -1369,7 +1369,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
1369 /* Grab the next descriptor number they're advertising, and increment 1369 /* Grab the next descriptor number they're advertising, and increment
1370 * the index we've seen. */ 1370 * the index we've seen. */
1371 if (unlikely(__get_user(ring_head, 1371 if (unlikely(__get_user(ring_head,
1372 &vq->avail->ring[last_avail_idx % vq->num]))) { 1372 &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
1373 vq_err(vq, "Failed to read head: idx %d address %p\n", 1373 vq_err(vq, "Failed to read head: idx %d address %p\n",
1374 last_avail_idx, 1374 last_avail_idx,
1375 &vq->avail->ring[last_avail_idx % vq->num]); 1375 &vq->avail->ring[last_avail_idx % vq->num]);
@@ -1489,7 +1489,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1489 u16 old, new; 1489 u16 old, new;
1490 int start; 1490 int start;
1491 1491
1492 start = vq->last_used_idx % vq->num; 1492 start = vq->last_used_idx & (vq->num - 1);
1493 used = vq->used->ring + start; 1493 used = vq->used->ring + start;
1494 if (count == 1) { 1494 if (count == 1) {
1495 if (__put_user(heads[0].id, &used->id)) { 1495 if (__put_user(heads[0].id, &used->id)) {
@@ -1531,7 +1531,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1531{ 1531{
1532 int start, n, r; 1532 int start, n, r;
1533 1533
1534 start = vq->last_used_idx % vq->num; 1534 start = vq->last_used_idx & (vq->num - 1);
1535 n = vq->num - start; 1535 n = vq->num - start;
1536 if (n < count) { 1536 if (n < count) {
1537 r = __vhost_add_used_n(vq, heads, n); 1537 r = __vhost_add_used_n(vq, heads, n);
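The "% vq->num" to "& (vq->num - 1)" conversions above are safe only because vhost accepts power-of-two ring sizes (the legacy virtio ring requires them); for such sizes the two expressions select the same slot while the mask avoids a division on the hot path. A standalone sanity sketch:

	#include <assert.h>

	/* For a power-of-two num, idx % num == (idx & (num - 1)). */
	static unsigned int ring_slot(unsigned int idx, unsigned int num)
	{
		assert(num != 0 && (num & (num - 1)) == 0);
		return idx & (num - 1);	/* same slot, no division */
	}

This also composes with free-running 16-bit indices: the index is allowed to wrap naturally and only the masked value is used to address the ring.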
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index b335c1ae8625..fe00a07c122e 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -479,7 +479,10 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
479 port = FSL_DIU_PORT_DLVDS; 479 port = FSL_DIU_PORT_DLVDS;
480 } 480 }
481 481
482 return diu_ops.valid_monitor_port(port); 482 if (diu_ops.valid_monitor_port)
483 port = diu_ops.valid_monitor_port(port);
484
485 return port;
483} 486}
484 487
485/* 488/*
@@ -1915,6 +1918,14 @@ static int __init fsl_diu_init(void)
1915#else 1918#else
1916 monitor_port = fsl_diu_name_to_port(monitor_string); 1919 monitor_port = fsl_diu_name_to_port(monitor_string);
1917#endif 1920#endif
1921
1922 /*
1923	 * We must verify set_pixel_clock. If it is not implemented on the
1924	 * platform, there is no platform support for the DIU.
1925 */
1926 if (!diu_ops.set_pixel_clock)
1927 return -ENODEV;
1928
1918 pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n"); 1929 pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n");
1919 1930
1920#ifdef CONFIG_NOT_COHERENT_CACHE 1931#ifdef CONFIG_NOT_COHERENT_CACHE
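Both fsl-diu hunks are guards around optional platform hooks: valid_monitor_port is now called only when the platform provides it, and a missing set_pixel_clock is treated as "no DIU support" at init time. A minimal sketch of the optional-callback pattern, with hypothetical names:

	#include <errno.h>

	struct platform_hooks {
		int (*validate_port)(int port);			/* optional */
		void (*set_pixel_clock)(unsigned int hz);	/* mandatory */
	};

	static int init_display(const struct platform_hooks *hooks, int *port)
	{
		if (!hooks->set_pixel_clock)
			return -ENODEV;	/* no platform support at all */

		if (hooks->validate_port)	/* hook is optional */
			*port = hooks->validate_port(*port);

		return 0;
	}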
diff --git a/drivers/video/fbdev/omap2/dss/venc.c b/drivers/video/fbdev/omap2/dss/venc.c
index 99ca268c1cdd..d05a54922ba6 100644
--- a/drivers/video/fbdev/omap2/dss/venc.c
+++ b/drivers/video/fbdev/omap2/dss/venc.c
@@ -275,6 +275,12 @@ const struct omap_video_timings omap_dss_pal_timings = {
275 .vbp = 41, 275 .vbp = 41,
276 276
277 .interlace = true, 277 .interlace = true,
278
279 .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
280 .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
281 .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
282 .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
283 .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
278}; 284};
279EXPORT_SYMBOL(omap_dss_pal_timings); 285EXPORT_SYMBOL(omap_dss_pal_timings);
280 286
@@ -290,6 +296,12 @@ const struct omap_video_timings omap_dss_ntsc_timings = {
290 .vbp = 31, 296 .vbp = 31,
291 297
292 .interlace = true, 298 .interlace = true,
299
300 .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
301 .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
302 .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
303 .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
304 .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
293}; 305};
294EXPORT_SYMBOL(omap_dss_ntsc_timings); 306EXPORT_SYMBOL(omap_dss_ntsc_timings);
295 307
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b1877d73fa56..7062bb0975a5 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -412,6 +412,7 @@ static int virtio_init(void)
412static void __exit virtio_exit(void) 412static void __exit virtio_exit(void)
413{ 413{
414 bus_unregister(&virtio_bus); 414 bus_unregister(&virtio_bus);
415 ida_destroy(&virtio_index_ida);
415} 416}
416core_initcall(virtio_init); 417core_initcall(virtio_init);
417module_exit(virtio_exit); 418module_exit(virtio_exit);
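The added ida_destroy() releases the IDA's internally cached bitmap blocks when the module unloads; individual indices are expected to have been returned already through their normal release paths. A hedged sketch of the allocator lifecycle:

	static DEFINE_IDA(example_ida);

	static int example_alloc(void)
	{
		/* grab the lowest free index */
		return ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
	}

	static void example_free(int idx)
	{
		ida_simple_remove(&example_ida, idx);
	}

	static void __exit example_exit(void)
	{
		/* frees the IDA's cached bookkeeping memory */
		ida_destroy(&example_ida);
	}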
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 096b857e7b75..ee663c458b20 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -80,6 +80,12 @@ struct vring_virtqueue {
80 /* Last used index we've seen. */ 80 /* Last used index we've seen. */
81 u16 last_used_idx; 81 u16 last_used_idx;
82 82
83 /* Last written value to avail->flags */
84 u16 avail_flags_shadow;
85
86 /* Last written value to avail->idx in guest byte order */
87 u16 avail_idx_shadow;
88
83 /* How to notify other side. FIXME: commonalize hcalls! */ 89 /* How to notify other side. FIXME: commonalize hcalls! */
84 bool (*notify)(struct virtqueue *vq); 90 bool (*notify)(struct virtqueue *vq);
85 91
@@ -109,7 +115,7 @@ static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
109 * otherwise virt_to_phys will give us bogus addresses in the 115 * otherwise virt_to_phys will give us bogus addresses in the
110 * virtqueue. 116 * virtqueue.
111 */ 117 */
112 gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH); 118 gfp &= ~__GFP_HIGHMEM;
113 119
114 desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp); 120 desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
115 if (!desc) 121 if (!desc)
@@ -235,13 +241,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
235 241
236 /* Put entry in available array (but don't update avail->idx until they 242 /* Put entry in available array (but don't update avail->idx until they
237 * do sync). */ 243 * do sync). */
238 avail = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) & (vq->vring.num - 1); 244 avail = vq->avail_idx_shadow & (vq->vring.num - 1);
239 vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); 245 vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
240 246
241 /* Descriptors and available array need to be set before we expose the 247 /* Descriptors and available array need to be set before we expose the
242 * new available array entries. */ 248 * new available array entries. */
243 virtio_wmb(vq->weak_barriers); 249 virtio_wmb(vq->weak_barriers);
244 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1); 250 vq->avail_idx_shadow++;
251 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
245 vq->num_added++; 252 vq->num_added++;
246 253
247 pr_debug("Added buffer head %i to %p\n", head, vq); 254 pr_debug("Added buffer head %i to %p\n", head, vq);
@@ -354,8 +361,8 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
354 * event. */ 361 * event. */
355 virtio_mb(vq->weak_barriers); 362 virtio_mb(vq->weak_barriers);
356 363
357 old = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->num_added; 364 old = vq->avail_idx_shadow - vq->num_added;
358 new = virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx); 365 new = vq->avail_idx_shadow;
359 vq->num_added = 0; 366 vq->num_added = 0;
360 367
361#ifdef DEBUG 368#ifdef DEBUG
@@ -510,7 +517,7 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
510 /* If we expect an interrupt for the next entry, tell host 517 /* If we expect an interrupt for the next entry, tell host
511 * by writing event index and flush out the write before 518 * by writing event index and flush out the write before
512 * the read in the next get_buf call. */ 519 * the read in the next get_buf call. */
513 if (!(vq->vring.avail->flags & cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT))) { 520 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
514 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx); 521 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
515 virtio_mb(vq->weak_barriers); 522 virtio_mb(vq->weak_barriers);
516 } 523 }
@@ -537,7 +544,11 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
537{ 544{
538 struct vring_virtqueue *vq = to_vvq(_vq); 545 struct vring_virtqueue *vq = to_vvq(_vq);
539 546
540 vq->vring.avail->flags |= cpu_to_virtio16(_vq->vdev, VRING_AVAIL_F_NO_INTERRUPT); 547 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
548 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
549 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
550 }
551
541} 552}
542EXPORT_SYMBOL_GPL(virtqueue_disable_cb); 553EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
543 554
@@ -565,7 +576,10 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
565 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to 576 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
566 * either clear the flags bit or point the event index at the next 577 * either clear the flags bit or point the event index at the next
567 * entry. Always do both to keep code simple. */ 578 * entry. Always do both to keep code simple. */
568 vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT); 579 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
580 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
581 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
582 }
569 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); 583 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
570 END_USE(vq); 584 END_USE(vq);
571 return last_used_idx; 585 return last_used_idx;
@@ -633,9 +647,12 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
633 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to 649 /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
634 * either clear the flags bit or point the event index at the next 648 * either clear the flags bit or point the event index at the next
635 * entry. Always do both to keep code simple. */ 649 * entry. Always do both to keep code simple. */
636 vq->vring.avail->flags &= cpu_to_virtio16(_vq->vdev, ~VRING_AVAIL_F_NO_INTERRUPT); 650 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
651 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
652 vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
653 }
637 /* TODO: tune this threshold */ 654 /* TODO: tune this threshold */
638 bufs = (u16)(virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - vq->last_used_idx) * 3 / 4; 655 bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
639 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs); 656 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
640 virtio_mb(vq->weak_barriers); 657 virtio_mb(vq->weak_barriers);
641 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) { 658 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
@@ -670,7 +687,8 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
670 /* detach_buf clears data, so grab it now. */ 687 /* detach_buf clears data, so grab it now. */
671 buf = vq->data[i]; 688 buf = vq->data[i];
672 detach_buf(vq, i); 689 detach_buf(vq, i);
673 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) - 1); 690 vq->avail_idx_shadow--;
691 vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
674 END_USE(vq); 692 END_USE(vq);
675 return buf; 693 return buf;
676 } 694 }
@@ -735,6 +753,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
735 vq->weak_barriers = weak_barriers; 753 vq->weak_barriers = weak_barriers;
736 vq->broken = false; 754 vq->broken = false;
737 vq->last_used_idx = 0; 755 vq->last_used_idx = 0;
756 vq->avail_flags_shadow = 0;
757 vq->avail_idx_shadow = 0;
738 vq->num_added = 0; 758 vq->num_added = 0;
739 list_add_tail(&vq->vq.list, &vdev->vqs); 759 list_add_tail(&vq->vq.list, &vdev->vqs);
740#ifdef DEBUG 760#ifdef DEBUG
@@ -746,8 +766,10 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
746 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 766 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
747 767
748 /* No callback? Tell other side not to bother us. */ 768 /* No callback? Tell other side not to bother us. */
749 if (!callback) 769 if (!callback) {
750 vq->vring.avail->flags |= cpu_to_virtio16(vdev, VRING_AVAIL_F_NO_INTERRUPT); 770 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
771 vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
772 }
751 773
752 /* Put everything in free lists. */ 774 /* Put everything in free lists. */
753 vq->free_head = 0; 775 vq->free_head = 0;
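The avail_idx_shadow/avail_flags_shadow fields introduced above keep a private copy of what the driver last wrote to the shared ring, so the hot path never has to read avail->idx or avail->flags back out of shared (and possibly byte-swapped or cache-cold) memory, and redundant flag writes are skipped entirely. A condensed sketch of the pattern, assuming a simplified ring:

	#include <stdint.h>

	struct ring_state {
		volatile uint16_t *shared_idx;	/* visible to the device */
		uint16_t shadow_idx;		/* private driver copy */
	};

	static void publish_entry(struct ring_state *r)
	{
		/* bump the private copy, then mirror it out: one store, no load */
		r->shadow_idx++;
		*r->shared_idx = r->shadow_idx;	/* byte-swap here if needed */
	}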
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 7a8a6c6952e9..1c427beffadd 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -446,7 +446,7 @@ config MAX63XX_WATCHDOG
446 446
447config IMX2_WDT 447config IMX2_WDT
448 tristate "IMX2+ Watchdog" 448 tristate "IMX2+ Watchdog"
449 depends on ARCH_MXC 449 depends on ARCH_MXC || ARCH_LAYERSCAPE
450 select REGMAP_MMIO 450 select REGMAP_MMIO
451 select WATCHDOG_CORE 451 select WATCHDOG_CORE
452 help 452 help
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 6ad9df948711..b751f43d76ed 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -123,6 +123,7 @@ static int mtk_wdt_stop(struct watchdog_device *wdt_dev)
123 123
124 reg = readl(wdt_base + WDT_MODE); 124 reg = readl(wdt_base + WDT_MODE);
125 reg &= ~WDT_MODE_EN; 125 reg &= ~WDT_MODE_EN;
126 reg |= WDT_MODE_KEY;
126 iowrite32(reg, wdt_base + WDT_MODE); 127 iowrite32(reg, wdt_base + WDT_MODE);
127 128
128 return 0; 129 return 0;
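The one-line mtk_wdt fix matters because, on this IP, writes to the mode register are honoured only when the key field carries the magic unlock value; without WDT_MODE_KEY the disable write is silently dropped and the watchdog keeps counting. A sketch of the read-modify-write with the key reapplied (register values as assumed from the driver above):

	#define WDT_MODE	0x00
	#define WDT_MODE_EN	(1U << 0)
	#define WDT_MODE_KEY	0x22000000U	/* required on every MODE write */

	static void wdt_disable(void __iomem *base)
	{
		u32 reg = readl(base + WDT_MODE);

		reg &= ~WDT_MODE_EN;
		reg |= WDT_MODE_KEY;	/* without the key the write is ignored */
		iowrite32(reg, base + WDT_MODE);
	}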
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index d96bee017fd3..6f17c935a6cf 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -205,7 +205,7 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog,
205 205
206static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog) 206static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog)
207{ 207{
208 struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); 208 struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog);
209 void __iomem *base = wdev->base; 209 void __iomem *base = wdev->base;
210 u32 value; 210 u32 value;
211 211
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 4224b3ec83a5..313cd1c6fda0 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -80,7 +80,7 @@ static unsigned int heartbeat = DEFAULT_HEARTBEAT;
80 80
81static DEFINE_SPINLOCK(io_lock); 81static DEFINE_SPINLOCK(io_lock);
82static void __iomem *wdt_base; 82static void __iomem *wdt_base;
83struct clk *wdt_clk; 83static struct clk *wdt_clk;
84 84
85static int pnx4008_wdt_start(struct watchdog_device *wdd) 85static int pnx4008_wdt_start(struct watchdog_device *wdd)
86{ 86{
@@ -161,7 +161,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
161 if (IS_ERR(wdt_clk)) 161 if (IS_ERR(wdt_clk))
162 return PTR_ERR(wdt_clk); 162 return PTR_ERR(wdt_clk);
163 163
164 ret = clk_enable(wdt_clk); 164 ret = clk_prepare_enable(wdt_clk);
165 if (ret) 165 if (ret)
166 return ret; 166 return ret;
167 167
@@ -184,7 +184,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
184 return 0; 184 return 0;
185 185
186disable_clk: 186disable_clk:
187 clk_disable(wdt_clk); 187 clk_disable_unprepare(wdt_clk);
188 return ret; 188 return ret;
189} 189}
190 190
@@ -192,7 +192,7 @@ static int pnx4008_wdt_remove(struct platform_device *pdev)
192{ 192{
193 watchdog_unregister_device(&pnx4008_wdd); 193 watchdog_unregister_device(&pnx4008_wdd);
194 194
195 clk_disable(wdt_clk); 195 clk_disable_unprepare(wdt_clk);
196 196
197 return 0; 197 return 0;
198} 198}
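The pnx4008 changes follow the common clock framework rule that a clock must be prepared (a possibly sleeping step) before it can be enabled (an atomic step); clk_prepare_enable() and clk_disable_unprepare() bundle the two halves and must be paired. A minimal probe/remove sketch:

	static struct clk *wdt_clk;

	static int example_probe(struct platform_device *pdev)
	{
		wdt_clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(wdt_clk))
			return PTR_ERR(wdt_clk);

		/* prepare (may sleep) + enable (atomic) in one call */
		return clk_prepare_enable(wdt_clk);
	}

	static int example_remove(struct platform_device *pdev)
	{
		clk_disable_unprepare(wdt_clk);	/* undo both halves */
		return 0;
	}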
diff --git a/drivers/watchdog/tegra_wdt.c b/drivers/watchdog/tegra_wdt.c
index 7f97cdd53f29..9ec57608da82 100644
--- a/drivers/watchdog/tegra_wdt.c
+++ b/drivers/watchdog/tegra_wdt.c
@@ -140,8 +140,10 @@ static int tegra_wdt_set_timeout(struct watchdog_device *wdd,
140{ 140{
141 wdd->timeout = timeout; 141 wdd->timeout = timeout;
142 142
143 if (watchdog_active(wdd)) 143 if (watchdog_active(wdd)) {
144 tegra_wdt_stop(wdd);
144 return tegra_wdt_start(wdd); 145 return tegra_wdt_start(wdd);
146 }
145 147
146 return 0; 148 return 0;
147} 149}
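Stopping before restarting is what makes a new timeout take effect on an already-running watchdog here: tegra_wdt_start() programs the period from wdd->timeout, so without the stop the old period could stay latched in hardware. The .set_timeout convention, sketched with hypothetical wdt_stop()/wdt_start() helpers:

	static int wdt_set_timeout(struct watchdog_device *wdd,
				   unsigned int timeout)
	{
		wdd->timeout = timeout;

		if (watchdog_active(wdd)) {
			/* reprogram a running timer: stop, then start fresh */
			wdt_stop(wdd);
			return wdt_start(wdd);
		}

		return 0;	/* picked up on the next start */
	}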
diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c
index 91bf55a20024..20e2bba10400 100644
--- a/drivers/watchdog/w83977f_wdt.c
+++ b/drivers/watchdog/w83977f_wdt.c
@@ -224,7 +224,7 @@ static int wdt_keepalive(void)
224 224
225static int wdt_set_timeout(int t) 225static int wdt_set_timeout(int t)
226{ 226{
227 int tmrval; 227 unsigned int tmrval;
228 228
229 /* 229 /*
230 * Convert seconds to watchdog counter time units, rounding up. 230 * Convert seconds to watchdog counter time units, rounding up.
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 849500e4e14d..524c22146429 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -39,6 +39,7 @@
39#include <asm/irq.h> 39#include <asm/irq.h>
40#include <asm/idle.h> 40#include <asm/idle.h>
41#include <asm/io_apic.h> 41#include <asm/io_apic.h>
42#include <asm/i8259.h>
42#include <asm/xen/pci.h> 43#include <asm/xen/pci.h>
43#endif 44#endif
44#include <asm/sync_bitops.h> 45#include <asm/sync_bitops.h>
@@ -420,7 +421,7 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
420 return xen_allocate_irq_dynamic(); 421 return xen_allocate_irq_dynamic();
421 422
422 /* Legacy IRQ descriptors are already allocated by the arch. */ 423 /* Legacy IRQ descriptors are already allocated by the arch. */
423 if (gsi < NR_IRQS_LEGACY) 424 if (gsi < nr_legacy_irqs())
424 irq = gsi; 425 irq = gsi;
425 else 426 else
426 irq = irq_alloc_desc_at(gsi, -1); 427 irq = irq_alloc_desc_at(gsi, -1);
@@ -446,7 +447,7 @@ static void xen_free_irq(unsigned irq)
446 kfree(info); 447 kfree(info);
447 448
448 /* Legacy IRQ descriptors are managed by the arch. */ 449 /* Legacy IRQ descriptors are managed by the arch. */
449 if (irq < NR_IRQS_LEGACY) 450 if (irq < nr_legacy_irqs())
450 return; 451 return;
451 452
452 irq_free_desc(irq); 453 irq_free_desc(irq);
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index e3e9e3d46d1b..96a1b8da5371 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -281,7 +281,8 @@ static void handle_irq_for_port(unsigned port)
281 281
282static void consume_one_event(unsigned cpu, 282static void consume_one_event(unsigned cpu,
283 struct evtchn_fifo_control_block *control_block, 283 struct evtchn_fifo_control_block *control_block,
284 unsigned priority, unsigned long *ready) 284 unsigned priority, unsigned long *ready,
285 bool drop)
285{ 286{
286 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); 287 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
287 uint32_t head; 288 uint32_t head;
@@ -313,13 +314,17 @@ static void consume_one_event(unsigned cpu,
313 if (head == 0) 314 if (head == 0)
314 clear_bit(priority, ready); 315 clear_bit(priority, ready);
315 316
316 if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) 317 if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
317 handle_irq_for_port(port); 318 if (unlikely(drop))
319 pr_warn("Dropping pending event for port %u\n", port);
320 else
321 handle_irq_for_port(port);
322 }
318 323
319 q->head[priority] = head; 324 q->head[priority] = head;
320} 325}
321 326
322static void evtchn_fifo_handle_events(unsigned cpu) 327static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
323{ 328{
324 struct evtchn_fifo_control_block *control_block; 329 struct evtchn_fifo_control_block *control_block;
325 unsigned long ready; 330 unsigned long ready;
@@ -331,11 +336,16 @@ static void evtchn_fifo_handle_events(unsigned cpu)
331 336
332 while (ready) { 337 while (ready) {
333 q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); 338 q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
334 consume_one_event(cpu, control_block, q, &ready); 339 consume_one_event(cpu, control_block, q, &ready, drop);
335 ready |= xchg(&control_block->ready, 0); 340 ready |= xchg(&control_block->ready, 0);
336 } 341 }
337} 342}
338 343
344static void evtchn_fifo_handle_events(unsigned cpu)
345{
346 __evtchn_fifo_handle_events(cpu, false);
347}
348
339static void evtchn_fifo_resume(void) 349static void evtchn_fifo_resume(void)
340{ 350{
341 unsigned cpu; 351 unsigned cpu;
@@ -420,6 +430,9 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
420 if (!per_cpu(cpu_control_block, cpu)) 430 if (!per_cpu(cpu_control_block, cpu))
421 ret = evtchn_fifo_alloc_control_block(cpu); 431 ret = evtchn_fifo_alloc_control_block(cpu);
422 break; 432 break;
433 case CPU_DEAD:
434 __evtchn_fifo_handle_events(cpu, true);
435 break;
423 default: 436 default:
424 break; 437 break;
425 } 438 }
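The new drop path exists for CPU hot-unplug: a dead CPU's FIFO queues still have to be unlinked (their head pointers advanced) so the event words can be reused, but the events can no longer be handled on that CPU, so they are logged and discarded instead. A sketch of how the flag is threaded through the hotplug notifier (drain_cpu_events() is a hypothetical wrapper around the drop=true path above):

	static int fifo_cpu_notify(struct notifier_block *self,
				   unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action) {
		case CPU_UP_PREPARE:
			/* allocate the per-cpu control block, as above */
			break;
		case CPU_DEAD:
			/* unlink queued events but drop instead of handling */
			drain_cpu_events(cpu);
			break;
		}

		return NOTIFY_OK;
	}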
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 00f40f051d95..38272ad24551 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -49,6 +49,8 @@
49#include <linux/init.h> 49#include <linux/init.h>
50#include <linux/mutex.h> 50#include <linux/mutex.h>
51#include <linux/cpu.h> 51#include <linux/cpu.h>
52#include <linux/mm.h>
53#include <linux/vmalloc.h>
52 54
53#include <xen/xen.h> 55#include <xen/xen.h>
54#include <xen/events.h> 56#include <xen/events.h>
@@ -58,10 +60,10 @@
58struct per_user_data { 60struct per_user_data {
59 struct mutex bind_mutex; /* serialize bind/unbind operations */ 61 struct mutex bind_mutex; /* serialize bind/unbind operations */
60 struct rb_root evtchns; 62 struct rb_root evtchns;
63 unsigned int nr_evtchns;
61 64
62 /* Notification ring, accessed via /dev/xen/evtchn. */ 65 /* Notification ring, accessed via /dev/xen/evtchn. */
63#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t)) 66 unsigned int ring_size;
64#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
65 evtchn_port_t *ring; 67 evtchn_port_t *ring;
66 unsigned int ring_cons, ring_prod, ring_overflow; 68 unsigned int ring_cons, ring_prod, ring_overflow;
67 struct mutex ring_cons_mutex; /* protect against concurrent readers */ 69 struct mutex ring_cons_mutex; /* protect against concurrent readers */
@@ -80,10 +82,41 @@ struct user_evtchn {
80 bool enabled; 82 bool enabled;
81}; 83};
82 84
85static evtchn_port_t *evtchn_alloc_ring(unsigned int size)
86{
87 evtchn_port_t *ring;
88 size_t s = size * sizeof(*ring);
89
90 ring = kmalloc(s, GFP_KERNEL);
91 if (!ring)
92 ring = vmalloc(s);
93
94 return ring;
95}
96
97static void evtchn_free_ring(evtchn_port_t *ring)
98{
99 kvfree(ring);
100}
101
102static unsigned int evtchn_ring_offset(struct per_user_data *u,
103 unsigned int idx)
104{
105 return idx & (u->ring_size - 1);
106}
107
108static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
109 unsigned int idx)
110{
111 return u->ring + evtchn_ring_offset(u, idx);
112}
113
83static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) 114static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
84{ 115{
85 struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL; 116 struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;
86 117
118 u->nr_evtchns++;
119
87 while (*new) { 120 while (*new) {
88 struct user_evtchn *this; 121 struct user_evtchn *this;
89 122
@@ -107,6 +140,7 @@ static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
107 140
108static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn) 141static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
109{ 142{
143 u->nr_evtchns--;
110 rb_erase(&evtchn->node, &u->evtchns); 144 rb_erase(&evtchn->node, &u->evtchns);
111 kfree(evtchn); 145 kfree(evtchn);
112} 146}
@@ -144,8 +178,8 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
144 178
145 spin_lock(&u->ring_prod_lock); 179 spin_lock(&u->ring_prod_lock);
146 180
147 if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { 181 if ((u->ring_prod - u->ring_cons) < u->ring_size) {
148 u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port; 182 *evtchn_ring_entry(u, u->ring_prod) = evtchn->port;
149 wmb(); /* Ensure ring contents visible */ 183 wmb(); /* Ensure ring contents visible */
150 if (u->ring_cons == u->ring_prod++) { 184 if (u->ring_cons == u->ring_prod++) {
151 wake_up_interruptible(&u->evtchn_wait); 185 wake_up_interruptible(&u->evtchn_wait);
@@ -200,10 +234,10 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
200 } 234 }
201 235
202 /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */ 236 /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
203 if (((c ^ p) & EVTCHN_RING_SIZE) != 0) { 237 if (((c ^ p) & u->ring_size) != 0) {
204 bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * 238 bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
205 sizeof(evtchn_port_t); 239 sizeof(evtchn_port_t);
206 bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t); 240 bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
207 } else { 241 } else {
208 bytes1 = (p - c) * sizeof(evtchn_port_t); 242 bytes1 = (p - c) * sizeof(evtchn_port_t);
209 bytes2 = 0; 243 bytes2 = 0;
@@ -219,7 +253,7 @@ static ssize_t evtchn_read(struct file *file, char __user *buf,
219 253
220 rc = -EFAULT; 254 rc = -EFAULT;
221 rmb(); /* Ensure that we see the port before we copy it. */ 255 rmb(); /* Ensure that we see the port before we copy it. */
222 if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) || 256 if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
223 ((bytes2 != 0) && 257 ((bytes2 != 0) &&
224 copy_to_user(&buf[bytes1], &u->ring[0], bytes2))) 258 copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
225 goto unlock_out; 259 goto unlock_out;
@@ -278,6 +312,66 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
278 return rc; 312 return rc;
279} 313}
280 314
315static int evtchn_resize_ring(struct per_user_data *u)
316{
317 unsigned int new_size;
318 evtchn_port_t *new_ring, *old_ring;
319 unsigned int p, c;
320
321 /*
322 * Ensure the ring is large enough to capture all possible
323	 * events, i.e. one free slot for each bound event.
324 */
325 if (u->nr_evtchns <= u->ring_size)
326 return 0;
327
328 if (u->ring_size == 0)
329 new_size = 64;
330 else
331 new_size = 2 * u->ring_size;
332
333 new_ring = evtchn_alloc_ring(new_size);
334 if (!new_ring)
335 return -ENOMEM;
336
337 old_ring = u->ring;
338
339 /*
340 * Access to the ring contents is serialized by either the
341 * prod /or/ cons lock so take both when resizing.
342 */
343 mutex_lock(&u->ring_cons_mutex);
344 spin_lock_irq(&u->ring_prod_lock);
345
346 /*
347 * Copy the old ring contents to the new ring.
348 *
349 * If the ring contents crosses the end of the current ring,
350 * it needs to be copied in two chunks.
351 *
352	 *  +---------+    +------------------+
353	 *  |34567  12| -> |       1234567    |
354	 *  +-----p-c-+    +------------------+
355 */
356 p = evtchn_ring_offset(u, u->ring_prod);
357 c = evtchn_ring_offset(u, u->ring_cons);
358 if (p < c) {
359 memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
360 memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
361 } else
362 memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
363
364 u->ring = new_ring;
365 u->ring_size = new_size;
366
367 spin_unlock_irq(&u->ring_prod_lock);
368 mutex_unlock(&u->ring_cons_mutex);
369
370 evtchn_free_ring(old_ring);
371
372 return 0;
373}
374
281static int evtchn_bind_to_user(struct per_user_data *u, int port) 375static int evtchn_bind_to_user(struct per_user_data *u, int port)
282{ 376{
283 struct user_evtchn *evtchn; 377 struct user_evtchn *evtchn;
@@ -305,6 +399,10 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
305 if (rc < 0) 399 if (rc < 0)
306 goto err; 400 goto err;
307 401
402 rc = evtchn_resize_ring(u);
403 if (rc < 0)
404 goto err;
405
308 rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0, 406 rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
309 u->name, evtchn); 407 u->name, evtchn);
310 if (rc < 0) 408 if (rc < 0)
@@ -503,13 +601,6 @@ static int evtchn_open(struct inode *inode, struct file *filp)
503 601
504 init_waitqueue_head(&u->evtchn_wait); 602 init_waitqueue_head(&u->evtchn_wait);
505 603
506 u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
507 if (u->ring == NULL) {
508 kfree(u->name);
509 kfree(u);
510 return -ENOMEM;
511 }
512
513 mutex_init(&u->bind_mutex); 604 mutex_init(&u->bind_mutex);
514 mutex_init(&u->ring_cons_mutex); 605 mutex_init(&u->ring_cons_mutex);
515 spin_lock_init(&u->ring_prod_lock); 606 spin_lock_init(&u->ring_prod_lock);
@@ -532,7 +623,7 @@ static int evtchn_release(struct inode *inode, struct file *filp)
532 evtchn_unbind_from_user(u, evtchn); 623 evtchn_unbind_from_user(u, evtchn);
533 } 624 }
534 625
535 free_page((unsigned long)u->ring); 626 evtchn_free_ring(u->ring);
536 kfree(u->name); 627 kfree(u->name);
537 kfree(u); 628 kfree(u);
538 629
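One subtlety in evtchn_resize_ring(): ring_cons and ring_prod keep their absolute values across the resize, so after the size doubles, the new mask may resolve each counter into either half of the new ring. A defensive variant (a sketch, assuming power-of-two sizes with new_size == 2 * old_size, not necessarily this driver's final form) sidesteps the wrap analysis by copying the old contents twice, so both possible offsets see the same data:

	#include <string.h>
	#include <stdint.h>

	typedef uint32_t port_t;

	/* new_ring has 2 * old_size slots; the counters keep their values */
	static void copy_ring_doubled(port_t *new_ring, const port_t *old_ring,
				      unsigned int old_size)
	{
		/* whichever half the new mask picks, the slot holds old data */
		memcpy(new_ring, old_ring, old_size * sizeof(*old_ring));
		memcpy(new_ring + old_size, old_ring, old_size * sizeof(*old_ring));
	}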
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 2ea0b3b2a91d..1be5dd048622 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
804 804
805 vma->vm_ops = &gntdev_vmops; 805 vma->vm_ops = &gntdev_vmops;
806 806
807 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 807 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
808 808
809 if (use_ptemod) 809 if (use_ptemod)
810 vma->vm_flags |= VM_DONTCOPY; 810 vma->vm_flags |= VM_DONTCOPY;
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 58e38d586f52..4d529f3e40df 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -37,6 +37,7 @@ struct xen_pcibk_device {
37 struct xen_pci_sharedinfo *sh_info; 37 struct xen_pci_sharedinfo *sh_info;
38 unsigned long flags; 38 unsigned long flags;
39 struct work_struct op_work; 39 struct work_struct op_work;
40 struct xen_pci_op op;
40}; 41};
41 42
42struct xen_pcibk_dev_data { 43struct xen_pcibk_dev_data {
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index c4a0666de6f5..73dafdc494aa 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -70,6 +70,13 @@ static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
70 enable ? "enable" : "disable"); 70 enable ? "enable" : "disable");
71 71
72 if (enable) { 72 if (enable) {
73 /*
74	 * The MSI or MSI-X interrupt must not already have an IRQ handler;
75	 * otherwise, if the guest terminates, we hit a BUG_ON in free_msi_irqs.
76 */
77 if (dev->msi_enabled || dev->msix_enabled)
78 goto out;
79
73 rc = request_irq(dev_data->irq, 80 rc = request_irq(dev_data->irq,
74 xen_pcibk_guest_interrupt, IRQF_SHARED, 81 xen_pcibk_guest_interrupt, IRQF_SHARED,
75 dev_data->irq_name, dev); 82 dev_data->irq_name, dev);
@@ -144,7 +151,12 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
144 if (unlikely(verbose_request)) 151 if (unlikely(verbose_request))
145 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev)); 152 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
146 153
147 status = pci_enable_msi(dev); 154 if (dev->msi_enabled)
155 status = -EALREADY;
156 else if (dev->msix_enabled)
157 status = -ENXIO;
158 else
159 status = pci_enable_msi(dev);
148 160
149 if (status) { 161 if (status) {
150 pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n", 162 pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n",
@@ -173,20 +185,23 @@ static
173int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev, 185int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
174 struct pci_dev *dev, struct xen_pci_op *op) 186 struct pci_dev *dev, struct xen_pci_op *op)
175{ 187{
176 struct xen_pcibk_dev_data *dev_data;
177
178 if (unlikely(verbose_request)) 188 if (unlikely(verbose_request))
179 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n", 189 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
180 pci_name(dev)); 190 pci_name(dev));
181 pci_disable_msi(dev);
182 191
192 if (dev->msi_enabled) {
193 struct xen_pcibk_dev_data *dev_data;
194
195 pci_disable_msi(dev);
196
197 dev_data = pci_get_drvdata(dev);
198 if (dev_data)
199 dev_data->ack_intr = 1;
200 }
183 op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; 201 op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
184 if (unlikely(verbose_request)) 202 if (unlikely(verbose_request))
185 printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev), 203 printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
186 op->value); 204 op->value);
187 dev_data = pci_get_drvdata(dev);
188 if (dev_data)
189 dev_data->ack_intr = 1;
190 return 0; 205 return 0;
191} 206}
192 207
@@ -197,13 +212,26 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
197 struct xen_pcibk_dev_data *dev_data; 212 struct xen_pcibk_dev_data *dev_data;
198 int i, result; 213 int i, result;
199 struct msix_entry *entries; 214 struct msix_entry *entries;
215 u16 cmd;
200 216
201 if (unlikely(verbose_request)) 217 if (unlikely(verbose_request))
202 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n", 218 printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
203 pci_name(dev)); 219 pci_name(dev));
220
204 if (op->value > SH_INFO_MAX_VEC) 221 if (op->value > SH_INFO_MAX_VEC)
205 return -EINVAL; 222 return -EINVAL;
206 223
224 if (dev->msix_enabled)
225 return -EALREADY;
226
227 /*
228 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
229 * to access the BARs where the MSI-X entries reside.
230 */
231 pci_read_config_word(dev, PCI_COMMAND, &cmd);
232 if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
233 return -ENXIO;
234
207 entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL); 235 entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
208 if (entries == NULL) 236 if (entries == NULL)
209 return -ENOMEM; 237 return -ENOMEM;
@@ -245,23 +273,27 @@ static
245int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, 273int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
246 struct pci_dev *dev, struct xen_pci_op *op) 274 struct pci_dev *dev, struct xen_pci_op *op)
247{ 275{
248 struct xen_pcibk_dev_data *dev_data;
249 if (unlikely(verbose_request)) 276 if (unlikely(verbose_request))
250 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n", 277 printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
251 pci_name(dev)); 278 pci_name(dev));
252 pci_disable_msix(dev);
253 279
280 if (dev->msix_enabled) {
281 struct xen_pcibk_dev_data *dev_data;
282
283 pci_disable_msix(dev);
284
285 dev_data = pci_get_drvdata(dev);
286 if (dev_data)
287 dev_data->ack_intr = 1;
288 }
254 /* 289 /*
255 * SR-IOV devices (which don't have any legacy IRQ) have 290 * SR-IOV devices (which don't have any legacy IRQ) have
256 * an undefined IRQ value of zero. 291 * an undefined IRQ value of zero.
257 */ 292 */
258 op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; 293 op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
259 if (unlikely(verbose_request)) 294 if (unlikely(verbose_request))
260 printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev), 295 printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n",
261 op->value); 296 pci_name(dev), op->value);
262 dev_data = pci_get_drvdata(dev);
263 if (dev_data)
264 dev_data->ack_intr = 1;
265 return 0; 297 return 0;
266} 298}
267#endif 299#endif
@@ -298,9 +330,11 @@ void xen_pcibk_do_op(struct work_struct *data)
298 container_of(data, struct xen_pcibk_device, op_work); 330 container_of(data, struct xen_pcibk_device, op_work);
299 struct pci_dev *dev; 331 struct pci_dev *dev;
300 struct xen_pcibk_dev_data *dev_data = NULL; 332 struct xen_pcibk_dev_data *dev_data = NULL;
301 struct xen_pci_op *op = &pdev->sh_info->op; 333 struct xen_pci_op *op = &pdev->op;
302 int test_intx = 0; 334 int test_intx = 0;
303 335
336 *op = pdev->sh_info->op;
337 barrier();
304 dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn); 338 dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
305 339
306 if (dev == NULL) 340 if (dev == NULL)
@@ -342,6 +376,17 @@ void xen_pcibk_do_op(struct work_struct *data)
342 if ((dev_data->enable_intx != test_intx)) 376 if ((dev_data->enable_intx != test_intx))
343 xen_pcibk_control_isr(dev, 0 /* no reset */); 377 xen_pcibk_control_isr(dev, 0 /* no reset */);
344 } 378 }
379 pdev->sh_info->op.err = op->err;
380 pdev->sh_info->op.value = op->value;
381#ifdef CONFIG_PCI_MSI
382 if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
383 unsigned int i;
384
385 for (i = 0; i < op->value; i++)
386 pdev->sh_info->op.msix_entries[i].vector =
387 op->msix_entries[i].vector;
388 }
389#endif
345 /* Tell the driver domain that we're done. */ 390 /* Tell the driver domain that we're done. */
346 wmb(); 391 wmb();
347 clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); 392 clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
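Snapshotting the whole op out of the shared page before acting on it closes a time-of-check/time-of-use window: the frontend shares that memory and could rewrite fields between validation and use. The backend works only on its private copy and writes back just the response fields it owns. A sketch of the pattern with hypothetical names:

	static void handle_request(struct backend *be)
	{
		struct request req;

		/* snapshot: one read of guest-shared memory, then never again */
		req = be->shared->req;
		barrier();	/* don't let the compiler re-read be->shared->req */

		if (!validate(&req))
			return;

		process(&req);

		/* publish only the response fields, not the whole struct */
		be->shared->req.err = req.err;
		be->shared->req.value = req.value;
		wmb();	/* response visible before signalling completion */
		notify_frontend(be);
	}

The RING_COPY_REQUEST() change in xen-scsiback further down applies the same snapshot-before-use rule to ring requests.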
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 98bc345f296e..4843741e703a 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -44,7 +44,6 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
44 dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev); 44 dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
45 45
46 pdev->xdev = xdev; 46 pdev->xdev = xdev;
47 dev_set_drvdata(&xdev->dev, pdev);
48 47
49 mutex_init(&pdev->dev_lock); 48 mutex_init(&pdev->dev_lock);
50 49
@@ -58,6 +57,9 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
58 kfree(pdev); 57 kfree(pdev);
59 pdev = NULL; 58 pdev = NULL;
60 } 59 }
60
61 dev_set_drvdata(&xdev->dev, pdev);
62
61out: 63out:
62 return pdev; 64 return pdev;
63} 65}
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 43bcae852546..ad4eb1024d1f 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -726,7 +726,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
726 if (!pending_req) 726 if (!pending_req)
727 return 1; 727 return 1;
728 728
729 ring_req = *RING_GET_REQUEST(ring, rc); 729 RING_COPY_REQUEST(ring, rc, &ring_req);
730 ring->req_cons = ++rc; 730 ring->req_cons = ++rc;
731 731
732 err = prepare_pending_reqs(info, &ring_req, pending_req); 732 err = prepare_pending_reqs(info, &ring_req, pending_req);
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 699941e90667..511078586fa1 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -451,9 +451,9 @@ void v9fs_evict_inode(struct inode *inode)
451{ 451{
452 struct v9fs_inode *v9inode = V9FS_I(inode); 452 struct v9fs_inode *v9inode = V9FS_I(inode);
453 453
454 truncate_inode_pages_final(inode->i_mapping); 454 truncate_inode_pages_final(&inode->i_data);
455 clear_inode(inode); 455 clear_inode(inode);
456 filemap_fdatawrite(inode->i_mapping); 456 filemap_fdatawrite(&inode->i_data);
457 457
458 v9fs_cache_inode_put_cookie(inode); 458 v9fs_cache_inode_put_cookie(inode);
459 /* clunk the fid stashed in writeback_fid */ 459 /* clunk the fid stashed in writeback_fid */
diff --git a/fs/Kconfig b/fs/Kconfig
index da3f32f1a4e4..6ce72d8d1ee1 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -46,6 +46,12 @@ config FS_DAX
46 or if unsure, say N. Saying Y will increase the size of the kernel 46 or if unsure, say N. Saying Y will increase the size of the kernel
47 by about 5kB. 47 by about 5kB.
48 48
49config FS_DAX_PMD
50 bool
51 default FS_DAX
52 depends on FS_DAX
53 depends on BROKEN
54
49endif # BLOCK 55endif # BLOCK
50 56
51# Posix ACL utility routines 57# Posix ACL utility routines
diff --git a/fs/block_dev.c b/fs/block_dev.c
index bb0dfb1c7af1..44d4a1e9244e 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -390,9 +390,17 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
390 struct page *page) 390 struct page *page)
391{ 391{
392 const struct block_device_operations *ops = bdev->bd_disk->fops; 392 const struct block_device_operations *ops = bdev->bd_disk->fops;
393 int result = -EOPNOTSUPP;
394
393 if (!ops->rw_page || bdev_get_integrity(bdev)) 395 if (!ops->rw_page || bdev_get_integrity(bdev))
394 return -EOPNOTSUPP; 396 return result;
395 return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ); 397
398 result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
399 if (result)
400 return result;
401 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
402 blk_queue_exit(bdev->bd_queue);
403 return result;
396} 404}
397EXPORT_SYMBOL_GPL(bdev_read_page); 405EXPORT_SYMBOL_GPL(bdev_read_page);
398 406
@@ -421,14 +429,20 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
421 int result; 429 int result;
422 int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE; 430 int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
423 const struct block_device_operations *ops = bdev->bd_disk->fops; 431 const struct block_device_operations *ops = bdev->bd_disk->fops;
432
424 if (!ops->rw_page || bdev_get_integrity(bdev)) 433 if (!ops->rw_page || bdev_get_integrity(bdev))
425 return -EOPNOTSUPP; 434 return -EOPNOTSUPP;
435 result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
436 if (result)
437 return result;
438
426 set_page_writeback(page); 439 set_page_writeback(page);
427 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw); 440 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
428 if (result) 441 if (result)
429 end_page_writeback(page); 442 end_page_writeback(page);
430 else 443 else
431 unlock_page(page); 444 unlock_page(page);
445 blk_queue_exit(bdev->bd_queue);
432 return result; 446 return result;
433} 447}
434EXPORT_SYMBOL_GPL(bdev_write_page); 448EXPORT_SYMBOL_GPL(bdev_write_page);
@@ -1509,11 +1523,14 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
1509 WARN_ON_ONCE(bdev->bd_holders); 1523 WARN_ON_ONCE(bdev->bd_holders);
1510 sync_blockdev(bdev); 1524 sync_blockdev(bdev);
1511 kill_bdev(bdev); 1525 kill_bdev(bdev);
1526
1527 bdev_write_inode(bdev);
1512 /* 1528 /*
1513 * ->release can cause the queue to disappear, so flush all 1529 * Detaching bdev inode from its wb in __destroy_inode()
1514 * dirty data before. 1530 * is too late: the queue which embeds its bdi (along with
1531 * root wb) can be gone as soon as we put_disk() below.
1515 */ 1532 */
1516 bdev_write_inode(bdev); 1533 inode_detach_wb(bdev->bd_inode);
1517 } 1534 }
1518 if (bdev->bd_contains == bdev) { 1535 if (bdev->bd_contains == bdev) {
1519 if (disk->fops->release) 1536 if (disk->fops->release)
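The blk_queue_enter()/blk_queue_exit() pair added around ->rw_page takes a reference on the request queue so it cannot be torn down (for example by hot-unplug) while the page I/O is in flight, mirroring what the normal bio path does in generic_make_request(). The shape of the guard:

	static int rw_page_guarded(struct block_device *bdev, sector_t sector,
				   struct page *page, int rw)
	{
		int result;

		/* pin the queue; fails if it is dying or frozen */
		result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
		if (result)
			return result;

		result = bdev->bd_disk->fops->rw_page(bdev,
				sector + get_start_sect(bdev), page, rw);

		blk_queue_exit(bdev->bd_queue);	/* drop the reference */
		return result;
	}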
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 6dcdb2ec9211..d453d62ab0c6 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -355,7 +355,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
355 355
356 index = srcu_read_lock(&fs_info->subvol_srcu); 356 index = srcu_read_lock(&fs_info->subvol_srcu);
357 357
358 root = btrfs_read_fs_root_no_name(fs_info, &root_key); 358 root = btrfs_get_fs_root(fs_info, &root_key, false);
359 if (IS_ERR(root)) { 359 if (IS_ERR(root)) {
360 srcu_read_unlock(&fs_info->subvol_srcu, index); 360 srcu_read_unlock(&fs_info->subvol_srcu, index);
361 ret = PTR_ERR(root); 361 ret = PTR_ERR(root);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8c58191249cc..35489e7129a7 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3416,6 +3416,7 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3416struct btrfs_block_group_cache *btrfs_lookup_block_group( 3416struct btrfs_block_group_cache *btrfs_lookup_block_group(
3417 struct btrfs_fs_info *info, 3417 struct btrfs_fs_info *info,
3418 u64 bytenr); 3418 u64 bytenr);
3419void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
3419void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 3420void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
3420int get_block_group_index(struct btrfs_block_group_cache *cache); 3421int get_block_group_index(struct btrfs_block_group_cache *cache);
3421struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 3422struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
@@ -3479,6 +3480,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
3479 struct btrfs_root *root, u64 bytes_used, 3480 struct btrfs_root *root, u64 bytes_used,
3480 u64 type, u64 chunk_objectid, u64 chunk_offset, 3481 u64 type, u64 chunk_objectid, u64 chunk_offset,
3481 u64 size); 3482 u64 size);
3483struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
3484 struct btrfs_fs_info *fs_info,
3485 const u64 chunk_offset);
3482int btrfs_remove_block_group(struct btrfs_trans_handle *trans, 3486int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
3483 struct btrfs_root *root, u64 group_start, 3487 struct btrfs_root *root, u64 group_start,
3484 struct extent_map *em); 3488 struct extent_map *em);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index acf3ed11cfb6..c4661db2b72a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -124,7 +124,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
124 return (cache->flags & bits) == bits; 124 return (cache->flags & bits) == bits;
125} 125}
126 126
127static void btrfs_get_block_group(struct btrfs_block_group_cache *cache) 127void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
128{ 128{
129 atomic_inc(&cache->count); 129 atomic_inc(&cache->count);
130} 130}
@@ -5915,19 +5915,6 @@ static int update_block_group(struct btrfs_trans_handle *trans,
5915 set_extent_dirty(info->pinned_extents, 5915 set_extent_dirty(info->pinned_extents,
5916 bytenr, bytenr + num_bytes - 1, 5916 bytenr, bytenr + num_bytes - 1,
5917 GFP_NOFS | __GFP_NOFAIL); 5917 GFP_NOFS | __GFP_NOFAIL);
5918 /*
5919 * No longer have used bytes in this block group, queue
5920 * it for deletion.
5921 */
5922 if (old_val == 0) {
5923 spin_lock(&info->unused_bgs_lock);
5924 if (list_empty(&cache->bg_list)) {
5925 btrfs_get_block_group(cache);
5926 list_add_tail(&cache->bg_list,
5927 &info->unused_bgs);
5928 }
5929 spin_unlock(&info->unused_bgs_lock);
5930 }
5931 } 5918 }
5932 5919
5933 spin_lock(&trans->transaction->dirty_bgs_lock); 5920 spin_lock(&trans->transaction->dirty_bgs_lock);
@@ -5939,6 +5926,22 @@ static int update_block_group(struct btrfs_trans_handle *trans,
5939 } 5926 }
5940 spin_unlock(&trans->transaction->dirty_bgs_lock); 5927 spin_unlock(&trans->transaction->dirty_bgs_lock);
5941 5928
5929 /*
5930 * No longer have used bytes in this block group, queue it for
5931 * deletion. We do this after adding the block group to the
 5932 * dirty list to avoid races between the cleaner kthread and space
5933 * cache writeout.
5934 */
5935 if (!alloc && old_val == 0) {
5936 spin_lock(&info->unused_bgs_lock);
5937 if (list_empty(&cache->bg_list)) {
5938 btrfs_get_block_group(cache);
5939 list_add_tail(&cache->bg_list,
5940 &info->unused_bgs);
5941 }
5942 spin_unlock(&info->unused_bgs_lock);
5943 }
5944
5942 btrfs_put_block_group(cache); 5945 btrfs_put_block_group(cache);
5943 total -= num_bytes; 5946 total -= num_bytes;
5944 bytenr += num_bytes; 5947 bytenr += num_bytes;
@@ -8105,21 +8108,47 @@ reada:
8105} 8108}
8106 8109
8107/* 8110/*
8108 * TODO: Modify related function to add related node/leaf to dirty_extent_root, 8111 * These may not be seen by the usual inc/dec ref code so we have to
8109 * for later qgroup accounting. 8112 * add them here.
8110 *
8111 * Current, this function does nothing.
8112 */ 8113 */
8114static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
8115 struct btrfs_root *root, u64 bytenr,
8116 u64 num_bytes)
8117{
8118 struct btrfs_qgroup_extent_record *qrecord;
8119 struct btrfs_delayed_ref_root *delayed_refs;
8120
8121 qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
8122 if (!qrecord)
8123 return -ENOMEM;
8124
8125 qrecord->bytenr = bytenr;
8126 qrecord->num_bytes = num_bytes;
8127 qrecord->old_roots = NULL;
8128
8129 delayed_refs = &trans->transaction->delayed_refs;
8130 spin_lock(&delayed_refs->lock);
8131 if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
8132 kfree(qrecord);
8133 spin_unlock(&delayed_refs->lock);
8134
8135 return 0;
8136}
8137
8113static int account_leaf_items(struct btrfs_trans_handle *trans, 8138static int account_leaf_items(struct btrfs_trans_handle *trans,
8114 struct btrfs_root *root, 8139 struct btrfs_root *root,
8115 struct extent_buffer *eb) 8140 struct extent_buffer *eb)
8116{ 8141{
8117 int nr = btrfs_header_nritems(eb); 8142 int nr = btrfs_header_nritems(eb);
8118 int i, extent_type; 8143 int i, extent_type, ret;
8119 struct btrfs_key key; 8144 struct btrfs_key key;
8120 struct btrfs_file_extent_item *fi; 8145 struct btrfs_file_extent_item *fi;
8121 u64 bytenr, num_bytes; 8146 u64 bytenr, num_bytes;
8122 8147
8148 /* We can be called directly from walk_up_proc() */
8149 if (!root->fs_info->quota_enabled)
8150 return 0;
8151
8123 for (i = 0; i < nr; i++) { 8152 for (i = 0; i < nr; i++) {
8124 btrfs_item_key_to_cpu(eb, &key, i); 8153 btrfs_item_key_to_cpu(eb, &key, i);
8125 8154
@@ -8138,6 +8167,10 @@ static int account_leaf_items(struct btrfs_trans_handle *trans,
8138 continue; 8167 continue;
8139 8168
8140 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); 8169 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8170
8171 ret = record_one_subtree_extent(trans, root, bytenr, num_bytes);
8172 if (ret)
8173 return ret;
8141 } 8174 }
8142 return 0; 8175 return 0;
8143} 8176}
@@ -8206,8 +8239,6 @@ static int adjust_slots_upwards(struct btrfs_root *root,
8206 8239
8207/* 8240/*
8208 * root_eb is the subtree root and is locked before this function is called. 8241 * root_eb is the subtree root and is locked before this function is called.
8209 * TODO: Modify this function to mark all (including complete shared node)
8210 * to dirty_extent_root to allow it get accounted in qgroup.
8211 */ 8242 */
8212static int account_shared_subtree(struct btrfs_trans_handle *trans, 8243static int account_shared_subtree(struct btrfs_trans_handle *trans,
8213 struct btrfs_root *root, 8244 struct btrfs_root *root,
@@ -8285,6 +8316,11 @@ walk_down:
8285 btrfs_tree_read_lock(eb); 8316 btrfs_tree_read_lock(eb);
8286 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); 8317 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8287 path->locks[level] = BTRFS_READ_LOCK_BLOCKING; 8318 path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8319
8320 ret = record_one_subtree_extent(trans, root, child_bytenr,
8321 root->nodesize);
8322 if (ret)
8323 goto out;
8288 } 8324 }
8289 8325
8290 if (level == 0) { 8326 if (level == 0) {
@@ -10256,6 +10292,47 @@ out:
10256 return ret; 10292 return ret;
10257} 10293}
10258 10294
10295struct btrfs_trans_handle *
10296btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
10297 const u64 chunk_offset)
10298{
10299 struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
10300 struct extent_map *em;
10301 struct map_lookup *map;
10302 unsigned int num_items;
10303
10304 read_lock(&em_tree->lock);
10305 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
10306 read_unlock(&em_tree->lock);
10307 ASSERT(em && em->start == chunk_offset);
10308
10309 /*
10310 * We need to reserve 3 + N units from the metadata space info in order
10311 * to remove a block group (done at btrfs_remove_chunk() and at
10312 * btrfs_remove_block_group()), which are used for:
10313 *
10314 * 1 unit for adding the free space inode's orphan (located in the tree
10315 * of tree roots).
10316 * 1 unit for deleting the block group item (located in the extent
10317 * tree).
10318 * 1 unit for deleting the free space item (located in tree of tree
10319 * roots).
10320 * N units for deleting N device extent items corresponding to each
10321 * stripe (located in the device tree).
10322 *
10323 * In order to remove a block group we also need to reserve units in the
10324 * system space info in order to update the chunk tree (update one or
10325 * more device items and remove one chunk item), but this is done at
10326 * btrfs_remove_chunk() through a call to check_system_chunk().
10327 */
10328 map = (struct map_lookup *)em->bdev;
10329 num_items = 3 + map->num_stripes;
10330 free_extent_map(em);
10331
10332 return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
10333 num_items, 1);
10334}
10335
10259/* 10336/*
10260 * Process the unused_bgs list and remove any that don't have any allocated 10337 * Process the unused_bgs list and remove any that don't have any allocated
10261 * space inside of them. 10338 * space inside of them.
@@ -10322,8 +10399,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10322 * Want to do this before we do anything else so we can recover 10399 * Want to do this before we do anything else so we can recover
10323 * properly if we fail to join the transaction. 10400 * properly if we fail to join the transaction.
10324 */ 10401 */
10325 /* 1 for btrfs_orphan_reserve_metadata() */ 10402 trans = btrfs_start_trans_remove_block_group(fs_info,
10326 trans = btrfs_start_transaction(root, 1); 10403 block_group->key.objectid);
10327 if (IS_ERR(trans)) { 10404 if (IS_ERR(trans)) {
10328 btrfs_dec_block_group_ro(root, block_group); 10405 btrfs_dec_block_group_ro(root, block_group);
10329 ret = PTR_ERR(trans); 10406 ret = PTR_ERR(trans);
@@ -10403,11 +10480,15 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10403 * until transaction commit to do the actual discard. 10480 * until transaction commit to do the actual discard.
10404 */ 10481 */
10405 if (trimming) { 10482 if (trimming) {
10406 WARN_ON(!list_empty(&block_group->bg_list)); 10483 spin_lock(&fs_info->unused_bgs_lock);
10407 spin_lock(&trans->transaction->deleted_bgs_lock); 10484 /*
10485 * A concurrent scrub might have added us to the list
10486 * fs_info->unused_bgs, so use a list_move operation
10487 * to add the block group to the deleted_bgs list.
10488 */
10408 list_move(&block_group->bg_list, 10489 list_move(&block_group->bg_list,
10409 &trans->transaction->deleted_bgs); 10490 &trans->transaction->deleted_bgs);
10410 spin_unlock(&trans->transaction->deleted_bgs_lock); 10491 spin_unlock(&fs_info->unused_bgs_lock);
10411 btrfs_get_block_group(block_group); 10492 btrfs_get_block_group(block_group);
10412 } 10493 }
10413end_trans: 10494end_trans:
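
A worked example of the reservation arithmetic in btrfs_start_trans_remove_block_group()
above, assuming a RAID1 data chunk (two stripes):

    /* 1 orphan item + 1 block group item + 1 free space item = 3 units,
     * plus one device extent item per stripe:
     */
    num_items = 3 + map->num_stripes;   /* RAID1: 3 + 2 = 5 metadata units */

The system-space reservation for the chunk tree update is deliberately not part of
this count; as the comment notes, check_system_chunk() covers it from
btrfs_remove_chunk().
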
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 977e715f0bf2..0f09526aa7d9 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1291,7 +1291,8 @@ out:
1291 * on error we return an unlocked page and the error value 1291 * on error we return an unlocked page and the error value
1292 * on success we return a locked page and 0 1292 * on success we return a locked page and 0
1293 */ 1293 */
1294static int prepare_uptodate_page(struct page *page, u64 pos, 1294static int prepare_uptodate_page(struct inode *inode,
1295 struct page *page, u64 pos,
1295 bool force_uptodate) 1296 bool force_uptodate)
1296{ 1297{
1297 int ret = 0; 1298 int ret = 0;
@@ -1306,6 +1307,10 @@ static int prepare_uptodate_page(struct page *page, u64 pos,
1306 unlock_page(page); 1307 unlock_page(page);
1307 return -EIO; 1308 return -EIO;
1308 } 1309 }
1310 if (page->mapping != inode->i_mapping) {
1311 unlock_page(page);
1312 return -EAGAIN;
1313 }
1309 } 1314 }
1310 return 0; 1315 return 0;
1311} 1316}
@@ -1324,6 +1329,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
1324 int faili; 1329 int faili;
1325 1330
1326 for (i = 0; i < num_pages; i++) { 1331 for (i = 0; i < num_pages; i++) {
1332again:
1327 pages[i] = find_or_create_page(inode->i_mapping, index + i, 1333 pages[i] = find_or_create_page(inode->i_mapping, index + i,
1328 mask | __GFP_WRITE); 1334 mask | __GFP_WRITE);
1329 if (!pages[i]) { 1335 if (!pages[i]) {
@@ -1333,13 +1339,17 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
1333 } 1339 }
1334 1340
1335 if (i == 0) 1341 if (i == 0)
1336 err = prepare_uptodate_page(pages[i], pos, 1342 err = prepare_uptodate_page(inode, pages[i], pos,
1337 force_uptodate); 1343 force_uptodate);
1338 if (i == num_pages - 1) 1344 if (!err && i == num_pages - 1)
1339 err = prepare_uptodate_page(pages[i], 1345 err = prepare_uptodate_page(inode, pages[i],
1340 pos + write_bytes, false); 1346 pos + write_bytes, false);
1341 if (err) { 1347 if (err) {
1342 page_cache_release(pages[i]); 1348 page_cache_release(pages[i]);
1349 if (err == -EAGAIN) {
1350 err = 0;
1351 goto again;
1352 }
1343 faili = i - 1; 1353 faili = i - 1;
1344 goto fail; 1354 goto fail;
1345 } 1355 }
@@ -1882,8 +1892,13 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1882 struct btrfs_log_ctx ctx; 1892 struct btrfs_log_ctx ctx;
1883 int ret = 0; 1893 int ret = 0;
1884 bool full_sync = 0; 1894 bool full_sync = 0;
1885 const u64 len = end - start + 1; 1895 u64 len;
1886 1896
1897 /*
 1898 * The range length can be represented by u64; we have to do the typecasts
 1899 * to avoid signed overflow if it's [0, LLONG_MAX], e.g. from fsync()
1900 */
1901 len = (u64)end - (u64)start + 1;
1887 trace_btrfs_sync_file(file, datasync); 1902 trace_btrfs_sync_file(file, datasync);
1888 1903
1889 /* 1904 /*
@@ -2071,8 +2086,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
2071 } 2086 }
2072 } 2087 }
2073 if (!full_sync) { 2088 if (!full_sync) {
2074 ret = btrfs_wait_ordered_range(inode, start, 2089 ret = btrfs_wait_ordered_range(inode, start, len);
2075 end - start + 1);
2076 if (ret) { 2090 if (ret) {
2077 btrfs_end_transaction(trans, root); 2091 btrfs_end_transaction(trans, root);
2078 goto out; 2092 goto out;
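
A minimal sketch of the overflow that the casts in btrfs_sync_file() avoid, assuming
a 64-bit loff_t as on Linux:

    loff_t start = 0, end = LLONG_MAX;    /* the range fsync() passes down */
    /* "end - start + 1" evaluated in signed arithmetic computes
     * LLONG_MAX + 1, which is undefined behaviour and in practice wraps
     * to LLONG_MIN; done on u64 it yields the intended length.
     */
    u64 len = (u64)end - (u64)start + 1;  /* 0x8000000000000000, correct */
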
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 85a1f8621b51..cfe99bec49de 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -891,7 +891,7 @@ out:
891 spin_unlock(&block_group->lock); 891 spin_unlock(&block_group->lock);
892 ret = 0; 892 ret = 0;
893 893
894 btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now", 894 btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
895 block_group->key.objectid); 895 block_group->key.objectid);
896 } 896 }
897 897
@@ -2972,7 +2972,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2972 u64 cont1_bytes, u64 min_bytes) 2972 u64 cont1_bytes, u64 min_bytes)
2973{ 2973{
2974 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2974 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2975 struct btrfs_free_space *entry; 2975 struct btrfs_free_space *entry = NULL;
2976 int ret = -ENOSPC; 2976 int ret = -ENOSPC;
2977 u64 bitmap_offset = offset_to_bitmap(ctl, offset); 2977 u64 bitmap_offset = offset_to_bitmap(ctl, offset);
2978 2978
@@ -2983,8 +2983,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2983 * The bitmap that covers offset won't be in the list unless offset 2983 * The bitmap that covers offset won't be in the list unless offset
2984 * is just its start offset. 2984 * is just its start offset.
2985 */ 2985 */
2986 entry = list_first_entry(bitmaps, struct btrfs_free_space, list); 2986 if (!list_empty(bitmaps))
2987 if (entry->offset != bitmap_offset) { 2987 entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
2988
2989 if (!entry || entry->offset != bitmap_offset) {
2988 entry = tree_search_offset(ctl, bitmap_offset, 1, 0); 2990 entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
2989 if (entry && list_empty(&entry->list)) 2991 if (entry && list_empty(&entry->list))
2990 list_add(&entry->list, bitmaps); 2992 list_add(&entry->list, bitmaps);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 994490d5fa64..a70c5790f8f5 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4046,9 +4046,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4046 */ 4046 */
4047static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir) 4047static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4048{ 4048{
4049 struct btrfs_trans_handle *trans;
4050 struct btrfs_root *root = BTRFS_I(dir)->root; 4049 struct btrfs_root *root = BTRFS_I(dir)->root;
4051 int ret;
4052 4050
4053 /* 4051 /*
4054 * 1 for the possible orphan item 4052 * 1 for the possible orphan item
@@ -4057,27 +4055,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4057 * 1 for the inode ref 4055 * 1 for the inode ref
4058 * 1 for the inode 4056 * 1 for the inode
4059 */ 4057 */
4060 trans = btrfs_start_transaction(root, 5); 4058 return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
4061 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
4062 return trans;
4063
4064 if (PTR_ERR(trans) == -ENOSPC) {
4065 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4066
4067 trans = btrfs_start_transaction(root, 0);
4068 if (IS_ERR(trans))
4069 return trans;
4070 ret = btrfs_cond_migrate_bytes(root->fs_info,
4071 &root->fs_info->trans_block_rsv,
4072 num_bytes, 5);
4073 if (ret) {
4074 btrfs_end_transaction(trans, root);
4075 return ERR_PTR(ret);
4076 }
4077 trans->block_rsv = &root->fs_info->trans_block_rsv;
4078 trans->bytes_reserved = num_bytes;
4079 }
4080 return trans;
4081} 4059}
4082 4060
4083static int btrfs_unlink(struct inode *dir, struct dentry *dentry) 4061static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 93e12c18ffd7..5279fdae7142 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -993,9 +993,10 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
993 mutex_lock(&fs_info->qgroup_ioctl_lock); 993 mutex_lock(&fs_info->qgroup_ioctl_lock);
994 if (!fs_info->quota_root) 994 if (!fs_info->quota_root)
995 goto out; 995 goto out;
996 spin_lock(&fs_info->qgroup_lock);
997 fs_info->quota_enabled = 0; 996 fs_info->quota_enabled = 0;
998 fs_info->pending_quota_state = 0; 997 fs_info->pending_quota_state = 0;
998 btrfs_qgroup_wait_for_completion(fs_info);
999 spin_lock(&fs_info->qgroup_lock);
999 quota_root = fs_info->quota_root; 1000 quota_root = fs_info->quota_root;
1000 fs_info->quota_root = NULL; 1001 fs_info->quota_root = NULL;
1001 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; 1002 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
@@ -1461,6 +1462,8 @@ struct btrfs_qgroup_extent_record
1461 struct btrfs_qgroup_extent_record *entry; 1462 struct btrfs_qgroup_extent_record *entry;
1462 u64 bytenr = record->bytenr; 1463 u64 bytenr = record->bytenr;
1463 1464
1465 assert_spin_locked(&delayed_refs->lock);
1466
1464 while (*p) { 1467 while (*p) {
1465 parent_node = *p; 1468 parent_node = *p;
1466 entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, 1469 entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
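
The added assertion makes the caller contract explicit: the rbtree walked by
btrfs_qgroup_insert_dirty_extent() is protected by delayed_refs->lock, so every
caller must hold it, as record_one_subtree_extent() earlier in this series does:

    spin_lock(&delayed_refs->lock);
    if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
            kfree(qrecord);       /* duplicate; keep the existing record */
    spin_unlock(&delayed_refs->lock);
    /* calling the insert helper unlocked now trips assert_spin_locked() */
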
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 2907a77fb1f6..b091d94ceef6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3432,7 +3432,9 @@ out:
3432static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, 3432static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3433 struct btrfs_device *scrub_dev, 3433 struct btrfs_device *scrub_dev,
3434 u64 chunk_offset, u64 length, 3434 u64 chunk_offset, u64 length,
3435 u64 dev_offset, int is_dev_replace) 3435 u64 dev_offset,
3436 struct btrfs_block_group_cache *cache,
3437 int is_dev_replace)
3436{ 3438{
3437 struct btrfs_mapping_tree *map_tree = 3439 struct btrfs_mapping_tree *map_tree =
3438 &sctx->dev_root->fs_info->mapping_tree; 3440 &sctx->dev_root->fs_info->mapping_tree;
@@ -3445,8 +3447,18 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3445 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); 3447 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3446 read_unlock(&map_tree->map_tree.lock); 3448 read_unlock(&map_tree->map_tree.lock);
3447 3449
3448 if (!em) 3450 if (!em) {
3449 return -EINVAL; 3451 /*
3452 * Might have been an unused block group deleted by the cleaner
3453 * kthread or relocation.
3454 */
3455 spin_lock(&cache->lock);
3456 if (!cache->removed)
3457 ret = -EINVAL;
3458 spin_unlock(&cache->lock);
3459
3460 return ret;
3461 }
3450 3462
3451 map = (struct map_lookup *)em->bdev; 3463 map = (struct map_lookup *)em->bdev;
3452 if (em->start != chunk_offset) 3464 if (em->start != chunk_offset)
@@ -3483,6 +3495,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3483 u64 length; 3495 u64 length;
3484 u64 chunk_offset; 3496 u64 chunk_offset;
3485 int ret = 0; 3497 int ret = 0;
3498 int ro_set;
3486 int slot; 3499 int slot;
3487 struct extent_buffer *l; 3500 struct extent_buffer *l;
3488 struct btrfs_key key; 3501 struct btrfs_key key;
@@ -3568,7 +3581,21 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3568 scrub_pause_on(fs_info); 3581 scrub_pause_on(fs_info);
3569 ret = btrfs_inc_block_group_ro(root, cache); 3582 ret = btrfs_inc_block_group_ro(root, cache);
3570 scrub_pause_off(fs_info); 3583 scrub_pause_off(fs_info);
3571 if (ret) { 3584
3585 if (ret == 0) {
3586 ro_set = 1;
3587 } else if (ret == -ENOSPC) {
3588 /*
 3589 * btrfs_inc_block_group_ro returns -ENOSPC when it
 3590 * fails to create a new chunk for metadata.
 3591 * This is not a problem for scrub/replace, because
 3592 * metadata is always COWed, and our scrub pauses
 3593 * transaction commits.
3594 */
3595 ro_set = 0;
3596 } else {
 3597 btrfs_warn(fs_info, "failed setting block group ro, ret=%d",
3598 ret);
3572 btrfs_put_block_group(cache); 3599 btrfs_put_block_group(cache);
3573 break; 3600 break;
3574 } 3601 }
@@ -3577,7 +3604,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3577 dev_replace->cursor_left = found_key.offset; 3604 dev_replace->cursor_left = found_key.offset;
3578 dev_replace->item_needs_writeback = 1; 3605 dev_replace->item_needs_writeback = 1;
3579 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, 3606 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3580 found_key.offset, is_dev_replace); 3607 found_key.offset, cache, is_dev_replace);
3581 3608
3582 /* 3609 /*
3583 * flush, submit all pending read and write bios, afterwards 3610 * flush, submit all pending read and write bios, afterwards
@@ -3611,7 +3638,30 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3611 3638
3612 scrub_pause_off(fs_info); 3639 scrub_pause_off(fs_info);
3613 3640
3614 btrfs_dec_block_group_ro(root, cache); 3641 if (ro_set)
3642 btrfs_dec_block_group_ro(root, cache);
3643
3644 /*
3645 * We might have prevented the cleaner kthread from deleting
3646 * this block group if it was already unused because we raced
3647 * and set it to RO mode first. So add it back to the unused
3648 * list, otherwise it might not ever be deleted unless a manual
3649 * balance is triggered or it becomes used and unused again.
3650 */
3651 spin_lock(&cache->lock);
3652 if (!cache->removed && !cache->ro && cache->reserved == 0 &&
3653 btrfs_block_group_used(&cache->item) == 0) {
3654 spin_unlock(&cache->lock);
3655 spin_lock(&fs_info->unused_bgs_lock);
3656 if (list_empty(&cache->bg_list)) {
3657 btrfs_get_block_group(cache);
3658 list_add_tail(&cache->bg_list,
3659 &fs_info->unused_bgs);
3660 }
3661 spin_unlock(&fs_info->unused_bgs_lock);
3662 } else {
3663 spin_unlock(&cache->lock);
3664 }
3615 3665
3616 btrfs_put_block_group(cache); 3666 btrfs_put_block_group(cache);
3617 if (ret) 3667 if (ret)
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index c8c3d70c31ff..8b72b005bfb9 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -898,8 +898,10 @@ int btrfs_test_free_space_cache(void)
898 } 898 }
899 899
900 root = btrfs_alloc_dummy_root(); 900 root = btrfs_alloc_dummy_root();
901 if (!root) 901 if (IS_ERR(root)) {
902 ret = PTR_ERR(root);
902 goto out; 903 goto out;
904 }
903 905
904 root->fs_info = btrfs_alloc_dummy_fs_info(); 906 root->fs_info = btrfs_alloc_dummy_fs_info();
905 if (!root->fs_info) 907 if (!root->fs_info)
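
The test fix above follows the usual kernel ERR_PTR convention; a generic sketch of
why a NULL check is insufficient once an allocator can return an encoded errno
(alloc_foo is a hypothetical name):

    struct foo *p = alloc_foo();
    if (IS_ERR(p))                /* catches ERR_PTR(-ENOMEM) and friends */
            return PTR_ERR(p);
    /* a plain "if (!p)" would treat an ERR_PTR value as success and
     * dereference a poisoned pointer later on
     */
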
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 418c6a2ad7d8..be8eae80ff65 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -274,7 +274,6 @@ loop:
274 cur_trans->num_dirty_bgs = 0; 274 cur_trans->num_dirty_bgs = 0;
275 spin_lock_init(&cur_trans->dirty_bgs_lock); 275 spin_lock_init(&cur_trans->dirty_bgs_lock);
276 INIT_LIST_HEAD(&cur_trans->deleted_bgs); 276 INIT_LIST_HEAD(&cur_trans->deleted_bgs);
277 spin_lock_init(&cur_trans->deleted_bgs_lock);
278 spin_lock_init(&cur_trans->dropped_roots_lock); 277 spin_lock_init(&cur_trans->dropped_roots_lock);
279 list_add_tail(&cur_trans->list, &fs_info->trans_list); 278 list_add_tail(&cur_trans->list, &fs_info->trans_list);
280 extent_io_tree_init(&cur_trans->dirty_pages, 279 extent_io_tree_init(&cur_trans->dirty_pages,
@@ -592,6 +591,38 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
592 return start_transaction(root, num_items, TRANS_START, 591 return start_transaction(root, num_items, TRANS_START,
593 BTRFS_RESERVE_FLUSH_ALL); 592 BTRFS_RESERVE_FLUSH_ALL);
594} 593}
594struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
595 struct btrfs_root *root,
596 unsigned int num_items,
597 int min_factor)
598{
599 struct btrfs_trans_handle *trans;
600 u64 num_bytes;
601 int ret;
602
603 trans = btrfs_start_transaction(root, num_items);
604 if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
605 return trans;
606
607 trans = btrfs_start_transaction(root, 0);
608 if (IS_ERR(trans))
609 return trans;
610
611 num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
612 ret = btrfs_cond_migrate_bytes(root->fs_info,
613 &root->fs_info->trans_block_rsv,
614 num_bytes,
615 min_factor);
616 if (ret) {
617 btrfs_end_transaction(trans, root);
618 return ERR_PTR(ret);
619 }
620
621 trans->block_rsv = &root->fs_info->trans_block_rsv;
622 trans->bytes_reserved = num_bytes;
623
624 return trans;
625}
595 626
596struct btrfs_trans_handle *btrfs_start_transaction_lflush( 627struct btrfs_trans_handle *btrfs_start_transaction_lflush(
597 struct btrfs_root *root, 628 struct btrfs_root *root,
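
The new helper centralises the fallback pattern that __unlink_start_trans()
open-coded before this series; callers now reduce to a single call, as in the
unlink path above:

    /* 5 reserved items (orphan, dir item, dir index, inode ref, inode);
     * min_factor is forwarded to btrfs_cond_migrate_bytes()
     */
    trans = btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
    if (IS_ERR(trans))
            return PTR_ERR(trans);
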
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index b05b2f64d913..64c8221b6165 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -77,8 +77,8 @@ struct btrfs_transaction {
77 */ 77 */
78 struct mutex cache_write_mutex; 78 struct mutex cache_write_mutex;
79 spinlock_t dirty_bgs_lock; 79 spinlock_t dirty_bgs_lock;
80 /* Protected by spin lock fs_info->unused_bgs_lock. */
80 struct list_head deleted_bgs; 81 struct list_head deleted_bgs;
81 spinlock_t deleted_bgs_lock;
82 spinlock_t dropped_roots_lock; 82 spinlock_t dropped_roots_lock;
83 struct btrfs_delayed_ref_root delayed_refs; 83 struct btrfs_delayed_ref_root delayed_refs;
84 int aborted; 84 int aborted;
@@ -185,6 +185,10 @@ int btrfs_end_transaction(struct btrfs_trans_handle *trans,
185 struct btrfs_root *root); 185 struct btrfs_root *root);
186struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, 186struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
187 unsigned int num_items); 187 unsigned int num_items);
188struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
189 struct btrfs_root *root,
190 unsigned int num_items,
191 int min_factor);
188struct btrfs_trans_handle *btrfs_start_transaction_lflush( 192struct btrfs_trans_handle *btrfs_start_transaction_lflush(
189 struct btrfs_root *root, 193 struct btrfs_root *root,
190 unsigned int num_items); 194 unsigned int num_items);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a6df8fdc1312..a23399e8e3ab 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1973,8 +1973,7 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
1973 if (srcdev->writeable) { 1973 if (srcdev->writeable) {
1974 fs_devices->rw_devices--; 1974 fs_devices->rw_devices--;
1975 /* zero out the old super if it is writable */ 1975 /* zero out the old super if it is writable */
1976 btrfs_scratch_superblocks(srcdev->bdev, 1976 btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
1977 rcu_str_deref(srcdev->name));
1978 } 1977 }
1979 1978
1980 if (srcdev->bdev) 1979 if (srcdev->bdev)
@@ -2024,8 +2023,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
2024 btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev); 2023 btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev);
2025 2024
2026 if (tgtdev->bdev) { 2025 if (tgtdev->bdev) {
2027 btrfs_scratch_superblocks(tgtdev->bdev, 2026 btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
2028 rcu_str_deref(tgtdev->name));
2029 fs_info->fs_devices->open_devices--; 2027 fs_info->fs_devices->open_devices--;
2030 } 2028 }
2031 fs_info->fs_devices->num_devices--; 2029 fs_info->fs_devices->num_devices--;
@@ -2853,7 +2851,8 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
2853 if (ret) 2851 if (ret)
2854 return ret; 2852 return ret;
2855 2853
2856 trans = btrfs_start_transaction(root, 0); 2854 trans = btrfs_start_trans_remove_block_group(root->fs_info,
2855 chunk_offset);
2857 if (IS_ERR(trans)) { 2856 if (IS_ERR(trans)) {
2858 ret = PTR_ERR(trans); 2857 ret = PTR_ERR(trans);
2859 btrfs_std_error(root->fs_info, ret, NULL); 2858 btrfs_std_error(root->fs_info, ret, NULL);
@@ -3123,7 +3122,7 @@ static int chunk_profiles_filter(u64 chunk_type,
3123 return 1; 3122 return 1;
3124} 3123}
3125 3124
3126static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset, 3125static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3127 struct btrfs_balance_args *bargs) 3126 struct btrfs_balance_args *bargs)
3128{ 3127{
3129 struct btrfs_block_group_cache *cache; 3128 struct btrfs_block_group_cache *cache;
@@ -3156,7 +3155,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3156 return ret; 3155 return ret;
3157} 3156}
3158 3157
3159static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, 3158static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3160 u64 chunk_offset, struct btrfs_balance_args *bargs) 3159 u64 chunk_offset, struct btrfs_balance_args *bargs)
3161{ 3160{
3162 struct btrfs_block_group_cache *cache; 3161 struct btrfs_block_group_cache *cache;
@@ -3549,12 +3548,11 @@ again:
3549 3548
3550 ret = btrfs_force_chunk_alloc(trans, chunk_root, 3549 ret = btrfs_force_chunk_alloc(trans, chunk_root,
3551 BTRFS_BLOCK_GROUP_DATA); 3550 BTRFS_BLOCK_GROUP_DATA);
3551 btrfs_end_transaction(trans, chunk_root);
3552 if (ret < 0) { 3552 if (ret < 0) {
3553 mutex_unlock(&fs_info->delete_unused_bgs_mutex); 3553 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3554 goto error; 3554 goto error;
3555 } 3555 }
3556
3557 btrfs_end_transaction(trans, chunk_root);
3558 chunk_reserved = 1; 3556 chunk_reserved = 1;
3559 } 3557 }
3560 3558
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index ec5712372732..d5c84f6b1353 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -382,7 +382,7 @@ struct map_lookup {
382#define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5) 382#define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5)
383#define BTRFS_BALANCE_ARGS_LIMIT_RANGE (1ULL << 6) 383#define BTRFS_BALANCE_ARGS_LIMIT_RANGE (1ULL << 6)
384#define BTRFS_BALANCE_ARGS_STRIPES_RANGE (1ULL << 7) 384#define BTRFS_BALANCE_ARGS_STRIPES_RANGE (1ULL << 7)
385#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 8) 385#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 10)
386 386
387#define BTRFS_BALANCE_ARGS_MASK \ 387#define BTRFS_BALANCE_ARGS_MASK \
388 (BTRFS_BALANCE_ARGS_PROFILES | \ 388 (BTRFS_BALANCE_ARGS_PROFILES | \
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 7a6b02f72787..c0f3da3926a0 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -879,7 +879,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
879 loff_t pos, eof; 879 loff_t pos, eof;
880 size_t len; 880 size_t len;
881 void *data; 881 void *data;
882 int ret; 882 int ret = -ENOBUFS;
883 883
884 ASSERT(op != NULL); 884 ASSERT(op != NULL);
885 ASSERT(page != NULL); 885 ASSERT(page != NULL);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 6b66dd5d1540..a329f5ba35aa 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1831,11 +1831,11 @@ cifs_invalidate_mapping(struct inode *inode)
1831 * @word: long word containing the bit lock 1831 * @word: long word containing the bit lock
1832 */ 1832 */
1833static int 1833static int
1834cifs_wait_bit_killable(struct wait_bit_key *key) 1834cifs_wait_bit_killable(struct wait_bit_key *key, int mode)
1835{ 1835{
1836 if (fatal_signal_pending(current))
1837 return -ERESTARTSYS;
1838 freezable_schedule_unsafe(); 1836 freezable_schedule_unsafe();
1837 if (signal_pending_state(mode, current))
1838 return -ERESTARTSYS;
1839 return 0; 1839 return 0;
1840} 1840}
1841 1841
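
The reordering matters: the old code could return -ERESTARTSYS without ever
scheduling, and it checked only for fatal signals even when the task slept
interruptibly. Passing the sleep mode lets the check honour the actual task state;
roughly, as a sketch of signal_pending_state() rather than a verbatim excerpt:

    static inline int signal_pending_state(long state, struct task_struct *p)
    {
            if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
                    return 0;
            if (!signal_pending(p))
                    return 0;
            return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
    }
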
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index c81ce7f200a6..a7a1b218f308 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1636,6 +1636,116 @@ const struct file_operations configfs_dir_operations = {
1636 .iterate = configfs_readdir, 1636 .iterate = configfs_readdir,
1637}; 1637};
1638 1638
1639/**
1640 * configfs_register_group - creates a parent-child relation between two groups
1641 * @parent_group: parent group
1642 * @group: child group
1643 *
 1644 * Links the groups, creates a dentry for the child and attaches it to the
1645 * parent dentry.
1646 *
1647 * Return: 0 on success, negative errno code on error
1648 */
1649int configfs_register_group(struct config_group *parent_group,
1650 struct config_group *group)
1651{
1652 struct configfs_subsystem *subsys = parent_group->cg_subsys;
1653 struct dentry *parent;
1654 int ret;
1655
1656 mutex_lock(&subsys->su_mutex);
1657 link_group(parent_group, group);
1658 mutex_unlock(&subsys->su_mutex);
1659
1660 parent = parent_group->cg_item.ci_dentry;
1661
1662 mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
1663 ret = create_default_group(parent_group, group);
1664 if (!ret) {
1665 spin_lock(&configfs_dirent_lock);
1666 configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
1667 spin_unlock(&configfs_dirent_lock);
1668 }
1669 mutex_unlock(&d_inode(parent)->i_mutex);
1670 return ret;
1671}
1672EXPORT_SYMBOL(configfs_register_group);
1673
1674/**
1675 * configfs_unregister_group() - unregisters a child group from its parent
 1676 * @group: child group to be unregistered
1677 *
1678 * Undoes configfs_register_group()
1679 */
1680void configfs_unregister_group(struct config_group *group)
1681{
1682 struct configfs_subsystem *subsys = group->cg_subsys;
1683 struct dentry *dentry = group->cg_item.ci_dentry;
1684 struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
1685
1686 mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT);
1687 spin_lock(&configfs_dirent_lock);
1688 configfs_detach_prep(dentry, NULL);
1689 spin_unlock(&configfs_dirent_lock);
1690
1691 configfs_detach_group(&group->cg_item);
1692 d_inode(dentry)->i_flags |= S_DEAD;
1693 dont_mount(dentry);
1694 d_delete(dentry);
1695 mutex_unlock(&d_inode(parent)->i_mutex);
1696
1697 dput(dentry);
1698
1699 mutex_lock(&subsys->su_mutex);
1700 unlink_group(group);
1701 mutex_unlock(&subsys->su_mutex);
1702}
1703EXPORT_SYMBOL(configfs_unregister_group);
1704
1705/**
1706 * configfs_register_default_group() - allocates and registers a child group
1707 * @parent_group: parent group
1708 * @name: child group name
1709 * @item_type: child item type description
1710 *
 1711 * Boilerplate to allocate and register a child group with its parent. We need
 1712 * kzalloc'ed memory because the child's default_group is initially empty.
1713 *
1714 * Return: allocated config group or ERR_PTR() on error
1715 */
1716struct config_group *
1717configfs_register_default_group(struct config_group *parent_group,
1718 const char *name,
1719 struct config_item_type *item_type)
1720{
1721 int ret;
1722 struct config_group *group;
1723
1724 group = kzalloc(sizeof(*group), GFP_KERNEL);
1725 if (!group)
1726 return ERR_PTR(-ENOMEM);
1727 config_group_init_type_name(group, name, item_type);
1728
1729 ret = configfs_register_group(parent_group, group);
1730 if (ret) {
1731 kfree(group);
1732 return ERR_PTR(ret);
1733 }
1734 return group;
1735}
1736EXPORT_SYMBOL(configfs_register_default_group);
1737
1738/**
1739 * configfs_unregister_default_group() - unregisters and frees a child group
1740 * @group: the group to act on
1741 */
1742void configfs_unregister_default_group(struct config_group *group)
1743{
1744 configfs_unregister_group(group);
1745 kfree(group);
1746}
1747EXPORT_SYMBOL(configfs_unregister_default_group);
1748
1639int configfs_register_subsystem(struct configfs_subsystem *subsys) 1749int configfs_register_subsystem(struct configfs_subsystem *subsys)
1640{ 1750{
1641 int err; 1751 int err;
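
A hypothetical user of the new helpers (my_child_type and my_driver_* are made-up
names), showing the intended pairing:

    static struct config_item_type my_child_type = {
            .ct_owner = THIS_MODULE,
    };
    static struct config_group *my_child;

    static int my_driver_init(struct config_group *my_subsys_group)
    {
            my_child = configfs_register_default_group(my_subsys_group,
                                                       "my_child",
                                                       &my_child_type);
            return PTR_ERR_OR_ZERO(my_child);
    }

    static void my_driver_exit(void)
    {
            configfs_unregister_default_group(my_child);  /* also frees it */
    }
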
diff --git a/fs/dax.c b/fs/dax.c
index d1e5cb7311a1..43671b68220e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -541,6 +541,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
541 unsigned long pfn; 541 unsigned long pfn;
542 int result = 0; 542 int result = 0;
543 543
544 /* dax pmd mappings are broken wrt gup and fork */
545 if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
546 return VM_FAULT_FALLBACK;
547
544 /* Fall back to PTEs if we're going to COW */ 548 /* Fall back to PTEs if we're going to COW */
545 if (write && !(vma->vm_flags & VM_SHARED)) 549 if (write && !(vma->vm_flags & VM_SHARED))
546 return VM_FAULT_FALLBACK; 550 return VM_FAULT_FALLBACK;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index cb5337d8c273..602e8441bc0f 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1169,6 +1169,16 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1169 } 1169 }
1170 } 1170 }
1171 1171
 1172 /* Once we have sampled i_size, check for reads beyond EOF */
1173 dio->i_size = i_size_read(inode);
1174 if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
1175 if (dio->flags & DIO_LOCKING)
1176 mutex_unlock(&inode->i_mutex);
1177 kmem_cache_free(dio_cache, dio);
1178 retval = 0;
1179 goto out;
1180 }
1181
1172 /* 1182 /*
1173 * For file extending writes updating i_size before data writeouts 1183 * For file extending writes updating i_size before data writeouts
1174 * complete can expose uninitialized blocks in dumb filesystems. 1184 * complete can expose uninitialized blocks in dumb filesystems.
@@ -1222,7 +1232,6 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1222 sdio.next_block_for_io = -1; 1232 sdio.next_block_for_io = -1;
1223 1233
1224 dio->iocb = iocb; 1234 dio->iocb = iocb;
1225 dio->i_size = i_size_read(inode);
1226 1235
1227 spin_lock_init(&dio->bio_lock); 1236 spin_lock_init(&dio->bio_lock);
1228 dio->refcount = 1; 1237 dio->refcount = 1;
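
Seen from userspace, the new early check makes a direct read at or past end-of-file
return 0 (EOF) without building any bios; a sketch, assuming fd was opened with
O_DIRECT and file_size is the block-aligned current i_size:

    char buf[4096] __attribute__((aligned(4096)));
    ssize_t n = pread(fd, buf, sizeof(buf), file_size);
    /* expected: n == 0; the offset >= dio->i_size test above
     * short-circuits the request instead of reading past EOF
     */
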
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 87e9d796cf7d..3a37bd3f9637 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -421,7 +421,7 @@ static void lowcomms_write_space(struct sock *sk)
421 421
422 if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) { 422 if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
423 con->sock->sk->sk_write_pending--; 423 con->sock->sk->sk_write_pending--;
424 clear_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags); 424 clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
425 } 425 }
426 426
427 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) 427 if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
@@ -1448,7 +1448,7 @@ static void send_to_sock(struct connection *con)
1448 msg_flags); 1448 msg_flags);
1449 if (ret == -EAGAIN || ret == 0) { 1449 if (ret == -EAGAIN || ret == 0) {
1450 if (ret == -EAGAIN && 1450 if (ret == -EAGAIN &&
1451 test_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags) && 1451 test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
1452 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) { 1452 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
1453 /* Notify TCP that we're limited by the 1453 /* Notify TCP that we're limited by the
1454 * application window size. 1454 * application window size.
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 73c64daa0f55..60f03b78914e 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -592,10 +592,7 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
592 } 592 }
593 unlock_page(page); 593 unlock_page(page);
594 } 594 }
595 if (PageDirty(page) || PageWriteback(page)) 595 *uptodate = PageUptodate(page);
596 *uptodate = true;
597 else
598 *uptodate = PageUptodate(page);
599 EXOFS_DBGMSG2("index=0x%lx uptodate=%d\n", index, *uptodate); 596 EXOFS_DBGMSG2("index=0x%lx uptodate=%d\n", index, *uptodate);
600 return page; 597 return page;
601 } else { 598 } else {
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 3a71cea68420..748d35afc902 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -569,6 +569,8 @@ static int parse_options(char *options, struct super_block *sb)
569 /* Fall through */ 569 /* Fall through */
570 case Opt_dax: 570 case Opt_dax:
571#ifdef CONFIG_FS_DAX 571#ifdef CONFIG_FS_DAX
572 ext2_msg(sb, KERN_WARNING,
573 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
572 set_opt(sbi->s_mount_opt, DAX); 574 set_opt(sbi->s_mount_opt, DAX);
573#else 575#else
574 ext2_msg(sb, KERN_INFO, "dax option not supported"); 576 ext2_msg(sb, KERN_INFO, "dax option not supported");
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index af06830bfc00..1a0835073663 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -389,7 +389,7 @@ int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
389 struct ext4_crypto_ctx *ctx; 389 struct ext4_crypto_ctx *ctx;
390 struct page *ciphertext_page = NULL; 390 struct page *ciphertext_page = NULL;
391 struct bio *bio; 391 struct bio *bio;
392 ext4_lblk_t lblk = ex->ee_block; 392 ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
393 ext4_fsblk_t pblk = ext4_ext_pblock(ex); 393 ext4_fsblk_t pblk = ext4_ext_pblock(ex);
394 unsigned int len = ext4_ext_get_actual_len(ex); 394 unsigned int len = ext4_ext_get_actual_len(ex);
395 int ret, err = 0; 395 int ret, err = 0;
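
The one-line fix matters on big-endian hosts: ee_block is a little-endian on-disk
field, and using it unconverted only happens to work on little-endian CPUs. A sketch
of the failure mode, assuming a big-endian host:

    __le32 raw = cpu_to_le32(0x1234);   /* stored as bytes 34 12 00 00 */
    u32 ok  = le32_to_cpu(raw);         /* 0x1234 on any endianness */
    u32 bad = (__force u32)raw;         /* 0x34120000 on big-endian */
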
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 750063f7a50c..cc7ca4e87144 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -26,6 +26,7 @@
26#include <linux/seqlock.h> 26#include <linux/seqlock.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/timer.h> 28#include <linux/timer.h>
29#include <linux/version.h>
29#include <linux/wait.h> 30#include <linux/wait.h>
30#include <linux/blockgroup_lock.h> 31#include <linux/blockgroup_lock.h>
31#include <linux/percpu_counter.h> 32#include <linux/percpu_counter.h>
@@ -727,19 +728,55 @@ struct move_extent {
727 <= (EXT4_GOOD_OLD_INODE_SIZE + \ 728 <= (EXT4_GOOD_OLD_INODE_SIZE + \
728 (einode)->i_extra_isize)) \ 729 (einode)->i_extra_isize)) \
729 730
731/*
732 * We use an encoding that preserves the times for extra epoch "00":
733 *
 734 * extra  msb of                          adjust for signed
 735 * epoch  32-bit                          32-bit tv_sec to
 736 * bits   time    decoded 64-bit tv_sec   64-bit tv_sec     valid time range
 737 * 0 0    1    -0x80000000..-0x00000001   0x000000000       1901-12-13..1969-12-31
 738 * 0 0    0    0x000000000..0x07fffffff   0x000000000       1970-01-01..2038-01-19
 739 * 0 1    1    0x080000000..0x0ffffffff   0x100000000       2038-01-19..2106-02-07
 740 * 0 1    0    0x100000000..0x17fffffff   0x100000000       2106-02-07..2174-02-25
 741 * 1 0    1    0x180000000..0x1ffffffff   0x200000000       2174-02-25..2242-03-16
 742 * 1 0    0    0x200000000..0x27fffffff   0x200000000       2242-03-16..2310-04-04
 743 * 1 1    1    0x280000000..0x2ffffffff   0x300000000       2310-04-04..2378-04-22
 744 * 1 1    0    0x300000000..0x37fffffff   0x300000000       2378-04-22..2446-05-10
745 *
746 * Note that previous versions of the kernel on 64-bit systems would
747 * incorrectly use extra epoch bits 1,1 for dates between 1901 and
748 * 1970. e2fsck will correct this, assuming that it is run on the
749 * affected filesystem before 2242.
750 */
751
730static inline __le32 ext4_encode_extra_time(struct timespec *time) 752static inline __le32 ext4_encode_extra_time(struct timespec *time)
731{ 753{
732 return cpu_to_le32((sizeof(time->tv_sec) > 4 ? 754 u32 extra = sizeof(time->tv_sec) > 4 ?
733 (time->tv_sec >> 32) & EXT4_EPOCH_MASK : 0) | 755 ((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK : 0;
734 ((time->tv_nsec << EXT4_EPOCH_BITS) & EXT4_NSEC_MASK)); 756 return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS));
735} 757}
736 758
737static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra) 759static inline void ext4_decode_extra_time(struct timespec *time, __le32 extra)
738{ 760{
739 if (sizeof(time->tv_sec) > 4) 761 if (unlikely(sizeof(time->tv_sec) > 4 &&
740 time->tv_sec |= (__u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) 762 (extra & cpu_to_le32(EXT4_EPOCH_MASK)))) {
741 << 32; 763#if LINUX_VERSION_CODE < KERNEL_VERSION(4,20,0)
742 time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS; 764 /* Handle legacy encoding of pre-1970 dates with epoch
765 * bits 1,1. We assume that by kernel version 4.20,
766 * everyone will have run fsck over the affected
767 * filesystems to correct the problem. (This
768 * backwards compatibility may be removed before this
769 * time, at the discretion of the ext4 developers.)
770 */
771 u64 extra_bits = le32_to_cpu(extra) & EXT4_EPOCH_MASK;
772 if (extra_bits == 3 && ((time->tv_sec) & 0x80000000) != 0)
773 extra_bits = 0;
774 time->tv_sec += extra_bits << 32;
775#else
776 time->tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
777#endif
778 }
779 time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
743} 780}
744 781
745#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \ 782#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
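
A worked example of the encoding for the first second past the signed 32-bit
rollover, matching the "0 1 1" row of the table above:

    /* tv_sec = 0x80000000 (2038-01-19T03:14:08Z)
     *
     * encode: the low 32 bits go to the on-disk field as-is (0x80000000);
     *   extra = ((0x80000000 - (s32)0x80000000) >> 32) & EXT4_EPOCH_MASK
     *         = (0x100000000 >> 32) & 3 = 1
     *
     * decode: start from the sign-extended 32-bit value, -0x80000000,
     *   then add extra << 32 = 0x100000000, recovering 0x80000000
     */
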
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 753f4e68b820..c9ab67da6e5a 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1664,8 +1664,12 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
1664 } 1664 }
1665 sbi->s_jquota_fmt = m->mount_opt; 1665 sbi->s_jquota_fmt = m->mount_opt;
1666#endif 1666#endif
1667#ifndef CONFIG_FS_DAX
1668 } else if (token == Opt_dax) { 1667 } else if (token == Opt_dax) {
1668#ifdef CONFIG_FS_DAX
1669 ext4_msg(sb, KERN_WARNING,
1670 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1671 sbi->s_mount_opt |= m->mount_opt;
1672#else
1669 ext4_msg(sb, KERN_INFO, "dax option not supported"); 1673 ext4_msg(sb, KERN_INFO, "dax option not supported");
1670 return -1; 1674 return -1;
1671#endif 1675#endif
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index abe2401ce405..e8e7af62ac95 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -52,7 +52,7 @@ static const char *ext4_encrypted_follow_link(struct dentry *dentry, void **cook
52 /* Symlink is encrypted */ 52 /* Symlink is encrypted */
53 sd = (struct ext4_encrypted_symlink_data *)caddr; 53 sd = (struct ext4_encrypted_symlink_data *)caddr;
54 cstr.name = sd->encrypted_path; 54 cstr.name = sd->encrypted_path;
55 cstr.len = le32_to_cpu(sd->len); 55 cstr.len = le16_to_cpu(sd->len);
56 if ((cstr.len + 56 if ((cstr.len +
57 sizeof(struct ext4_encrypted_symlink_data) - 1) > 57 sizeof(struct ext4_encrypted_symlink_data) - 1) >
58 max_size) { 58 max_size) {
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 1b57c72f4a00..1420a3c614af 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -358,7 +358,7 @@ static int name##_open(struct inode *inode, struct file *file) \
358 return single_open(file, ext4_seq_##name##_show, PDE_DATA(inode)); \ 358 return single_open(file, ext4_seq_##name##_show, PDE_DATA(inode)); \
359} \ 359} \
360\ 360\
361const struct file_operations ext4_seq_##name##_fops = { \ 361static const struct file_operations ext4_seq_##name##_fops = { \
362 .owner = THIS_MODULE, \ 362 .owner = THIS_MODULE, \
363 .open = name##_open, \ 363 .open = name##_open, \
364 .read = seq_read, \ 364 .read = seq_read, \
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 4afc4d9d2e41..8b2127ffb226 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -610,9 +610,9 @@ parse_record:
610 int status = fat_parse_long(inode, &cpos, &bh, &de, 610 int status = fat_parse_long(inode, &cpos, &bh, &de,
611 &unicode, &nr_slots); 611 &unicode, &nr_slots);
612 if (status < 0) { 612 if (status < 0) {
613 ctx->pos = cpos; 613 bh = NULL;
614 ret = status; 614 ret = status;
615 goto out; 615 goto end_of_dir;
616 } else if (status == PARSE_INVALID) 616 } else if (status == PARSE_INVALID)
617 goto record_end; 617 goto record_end;
618 else if (status == PARSE_NOT_LONGNAME) 618 else if (status == PARSE_NOT_LONGNAME)
@@ -654,8 +654,9 @@ parse_record:
654 fill_len = short_len; 654 fill_len = short_len;
655 655
656start_filldir: 656start_filldir:
657 if (!fake_offset) 657 ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry);
658 ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry); 658 if (fake_offset && ctx->pos < 2)
659 ctx->pos = 2;
659 660
660 if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) { 661 if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) {
661 if (!dir_emit_dot(file, ctx)) 662 if (!dir_emit_dot(file, ctx))
@@ -681,14 +682,19 @@ record_end:
681 fake_offset = 0; 682 fake_offset = 0;
682 ctx->pos = cpos; 683 ctx->pos = cpos;
683 goto get_new; 684 goto get_new;
685
684end_of_dir: 686end_of_dir:
685 ctx->pos = cpos; 687 if (fake_offset && cpos < 2)
688 ctx->pos = 2;
689 else
690 ctx->pos = cpos;
686fill_failed: 691fill_failed:
687 brelse(bh); 692 brelse(bh);
688 if (unicode) 693 if (unicode)
689 __putname(unicode); 694 __putname(unicode);
690out: 695out:
691 mutex_unlock(&sbi->s_lock); 696 mutex_unlock(&sbi->s_lock);
697
692 return ret; 698 return ret;
693} 699}
694 700
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index eae2c11268bc..8e3ee1936c7e 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -549,6 +549,8 @@ static int cuse_channel_release(struct inode *inode, struct file *file)
549 unregister_chrdev_region(cc->cdev->dev, 1); 549 unregister_chrdev_region(cc->cdev->dev, 1);
550 cdev_del(cc->cdev); 550 cdev_del(cc->cdev);
551 } 551 }
552 /* Base reference is now owned by "fud" */
553 fuse_conn_put(&cc->fc);
552 554
553 rc = fuse_dev_release(inode, file); /* puts the base reference */ 555 rc = fuse_dev_release(inode, file); /* puts the base reference */
554 556
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e0faf8f2c868..570ca4053c80 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1049,6 +1049,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1049 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); 1049 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
1050 flush_dcache_page(page); 1050 flush_dcache_page(page);
1051 1051
1052 iov_iter_advance(ii, tmp);
1052 if (!tmp) { 1053 if (!tmp) {
1053 unlock_page(page); 1054 unlock_page(page);
1054 page_cache_release(page); 1055 page_cache_release(page);
@@ -1061,7 +1062,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
1061 req->page_descs[req->num_pages].length = tmp; 1062 req->page_descs[req->num_pages].length = tmp;
1062 req->num_pages++; 1063 req->num_pages++;
1063 1064
1064 iov_iter_advance(ii, tmp);
1065 count += tmp; 1065 count += tmp;
1066 pos += tmp; 1066 pos += tmp;
1067 offset += tmp; 1067 offset += tmp;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 316adb968b65..de4bdfac0cec 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -332,12 +332,17 @@ static void remove_huge_page(struct page *page)
332 * truncation is indicated by end of range being LLONG_MAX 332 * truncation is indicated by end of range being LLONG_MAX
333 * In this case, we first scan the range and release found pages. 333 * In this case, we first scan the range and release found pages.
334 * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv 334 * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 335 * maps and global counts. 335 * maps and global counts. Page faults cannot race with truncation
 336 * in this routine. hugetlb_no_page() prevents page faults in the
 337 * truncated range. It checks i_size before allocation, and again after,
338 * with the page table lock for the page held. The same lock must be
339 * acquired to unmap a page.
336 * hole punch is indicated if end is not LLONG_MAX 340 * hole punch is indicated if end is not LLONG_MAX
337 * In the hole punch case we scan the range and release found pages. 341 * In the hole punch case we scan the range and release found pages.
338 * Only when releasing a page is the associated region/reserv map 342 * Only when releasing a page is the associated region/reserv map
339 * deleted. The region/reserv map for ranges without associated 343 * deleted. The region/reserv map for ranges without associated
340 * pages are not modified. 344 * pages are not modified. Page faults can race with hole punch.
345 * This is indicated if we find a mapped page.
341 * Note: If the passed end of range value is beyond the end of file, but 346 * Note: If the passed end of range value is beyond the end of file, but
342 * not LLONG_MAX this routine still performs a hole punch operation. 347 * not LLONG_MAX this routine still performs a hole punch operation.
343 */ 348 */
@@ -361,46 +366,37 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
361 next = start; 366 next = start;
362 while (next < end) { 367 while (next < end) {
363 /* 368 /*
364 * Make sure to never grab more pages that we 369 * Don't grab more pages than the number left in the range.
365 * might possibly need.
366 */ 370 */
367 if (end - next < lookup_nr) 371 if (end - next < lookup_nr)
368 lookup_nr = end - next; 372 lookup_nr = end - next;
369 373
370 /* 374 /*
371 * This pagevec_lookup() may return pages past 'end', 375 * When no more pages are found, we are done.
372 * so we must check for page->index > end.
373 */ 376 */
374 if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) { 377 if (!pagevec_lookup(&pvec, mapping, next, lookup_nr))
375 if (next == start) 378 break;
376 break;
377 next = start;
378 continue;
379 }
380 379
381 for (i = 0; i < pagevec_count(&pvec); ++i) { 380 for (i = 0; i < pagevec_count(&pvec); ++i) {
382 struct page *page = pvec.pages[i]; 381 struct page *page = pvec.pages[i];
383 u32 hash; 382 u32 hash;
384 383
384 /*
385 * The page (index) could be beyond end. This is
 386 * only possible in the punch hole case, as end is
 387 * the max page offset in the truncate case.
388 */
389 next = page->index;
390 if (next >= end)
391 break;
392
385 hash = hugetlb_fault_mutex_hash(h, current->mm, 393 hash = hugetlb_fault_mutex_hash(h, current->mm,
386 &pseudo_vma, 394 &pseudo_vma,
387 mapping, next, 0); 395 mapping, next, 0);
388 mutex_lock(&hugetlb_fault_mutex_table[hash]); 396 mutex_lock(&hugetlb_fault_mutex_table[hash]);
389 397
390 lock_page(page); 398 lock_page(page);
391 if (page->index >= end) { 399 if (likely(!page_mapped(page))) {
392 unlock_page(page);
393 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
394 next = end; /* we are done */
395 break;
396 }
397
398 /*
399 * If page is mapped, it was faulted in after being
400 * unmapped. Do nothing in this race case. In the
401 * normal case page is not mapped.
402 */
403 if (!page_mapped(page)) {
404 bool rsv_on_error = !PagePrivate(page); 400 bool rsv_on_error = !PagePrivate(page);
405 /* 401 /*
406 * We must free the huge page and remove 402 * We must free the huge page and remove
@@ -421,17 +417,23 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
421 hugetlb_fix_reserve_counts( 417 hugetlb_fix_reserve_counts(
422 inode, rsv_on_error); 418 inode, rsv_on_error);
423 } 419 }
420 } else {
421 /*
422 * If page is mapped, it was faulted in after
423 * being unmapped. It indicates a race between
424 * hole punch and page fault. Do nothing in
425 * this case. Getting here in a truncate
426 * operation is a bug.
427 */
428 BUG_ON(truncate_op);
424 } 429 }
425 430
426 if (page->index > next)
427 next = page->index;
428
429 ++next;
430 unlock_page(page); 431 unlock_page(page);
431
432 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 432 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
433 } 433 }
434 ++next;
434 huge_pagevec_release(&pvec); 435 huge_pagevec_release(&pvec);
436 cond_resched();
435 } 437 }
436 438
437 if (truncate_op) 439 if (truncate_op)
@@ -647,9 +649,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
647 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 649 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
648 i_size_write(inode, offset + len); 650 i_size_write(inode, offset + len);
649 inode->i_ctime = CURRENT_TIME; 651 inode->i_ctime = CURRENT_TIME;
650 spin_lock(&inode->i_lock);
651 inode->i_private = NULL;
652 spin_unlock(&inode->i_lock);
653out: 652out:
654 mutex_unlock(&inode->i_mutex); 653 mutex_unlock(&inode->i_mutex);
655 return error; 654 return error;
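
A condensed sketch of the per-page protocol the rewritten loop now follows,
paraphrasing the code above rather than excerpting it verbatim:

    hash = hugetlb_fault_mutex_hash(h, current->mm, &pseudo_vma,
                                    mapping, next, 0);
    mutex_lock(&hugetlb_fault_mutex_table[hash]);  /* serialises vs. faults */
    lock_page(page);
    if (!page_mapped(page)) {
            /* safe: truncation, or a hole-punch page nobody re-faulted */
            remove_huge_page(page);
    } else {
            /* a page fault won the race; legal only for hole punch */
            BUG_ON(truncate_op);
    }
    unlock_page(page);
    mutex_unlock(&hugetlb_fault_mutex_table[hash]);
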
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 89463eee6791..ca181e81c765 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1009,7 +1009,8 @@ out:
1009} 1009}
1010 1010
1011/* Fast check whether buffer is already attached to the required transaction */ 1011/* Fast check whether buffer is already attached to the required transaction */
1012static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh) 1012static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
1013 bool undo)
1013{ 1014{
1014 struct journal_head *jh; 1015 struct journal_head *jh;
1015 bool ret = false; 1016 bool ret = false;
@@ -1036,6 +1037,9 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh)
1036 jh = READ_ONCE(bh->b_private); 1037 jh = READ_ONCE(bh->b_private);
1037 if (!jh) 1038 if (!jh)
1038 goto out; 1039 goto out;
1040 /* For undo access buffer must have data copied */
1041 if (undo && !jh->b_committed_data)
1042 goto out;
1039 if (jh->b_transaction != handle->h_transaction && 1043 if (jh->b_transaction != handle->h_transaction &&
1040 jh->b_next_transaction != handle->h_transaction) 1044 jh->b_next_transaction != handle->h_transaction)
1041 goto out; 1045 goto out;
@@ -1073,7 +1077,7 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
1073 struct journal_head *jh; 1077 struct journal_head *jh;
1074 int rc; 1078 int rc;
1075 1079
1076 if (jbd2_write_access_granted(handle, bh)) 1080 if (jbd2_write_access_granted(handle, bh, false))
1077 return 0; 1081 return 0;
1078 1082
1079 jh = jbd2_journal_add_journal_head(bh); 1083 jh = jbd2_journal_add_journal_head(bh);
@@ -1210,7 +1214,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
1210 char *committed_data = NULL; 1214 char *committed_data = NULL;
1211 1215
1212 JBUFFER_TRACE(jh, "entry"); 1216 JBUFFER_TRACE(jh, "entry");
1213 if (jbd2_write_access_granted(handle, bh)) 1217 if (jbd2_write_access_granted(handle, bh, true))
1214 return 0; 1218 return 0;
1215 1219
1216 jh = jbd2_journal_add_journal_head(bh); 1220 jh = jbd2_journal_add_journal_head(bh);
@@ -2152,6 +2156,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
2152 2156
2153 if (!buffer_dirty(bh)) { 2157 if (!buffer_dirty(bh)) {
2154 /* bdflush has written it. We can drop it now */ 2158 /* bdflush has written it. We can drop it now */
2159 __jbd2_journal_remove_checkpoint(jh);
2155 goto zap_buffer; 2160 goto zap_buffer;
2156 } 2161 }
2157 2162
@@ -2181,6 +2186,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
2181 /* The orphan record's transaction has 2186 /* The orphan record's transaction has
2182 * committed. We can cleanse this buffer */ 2187 * committed. We can cleanse this buffer */
2183 clear_buffer_jbddirty(bh); 2188 clear_buffer_jbddirty(bh);
2189 __jbd2_journal_remove_checkpoint(jh);
2184 goto zap_buffer; 2190 goto zap_buffer;
2185 } 2191 }
2186 } 2192 }
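The jbd2 hunks thread an undo flag into the fast-path check so undo access is only treated as already granted when a committed-data copy exists. A minimal sketch of that predicate, assuming a pared-down journal_head and omitting all of the real function's locking and RCU:

#include <assert.h>
#include <stdbool.h>

struct jh_sketch {
	void *b_transaction;
	void *b_next_transaction;
	void *b_committed_data;
};

/* Modeled on the fast path above: access is already granted only when the
 * buffer belongs to this handle's transaction and, for undo access, a
 * committed-data copy has already been made. */
static bool write_access_granted(const struct jh_sketch *jh,
				 const void *txn, bool undo)
{
	if (!jh)
		return false;
	if (undo && !jh->b_committed_data)
		return false;
	return jh->b_transaction == txn || jh->b_next_transaction == txn;
}

int main(void)
{
	int t;
	struct jh_sketch jh = { .b_transaction = &t };

	assert(write_access_granted(&jh, &t, false)); /* plain write: granted */
	assert(!write_access_granted(&jh, &t, true)); /* undo: needs the copy */
	return 0;
}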
diff --git a/fs/namei.c b/fs/namei.c
index d84d7c7515fc..0c3974cd3ecd 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1996,7 +1996,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
1996 nd->last_type = LAST_ROOT; /* if there are only slashes... */ 1996 nd->last_type = LAST_ROOT; /* if there are only slashes... */
1997 nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; 1997 nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
1998 nd->depth = 0; 1998 nd->depth = 0;
1999 nd->total_link_count = 0;
2000 if (flags & LOOKUP_ROOT) { 1999 if (flags & LOOKUP_ROOT) {
2001 struct dentry *root = nd->root.dentry; 2000 struct dentry *root = nd->root.dentry;
2002 struct inode *inode = root->d_inode; 2001 struct inode *inode = root->d_inode;
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 79b113048eac..0a3f9b594602 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -525,6 +525,8 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
525 switch (rqdata.cmd) { 525 switch (rqdata.cmd) {
526 case NCP_LOCK_EX: 526 case NCP_LOCK_EX:
527 case NCP_LOCK_SH: 527 case NCP_LOCK_SH:
528 if (rqdata.timeout < 0)
529 return -EINVAL;
528 if (rqdata.timeout == 0) 530 if (rqdata.timeout == 0)
529 rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT; 531 rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT;
530 else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT) 532 else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT)
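The two added lines close a signedness hole: rqdata.timeout is user-controlled, and the existing checks only handled zero and too-large values, letting negatives through. A runnable model of the full sanitation (the constant values here are made up, not ncpfs's):

#include <assert.h>

#define LOCK_DEFAULT_TIMEOUT 18		/* hypothetical */
#define LOCK_MAX_TIMEOUT     180	/* hypothetical */

/* Reject negatives (-1 plays the role of -EINVAL), map 0 to the default,
 * clamp anything above the maximum. */
static int sanitize_timeout(long t)
{
	if (t < 0)
		return -1;
	if (t == 0)
		return LOCK_DEFAULT_TIMEOUT;
	if (t > LOCK_MAX_TIMEOUT)
		return LOCK_MAX_TIMEOUT;
	return (int)t;
}

int main(void)
{
	assert(sanitize_timeout(-5) == -1);
	assert(sanitize_timeout(0) == LOCK_DEFAULT_TIMEOUT);
	assert(sanitize_timeout(1000) == LOCK_MAX_TIMEOUT);
	assert(sanitize_timeout(30) == 30);
	return 0;
}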
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 326d9e10d833..c7e8b87da5b2 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -75,11 +75,11 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
75 * nfs_wait_bit_killable - helper for functions that are sleeping on bit locks 75 * nfs_wait_bit_killable - helper for functions that are sleeping on bit locks
76 * @word: long word containing the bit lock 76 * @word: long word containing the bit lock
77 */ 77 */
78int nfs_wait_bit_killable(struct wait_bit_key *key) 78int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
79{ 79{
80 if (fatal_signal_pending(current))
81 return -ERESTARTSYS;
82 freezable_schedule_unsafe(); 80 freezable_schedule_unsafe();
81 if (signal_pending_state(mode, current))
82 return -ERESTARTSYS;
83 return 0; 83 return 0;
84} 84}
85EXPORT_SYMBOL_GPL(nfs_wait_bit_killable); 85EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
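The reordering makes the callback sleep first and only then test for signals, and the new mode argument lets that test match how the task went to sleep. A toy model of why the mode matters; the real signal_pending_state() inspects task state bits rather than an enum:

#include <assert.h>
#include <stdbool.h>

enum sleep_mode { MODE_INTERRUPTIBLE, MODE_KILLABLE };

struct task_sketch {
	bool signal_pending;		/* any signal queued */
	bool fatal_signal_pending;	/* SIGKILL queued */
};

/* An interruptible sleeper wakes for any signal, a killable sleeper only
 * for a fatal one. */
static bool signal_pending_state(enum sleep_mode mode,
				 const struct task_sketch *t)
{
	if (mode == MODE_INTERRUPTIBLE)
		return t->signal_pending;
	return t->fatal_signal_pending;
}

int main(void)
{
	struct task_sketch t = { .signal_pending = true,
				 .fatal_signal_pending = false };

	assert(signal_pending_state(MODE_INTERRUPTIBLE, &t));
	assert(!signal_pending_state(MODE_KILLABLE, &t)); /* keeps sleeping */
	return 0;
}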
@@ -618,7 +618,10 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
618 nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC); 618 nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
619 nfs_vmtruncate(inode, attr->ia_size); 619 nfs_vmtruncate(inode, attr->ia_size);
620 } 620 }
621 nfs_update_inode(inode, fattr); 621 if (fattr->valid)
622 nfs_update_inode(inode, fattr);
623 else
624 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
622 spin_unlock(&inode->i_lock); 625 spin_unlock(&inode->i_lock);
623} 626}
624EXPORT_SYMBOL_GPL(nfs_setattr_update_inode); 627EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
@@ -1824,7 +1827,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1824 if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0) 1827 if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
1825 nfsi->attr_gencount = fattr->gencount; 1828 nfsi->attr_gencount = fattr->gencount;
1826 } 1829 }
1827 invalid &= ~NFS_INO_INVALID_ATTR; 1830
1831 /* Don't declare attrcache up to date if there were no attrs! */
1832 if (fattr->valid != 0)
1833 invalid &= ~NFS_INO_INVALID_ATTR;
1834
1828 /* Don't invalidate the data if we were to blame */ 1835 /* Don't invalidate the data if we were to blame */
1829 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) 1836 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
1830 || S_ISLNK(inode->i_mode))) 1837 || S_ISLNK(inode->i_mode)))
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 56cfde26fb9c..9dea85f7f918 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -379,7 +379,7 @@ extern int nfs_drop_inode(struct inode *);
379extern void nfs_clear_inode(struct inode *); 379extern void nfs_clear_inode(struct inode *);
380extern void nfs_evict_inode(struct inode *); 380extern void nfs_evict_inode(struct inode *);
381void nfs_zap_acl_cache(struct inode *inode); 381void nfs_zap_acl_cache(struct inode *inode);
382extern int nfs_wait_bit_killable(struct wait_bit_key *key); 382extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
383 383
384/* super.c */ 384/* super.c */
385extern const struct super_operations nfs_sops; 385extern const struct super_operations nfs_sops;
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 3e92a3cde15d..6b1ce9825430 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -14,7 +14,7 @@
14#include "pnfs.h" 14#include "pnfs.h"
15#include "internal.h" 15#include "internal.h"
16 16
17#define NFSDBG_FACILITY NFSDBG_PNFS 17#define NFSDBG_FACILITY NFSDBG_PROC
18 18
19static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file, 19static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file,
20 fmode_t fmode) 20 fmode_t fmode)
@@ -284,6 +284,7 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
284 .dst_fh = NFS_FH(dst_inode), 284 .dst_fh = NFS_FH(dst_inode),
285 .src_offset = src_offset, 285 .src_offset = src_offset,
286 .dst_offset = dst_offset, 286 .dst_offset = dst_offset,
287 .count = count,
287 .dst_bitmask = server->cache_consistency_bitmask, 288 .dst_bitmask = server->cache_consistency_bitmask,
288 }; 289 };
289 struct nfs42_clone_res res = { 290 struct nfs42_clone_res res = {
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 223bedda64ae..10410e8b5853 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -33,7 +33,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
33 return ret; 33 return ret;
34 idr_preload(GFP_KERNEL); 34 idr_preload(GFP_KERNEL);
35 spin_lock(&nn->nfs_client_lock); 35 spin_lock(&nn->nfs_client_lock);
36 ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT); 36 ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT);
37 if (ret >= 0) 37 if (ret >= 0)
38 clp->cl_cb_ident = ret; 38 clp->cl_cb_ident = ret;
39 spin_unlock(&nn->nfs_client_lock); 39 spin_unlock(&nn->nfs_client_lock);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 4aa571956cd6..db9b5fea5b3e 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -7,6 +7,7 @@
7#include <linux/file.h> 7#include <linux/file.h>
8#include <linux/falloc.h> 8#include <linux/falloc.h>
9#include <linux/nfs_fs.h> 9#include <linux/nfs_fs.h>
10#include <uapi/linux/btrfs.h> /* BTRFS_IOC_CLONE/BTRFS_IOC_CLONE_RANGE */
10#include "delegation.h" 11#include "delegation.h"
11#include "internal.h" 12#include "internal.h"
12#include "iostat.h" 13#include "iostat.h"
@@ -203,6 +204,7 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
203 struct fd src_file; 204 struct fd src_file;
204 struct inode *src_inode; 205 struct inode *src_inode;
205 unsigned int bs = server->clone_blksize; 206 unsigned int bs = server->clone_blksize;
207 bool same_inode = false;
206 int ret; 208 int ret;
207 209
208 /* dst file must be opened for writing */ 210 /* dst file must be opened for writing */
@@ -221,10 +223,8 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
221 223
222 src_inode = file_inode(src_file.file); 224 src_inode = file_inode(src_file.file);
223 225
224 /* src and dst must be different files */
225 ret = -EINVAL;
226 if (src_inode == dst_inode) 226 if (src_inode == dst_inode)
227 goto out_fput; 227 same_inode = true;
228 228
229 /* src file must be opened for reading */ 229 /* src file must be opened for reading */
230 if (!(src_file.file->f_mode & FMODE_READ)) 230 if (!(src_file.file->f_mode & FMODE_READ))
@@ -249,8 +249,16 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
249 goto out_fput; 249 goto out_fput;
250 } 250 }
251 251
252	/* check whether the ranges overlap within the same file */
253 if (same_inode) {
254 if (dst_off + count > src_off && dst_off < src_off + count)
255 goto out_fput;
256 }
257
252 /* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */ 258 /* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */
253 if (dst_inode < src_inode) { 259 if (same_inode) {
260 mutex_lock(&src_inode->i_mutex);
261 } else if (dst_inode < src_inode) {
254 mutex_lock_nested(&dst_inode->i_mutex, I_MUTEX_PARENT); 262 mutex_lock_nested(&dst_inode->i_mutex, I_MUTEX_PARENT);
255 mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD); 263 mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
256 } else { 264 } else {
@@ -275,7 +283,9 @@ nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
275 truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1); 283 truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1);
276 284
277out_unlock: 285out_unlock:
278 if (dst_inode < src_inode) { 286 if (same_inode) {
287 mutex_unlock(&src_inode->i_mutex);
288 } else if (dst_inode < src_inode) {
279 mutex_unlock(&src_inode->i_mutex); 289 mutex_unlock(&src_inode->i_mutex);
280 mutex_unlock(&dst_inode->i_mutex); 290 mutex_unlock(&dst_inode->i_mutex);
281 } else { 291 } else {
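With same-inode clones now allowed, the two hunks above add the two things that make that safe: an overlap rejection and a single i_mutex acquisition (double-locking one inode's mutex via the two-inode path would self-deadlock). The overlap test is the usual interval check; standalone:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* [dst_off, dst_off + count) overlaps [src_off, src_off + count) iff each
 * range starts before the other one ends -- the test used in the hunk. */
static bool ranges_overlap(uint64_t src_off, uint64_t dst_off, uint64_t count)
{
	return dst_off + count > src_off && dst_off < src_off + count;
}

int main(void)
{
	assert(!ranges_overlap(0, 4096, 4096));	/* adjacent: allowed */
	assert(ranges_overlap(0, 2048, 4096));	/* partial overlap */
	assert(ranges_overlap(100, 100, 1));	/* identical range */
	return 0;
}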
@@ -291,46 +301,31 @@ out_drop_write:
291 301
292static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp) 302static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp)
293{ 303{
294 struct nfs_ioctl_clone_range_args args; 304 struct btrfs_ioctl_clone_range_args args;
295 305
296 if (copy_from_user(&args, argp, sizeof(args))) 306 if (copy_from_user(&args, argp, sizeof(args)))
297 return -EFAULT; 307 return -EFAULT;
298 308
299 return nfs42_ioctl_clone(dst_file, args.src_fd, args.src_off, args.dst_off, args.count); 309 return nfs42_ioctl_clone(dst_file, args.src_fd, args.src_offset,
300} 310 args.dest_offset, args.src_length);
301#else
302static long nfs42_ioctl_clone(struct file *dst_file, unsigned long srcfd,
303 u64 src_off, u64 dst_off, u64 count)
304{
305 return -ENOTTY;
306}
307
308static long nfs42_ioctl_clone_range(struct file *dst_file, void __user *argp)
309{
310 return -ENOTTY;
311} 311}
312#endif /* CONFIG_NFS_V4_2 */
313 312
314long nfs4_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 313long nfs4_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
315{ 314{
316 void __user *argp = (void __user *)arg; 315 void __user *argp = (void __user *)arg;
317 316
318 switch (cmd) { 317 switch (cmd) {
319 case NFS_IOC_CLONE: 318 case BTRFS_IOC_CLONE:
320 return nfs42_ioctl_clone(file, arg, 0, 0, 0); 319 return nfs42_ioctl_clone(file, arg, 0, 0, 0);
321 case NFS_IOC_CLONE_RANGE: 320 case BTRFS_IOC_CLONE_RANGE:
322 return nfs42_ioctl_clone_range(file, argp); 321 return nfs42_ioctl_clone_range(file, argp);
323 } 322 }
324 323
325 return -ENOTTY; 324 return -ENOTTY;
326} 325}
326#endif /* CONFIG_NFS_V4_2 */
327 327
328const struct file_operations nfs4_file_operations = { 328const struct file_operations nfs4_file_operations = {
329#ifdef CONFIG_NFS_V4_2
330 .llseek = nfs4_file_llseek,
331#else
332 .llseek = nfs_file_llseek,
333#endif
334 .read_iter = nfs_file_read, 329 .read_iter = nfs_file_read,
335 .write_iter = nfs_file_write, 330 .write_iter = nfs_file_write,
336 .mmap = nfs_file_mmap, 331 .mmap = nfs_file_mmap,
@@ -342,14 +337,14 @@ const struct file_operations nfs4_file_operations = {
342 .flock = nfs_flock, 337 .flock = nfs_flock,
343 .splice_read = nfs_file_splice_read, 338 .splice_read = nfs_file_splice_read,
344 .splice_write = iter_file_splice_write, 339 .splice_write = iter_file_splice_write,
345#ifdef CONFIG_NFS_V4_2
346 .fallocate = nfs42_fallocate,
347#endif /* CONFIG_NFS_V4_2 */
348 .check_flags = nfs_check_flags, 340 .check_flags = nfs_check_flags,
349 .setlease = simple_nosetlease, 341 .setlease = simple_nosetlease,
350#ifdef CONFIG_COMPAT 342#ifdef CONFIG_NFS_V4_2
343 .llseek = nfs4_file_llseek,
344 .fallocate = nfs42_fallocate,
351 .unlocked_ioctl = nfs4_ioctl, 345 .unlocked_ioctl = nfs4_ioctl,
352#else
353 .compat_ioctl = nfs4_ioctl, 346 .compat_ioctl = nfs4_ioctl,
354#endif /* CONFIG_COMPAT */ 347#else
348 .llseek = nfs_file_llseek,
349#endif
355}; 350};
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 765a03559363..89818036f035 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -7866,7 +7866,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7866 spin_unlock(&inode->i_lock); 7866 spin_unlock(&inode->i_lock);
7867 goto out_restart; 7867 goto out_restart;
7868 } 7868 }
7869 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) 7869 if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN)
7870 goto out_restart; 7870 goto out_restart;
7871out: 7871out:
7872 dprintk("<-- %s\n", __func__); 7872 dprintk("<-- %s\n", __func__);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index dfed4f5c8fcc..4e4441216804 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3615,6 +3615,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
3615 status = 0; 3615 status = 0;
3616 if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS))) 3616 if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS)))
3617 goto out; 3617 goto out;
3618 bitmap[0] &= ~FATTR4_WORD0_FS_LOCATIONS;
3618 status = -EIO; 3619 status = -EIO;
3619 /* Ignore broken servers that return unrequested attrs */ 3620 /* Ignore broken servers that return unrequested attrs */
3620 if (unlikely(res == NULL)) 3621 if (unlikely(res == NULL))
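The single added line follows the nfs4xdr convention that every decode_attr_*() helper clears the bitmap bit it consumed, so the trailing check for unrequested attributes only fires for genuinely unhandled ones. A miniature of the convention (the bit value is illustrative):

#include <assert.h>
#include <stdint.h>

#define ATTR_FS_LOCATIONS (1u << 24)	/* illustrative bit position */

/* Consume the bit on the way through; any bit still set at the end marks
 * an attribute no decoder claimed. */
static int decode_attr(uint32_t *bitmap)
{
	if (!(bitmap[0] & ATTR_FS_LOCATIONS))
		return 0;		/* attribute absent: nothing to do */
	bitmap[0] &= ~ATTR_FS_LOCATIONS;
	/* ... decode the attribute body here ... */
	return 1;
}

int main(void)
{
	uint32_t bitmap[1] = { ATTR_FS_LOCATIONS };

	assert(decode_attr(bitmap) == 1);
	assert(bitmap[0] == 0);		/* fully consumed */
	return 0;
}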
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 5c0c6b58157f..9aebffb40505 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -476,10 +476,7 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
476 } 476 }
477 unlock_page(page); 477 unlock_page(page);
478 } 478 }
479 if (PageDirty(page) || PageWriteback(page)) 479 *uptodate = PageUptodate(page);
480 *uptodate = true;
481 else
482 *uptodate = PageUptodate(page);
483 dprintk("%s: index=0x%lx uptodate=%d\n", __func__, index, *uptodate); 480 dprintk("%s: index=0x%lx uptodate=%d\n", __func__, index, *uptodate);
484 return page; 481 return page;
485} 482}
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index fe3ddd20ff89..452a011ba0d8 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -129,7 +129,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
129 set_bit(NFS_IO_INPROGRESS, &c->flags); 129 set_bit(NFS_IO_INPROGRESS, &c->flags);
130 if (atomic_read(&c->io_count) == 0) 130 if (atomic_read(&c->io_count) == 0)
131 break; 131 break;
132 ret = nfs_wait_bit_killable(&q.key); 132 ret = nfs_wait_bit_killable(&q.key, TASK_KILLABLE);
133 } while (atomic_read(&c->io_count) != 0 && !ret); 133 } while (atomic_read(&c->io_count) != 0 && !ret);
134 finish_wait(wq, &q.wait); 134 finish_wait(wq, &q.wait);
135 return ret; 135 return ret;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 93496c059837..bec0384499f7 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -872,33 +872,38 @@ send_layoutget(struct pnfs_layout_hdr *lo,
872 872
873 dprintk("--> %s\n", __func__); 873 dprintk("--> %s\n", __func__);
874 874
875 lgp = kzalloc(sizeof(*lgp), gfp_flags); 875 /*
876 if (lgp == NULL) 876 * Synchronously retrieve layout information from server and
877 return NULL; 877 * store in lseg. If we race with a concurrent seqid morphing
878 * op, then re-send the LAYOUTGET.
879 */
880 do {
881 lgp = kzalloc(sizeof(*lgp), gfp_flags);
882 if (lgp == NULL)
883 return NULL;
884
885 i_size = i_size_read(ino);
886
887 lgp->args.minlength = PAGE_CACHE_SIZE;
888 if (lgp->args.minlength > range->length)
889 lgp->args.minlength = range->length;
890 if (range->iomode == IOMODE_READ) {
891 if (range->offset >= i_size)
892 lgp->args.minlength = 0;
893 else if (i_size - range->offset < lgp->args.minlength)
894 lgp->args.minlength = i_size - range->offset;
895 }
896 lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
897 lgp->args.range = *range;
898 lgp->args.type = server->pnfs_curr_ld->id;
899 lgp->args.inode = ino;
900 lgp->args.ctx = get_nfs_open_context(ctx);
901 lgp->gfp_flags = gfp_flags;
902 lgp->cred = lo->plh_lc_cred;
878 903
879 i_size = i_size_read(ino); 904 lseg = nfs4_proc_layoutget(lgp, gfp_flags);
905 } while (lseg == ERR_PTR(-EAGAIN));
880 906
881 lgp->args.minlength = PAGE_CACHE_SIZE;
882 if (lgp->args.minlength > range->length)
883 lgp->args.minlength = range->length;
884 if (range->iomode == IOMODE_READ) {
885 if (range->offset >= i_size)
886 lgp->args.minlength = 0;
887 else if (i_size - range->offset < lgp->args.minlength)
888 lgp->args.minlength = i_size - range->offset;
889 }
890 lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
891 lgp->args.range = *range;
892 lgp->args.type = server->pnfs_curr_ld->id;
893 lgp->args.inode = ino;
894 lgp->args.ctx = get_nfs_open_context(ctx);
895 lgp->gfp_flags = gfp_flags;
896 lgp->cred = lo->plh_lc_cred;
897
898 /* Synchronously retrieve layout information from server and
899 * store in lseg.
900 */
901 lseg = nfs4_proc_layoutget(lgp, gfp_flags);
902 if (IS_ERR(lseg)) { 907 if (IS_ERR(lseg)) {
903 switch (PTR_ERR(lseg)) { 908 switch (PTR_ERR(lseg)) {
904 case -ENOMEM: 909 case -ENOMEM:
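The rewritten send_layoutget() builds its arguments and issues the synchronous RPC inside a do/while, so a race with a concurrent seqid-morphing op, surfaced as ERR_PTR(-EAGAIN) by the pnfs_layout_process() hunk further down, just rebuilds and re-sends. A userspace sketch of that retry shape with the same ERR_PTR encoding; issue_op() is hypothetical and fails twice for the demo:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal version of the kernel's ERR_PTR convention: small negative
 * errnos encoded in the pointer value. */
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)

/* Stand-in for nfs4_proc_layoutget(): consumes its arguments and fails
 * with -EAGAIN on the first two attempts. */
static void *issue_op(void *args)
{
	static int attempts;

	free(args);
	if (attempts++ < 2)
		return ERR_PTR(-EAGAIN);
	return malloc(1);		/* a real result */
}

int main(void)
{
	void *res;

	do {
		void *args = malloc(64);	/* rebuilt fresh per attempt */
		if (!args)
			return 1;
		res = issue_op(args);
	} while (res == ERR_PTR(-EAGAIN));

	if (!IS_ERR(res)) {
		puts("got layout");
		free(res);
	}
	return 0;
}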
@@ -1461,11 +1466,11 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
1461} 1466}
1462 1467
1463/* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */ 1468/* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */
1464static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key) 1469static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key, int mode)
1465{ 1470{
1466 if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags)) 1471 if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags))
1467 return 1; 1472 return 1;
1468 return nfs_wait_bit_killable(key); 1473 return nfs_wait_bit_killable(key, mode);
1469} 1474}
1470 1475
1471static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo) 1476static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
@@ -1687,6 +1692,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
1687 /* existing state ID, make sure the sequence number matches. */ 1692 /* existing state ID, make sure the sequence number matches. */
1688 if (pnfs_layout_stateid_blocked(lo, &res->stateid)) { 1693 if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
1689 dprintk("%s forget reply due to sequence\n", __func__); 1694 dprintk("%s forget reply due to sequence\n", __func__);
1695 status = -EAGAIN;
1690 goto out_forget_reply; 1696 goto out_forget_reply;
1691 } 1697 }
1692 pnfs_set_layout_stateid(lo, &res->stateid, false); 1698 pnfs_set_layout_stateid(lo, &res->stateid, false);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 3b48ac25d8a7..3123408da935 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -367,7 +367,7 @@ static int ocfs2_mknod(struct inode *dir,
367 goto leave; 367 goto leave;
368 } 368 }
369 369
370 status = posix_acl_create(dir, &mode, &default_acl, &acl); 370 status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
371 if (status) { 371 if (status) {
372 mlog_errno(status); 372 mlog_errno(status);
373 goto leave; 373 goto leave;
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 871fcb67be97..0a8983492d91 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -195,8 +195,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
195 195
196static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, 196static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
197 struct dentry *dentry, struct path *lowerpath, 197 struct dentry *dentry, struct path *lowerpath,
198 struct kstat *stat, struct iattr *attr, 198 struct kstat *stat, const char *link)
199 const char *link)
200{ 199{
201 struct inode *wdir = workdir->d_inode; 200 struct inode *wdir = workdir->d_inode;
202 struct inode *udir = upperdir->d_inode; 201 struct inode *udir = upperdir->d_inode;
@@ -240,8 +239,6 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
240 239
241 mutex_lock(&newdentry->d_inode->i_mutex); 240 mutex_lock(&newdentry->d_inode->i_mutex);
242 err = ovl_set_attr(newdentry, stat); 241 err = ovl_set_attr(newdentry, stat);
243 if (!err && attr)
244 err = notify_change(newdentry, attr, NULL);
245 mutex_unlock(&newdentry->d_inode->i_mutex); 242 mutex_unlock(&newdentry->d_inode->i_mutex);
246 if (err) 243 if (err)
247 goto out_cleanup; 244 goto out_cleanup;
@@ -286,8 +283,7 @@ out_cleanup:
286 * that point the file will have already been copied up anyway. 283 * that point the file will have already been copied up anyway.
287 */ 284 */
288int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, 285int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
289 struct path *lowerpath, struct kstat *stat, 286 struct path *lowerpath, struct kstat *stat)
290 struct iattr *attr)
291{ 287{
292 struct dentry *workdir = ovl_workdir(dentry); 288 struct dentry *workdir = ovl_workdir(dentry);
293 int err; 289 int err;
@@ -345,26 +341,19 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
345 } 341 }
346 upperdentry = ovl_dentry_upper(dentry); 342 upperdentry = ovl_dentry_upper(dentry);
347 if (upperdentry) { 343 if (upperdentry) {
348 unlock_rename(workdir, upperdir); 344 /* Raced with another copy-up? Nothing to do, then... */
349 err = 0; 345 err = 0;
350 /* Raced with another copy-up? Do the setattr here */ 346 goto out_unlock;
351 if (attr) {
352 mutex_lock(&upperdentry->d_inode->i_mutex);
353 err = notify_change(upperdentry, attr, NULL);
354 mutex_unlock(&upperdentry->d_inode->i_mutex);
355 }
356 goto out_put_cred;
357 } 347 }
358 348
359 err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath, 349 err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath,
360 stat, attr, link); 350 stat, link);
361 if (!err) { 351 if (!err) {
362 /* Restore timestamps on parent (best effort) */ 352 /* Restore timestamps on parent (best effort) */
363 ovl_set_timestamps(upperdir, &pstat); 353 ovl_set_timestamps(upperdir, &pstat);
364 } 354 }
365out_unlock: 355out_unlock:
366 unlock_rename(workdir, upperdir); 356 unlock_rename(workdir, upperdir);
367out_put_cred:
368 revert_creds(old_cred); 357 revert_creds(old_cred);
369 put_cred(override_cred); 358 put_cred(override_cred);
370 359
@@ -406,7 +395,7 @@ int ovl_copy_up(struct dentry *dentry)
406 ovl_path_lower(next, &lowerpath); 395 ovl_path_lower(next, &lowerpath);
407 err = vfs_getattr(&lowerpath, &stat); 396 err = vfs_getattr(&lowerpath, &stat);
408 if (!err) 397 if (!err)
409 err = ovl_copy_up_one(parent, next, &lowerpath, &stat, NULL); 398 err = ovl_copy_up_one(parent, next, &lowerpath, &stat);
410 399
411 dput(parent); 400 dput(parent);
412 dput(next); 401 dput(next);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index ec0c2a050043..4060ffde8722 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -12,8 +12,7 @@
12#include <linux/xattr.h> 12#include <linux/xattr.h>
13#include "overlayfs.h" 13#include "overlayfs.h"
14 14
15static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr, 15static int ovl_copy_up_truncate(struct dentry *dentry)
16 bool no_data)
17{ 16{
18 int err; 17 int err;
19 struct dentry *parent; 18 struct dentry *parent;
@@ -30,10 +29,8 @@ static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr,
30 if (err) 29 if (err)
31 goto out_dput_parent; 30 goto out_dput_parent;
32 31
33 if (no_data) 32 stat.size = 0;
34 stat.size = 0; 33 err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
35
36 err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat, attr);
37 34
38out_dput_parent: 35out_dput_parent:
39 dput(parent); 36 dput(parent);
@@ -49,13 +46,13 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
49 if (err) 46 if (err)
50 goto out; 47 goto out;
51 48
52 upperdentry = ovl_dentry_upper(dentry); 49 err = ovl_copy_up(dentry);
53 if (upperdentry) { 50 if (!err) {
51 upperdentry = ovl_dentry_upper(dentry);
52
54 mutex_lock(&upperdentry->d_inode->i_mutex); 53 mutex_lock(&upperdentry->d_inode->i_mutex);
55 err = notify_change(upperdentry, attr, NULL); 54 err = notify_change(upperdentry, attr, NULL);
56 mutex_unlock(&upperdentry->d_inode->i_mutex); 55 mutex_unlock(&upperdentry->d_inode->i_mutex);
57 } else {
58 err = ovl_copy_up_last(dentry, attr, false);
59 } 56 }
60 ovl_drop_write(dentry); 57 ovl_drop_write(dentry);
61out: 58out:
@@ -353,7 +350,7 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
353 return ERR_PTR(err); 350 return ERR_PTR(err);
354 351
355 if (file_flags & O_TRUNC) 352 if (file_flags & O_TRUNC)
356 err = ovl_copy_up_last(dentry, NULL, true); 353 err = ovl_copy_up_truncate(dentry);
357 else 354 else
358 err = ovl_copy_up(dentry); 355 err = ovl_copy_up(dentry);
359 ovl_drop_write(dentry); 356 ovl_drop_write(dentry);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index ea5a40b06e3a..e17154aeaae4 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -194,7 +194,6 @@ void ovl_cleanup(struct inode *dir, struct dentry *dentry);
194/* copy_up.c */ 194/* copy_up.c */
195int ovl_copy_up(struct dentry *dentry); 195int ovl_copy_up(struct dentry *dentry);
196int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, 196int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
197 struct path *lowerpath, struct kstat *stat, 197 struct path *lowerpath, struct kstat *stat);
198 struct iattr *attr);
199int ovl_copy_xattr(struct dentry *old, struct dentry *new); 198int ovl_copy_xattr(struct dentry *old, struct dentry *new);
200int ovl_set_attr(struct dentry *upper, struct kstat *stat); 199int ovl_set_attr(struct dentry *upper, struct kstat *stat);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index bd3e9e68125b..4bd5d3118acd 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2494,6 +2494,7 @@ static ssize_t proc_coredump_filter_write(struct file *file,
2494 mm = get_task_mm(task); 2494 mm = get_task_mm(task);
2495 if (!mm) 2495 if (!mm)
2496 goto out_no_mm; 2496 goto out_no_mm;
2497 ret = 0;
2497 2498
2498 for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) { 2499 for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) {
2499 if (val & mask) 2500 if (val & mask)
diff --git a/fs/splice.c b/fs/splice.c
index 801c21cd77fe..4cf700d50b40 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -809,6 +809,13 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
809 */ 809 */
810static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) 810static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
811{ 811{
812 /*
813 * Check for a signal early so the process stays killable even when
814 * buffers are always available
815 */
816 if (signal_pending(current))
817 return -ERESTARTSYS;
818
812 while (!pipe->nrbufs) { 819 while (!pipe->nrbufs) {
813 if (!pipe->writers) 820 if (!pipe->writers)
814 return 0; 821 return 0;
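The early check exists because a pipe writer that always finds buffers would otherwise loop without ever reaching the signal test on the empty path, leaving the task unkillable; the cond_resched() added in the next hunk addresses the scheduling half of the same problem. A userspace analogue of checking at the top of every iteration:

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t got_signal;

static void handler(int sig) { (void)sig; got_signal = 1; }

int main(void)
{
	signal(SIGINT, handler);

	for (unsigned long i = 0; ; i++) {
		if (got_signal) {		/* -ERESTARTSYS analogue */
			puts("interrupted");
			return 1;
		}
		/* ... move one buffer; work is always available here ... */
		if (i == 100000000UL)		/* bounded for the demo */
			break;
	}
	return 0;
}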
@@ -884,6 +891,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
884 891
885 splice_from_pipe_begin(sd); 892 splice_from_pipe_begin(sd);
886 do { 893 do {
894 cond_resched();
887 ret = splice_from_pipe_next(pipe, sd); 895 ret = splice_from_pipe_next(pipe, sd);
888 if (ret > 0) 896 if (ret > 0)
889 ret = splice_from_pipe_feed(pipe, sd, actor); 897 ret = splice_from_pipe_feed(pipe, sd, actor);
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 590ad9206e3f..02fa1dcc5969 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -162,15 +162,8 @@ void sysv_set_inode(struct inode *inode, dev_t rdev)
162 inode->i_fop = &sysv_dir_operations; 162 inode->i_fop = &sysv_dir_operations;
163 inode->i_mapping->a_ops = &sysv_aops; 163 inode->i_mapping->a_ops = &sysv_aops;
164 } else if (S_ISLNK(inode->i_mode)) { 164 } else if (S_ISLNK(inode->i_mode)) {
165 if (inode->i_blocks) { 165 inode->i_op = &sysv_symlink_inode_operations;
166 inode->i_op = &sysv_symlink_inode_operations; 166 inode->i_mapping->a_ops = &sysv_aops;
167 inode->i_mapping->a_ops = &sysv_aops;
168 } else {
169 inode->i_op = &simple_symlink_inode_operations;
170 inode->i_link = (char *)SYSV_I(inode)->i_data;
171 nd_terminate_link(inode->i_link, inode->i_size,
172 sizeof(SYSV_I(inode)->i_data) - 1);
173 }
174 } else 167 } else
175 init_special_inode(inode, inode->i_mode, rdev); 168 init_special_inode(inode, inode->i_mode, rdev);
176} 169}
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index db284bff29dc..9dbb739cafa0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -5,7 +5,7 @@
5 * Copyright 2001 Red Hat, Inc. 5 * Copyright 2001 Red Hat, Inc.
6 * Based on code from mm/memory.c Copyright Linus Torvalds and others. 6 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
7 * 7 *
8 * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 0b921ae06cd8..0a271ca1f7c7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -309,6 +309,11 @@ struct drm_file {
309 unsigned universal_planes:1; 309 unsigned universal_planes:1;
310 /* true if client understands atomic properties */ 310 /* true if client understands atomic properties */
311 unsigned atomic:1; 311 unsigned atomic:1;
312 /*
313 * This client is allowed to gain master privileges for @master.
314 * Protected by struct drm_device::master_mutex.
315 */
316 unsigned allowed_master:1;
312 317
313 struct pid *pid; 318 struct pid *pid;
314 kuid_t uid; 319 kuid_t uid;
@@ -910,6 +915,7 @@ extern int drm_open(struct inode *inode, struct file *filp);
910extern ssize_t drm_read(struct file *filp, char __user *buffer, 915extern ssize_t drm_read(struct file *filp, char __user *buffer,
911 size_t count, loff_t *offset); 916 size_t count, loff_t *offset);
912extern int drm_release(struct inode *inode, struct file *filp); 917extern int drm_release(struct inode *inode, struct file *filp);
918extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
913 919
914 /* Mapping support (drm_vm.h) */ 920 /* Mapping support (drm_vm.h) */
915extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); 921extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -947,6 +953,10 @@ extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
947 struct drm_pending_vblank_event *e); 953 struct drm_pending_vblank_event *e);
948extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, 954extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
949 struct drm_pending_vblank_event *e); 955 struct drm_pending_vblank_event *e);
956extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
957 struct drm_pending_vblank_event *e);
958extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
959 struct drm_pending_vblank_event *e);
950extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); 960extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
951extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); 961extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
952extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe); 962extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index e67aeac2aee0..4b74c97d297a 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -136,6 +136,9 @@ drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
136 136
137void drm_atomic_legacy_backoff(struct drm_atomic_state *state); 137void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
138 138
139void
140drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
141
139int __must_check drm_atomic_check_only(struct drm_atomic_state *state); 142int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
140int __must_check drm_atomic_commit(struct drm_atomic_state *state); 143int __must_check drm_atomic_commit(struct drm_atomic_state *state);
141int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); 144int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 9c747cb14ad8..d2f41477f8ae 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -342,10 +342,10 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
342 struct irq_phys_map *map, bool level); 342 struct irq_phys_map *map, bool level);
343void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); 343void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
344int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); 344int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
345int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
346struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, 345struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
347 int virt_irq, int irq); 346 int virt_irq, int irq);
348int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map); 347int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
348bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
349 349
350#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) 350#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
351#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) 351#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 054833939995..1991aea2ec4c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -870,8 +870,8 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
870} 870}
871 871
872static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, 872static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
873 const char *name, const char *cells_name, 873 const char *name, size_t index,
874 size_t index, struct acpi_reference_args *args) 874 struct acpi_reference_args *args)
875{ 875{
876 return -ENXIO; 876 return -ENXIO;
877} 877}
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 2b8ed123ad36..defeaac0745f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -107,7 +107,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
107 */ 107 */
108static inline __u32 rol32(__u32 word, unsigned int shift) 108static inline __u32 rol32(__u32 word, unsigned int shift)
109{ 109{
110 return (word << shift) | (word >> (32 - shift)); 110 return (word << shift) | (word >> ((-shift) & 31));
111} 111}
112 112
113/** 113/**
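The rol32() fix removes undefined behavior rather than changing any defined result: with the old expression, rol32(x, 0) evaluated x >> 32, which C leaves undefined for a 32-bit type, while ((-shift) & 31) maps a shift of 0 to 0 and every other shift in 1..31 to 32 - shift. A standalone check:

#include <assert.h>
#include <stdint.h>

static uint32_t rol32(uint32_t word, unsigned int shift)
{
	/* Unsigned negation is modular, so (-shift) & 31 is always a
	 * defined shift count for a 32-bit word. */
	return (word << shift) | (word >> ((-shift) & 31));
}

int main(void)
{
	assert(rol32(0x80000001u, 1) == 0x00000003u);
	assert(rol32(0xdeadbeefu, 0) == 0xdeadbeefu); /* the old UB case */
	return 0;
}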
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3fe27f8d91f0..0169ba2e2e64 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -254,6 +254,7 @@ struct queue_limits {
254 unsigned long virt_boundary_mask; 254 unsigned long virt_boundary_mask;
255 255
256 unsigned int max_hw_sectors; 256 unsigned int max_hw_sectors;
257 unsigned int max_dev_sectors;
257 unsigned int chunk_sectors; 258 unsigned int chunk_sectors;
258 unsigned int max_sectors; 259 unsigned int max_sectors;
259 unsigned int max_segment_size; 260 unsigned int max_segment_size;
@@ -773,7 +774,6 @@ extern void blk_rq_set_block_pc(struct request *);
773extern void blk_requeue_request(struct request_queue *, struct request *); 774extern void blk_requeue_request(struct request_queue *, struct request *);
774extern void blk_add_request_payload(struct request *rq, struct page *page, 775extern void blk_add_request_payload(struct request *rq, struct page *page,
775 unsigned int len); 776 unsigned int len);
776extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
777extern int blk_lld_busy(struct request_queue *q); 777extern int blk_lld_busy(struct request_queue *q);
778extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 778extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
779 struct bio_set *bs, gfp_t gfp_mask, 779 struct bio_set *bs, gfp_t gfp_mask,
@@ -794,6 +794,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
794extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, 794extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
795 struct scsi_ioctl_command __user *); 795 struct scsi_ioctl_command __user *);
796 796
797extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
798extern void blk_queue_exit(struct request_queue *q);
797extern void blk_start_queue(struct request_queue *q); 799extern void blk_start_queue(struct request_queue *q);
798extern void blk_stop_queue(struct request_queue *q); 800extern void blk_stop_queue(struct request_queue *q);
799extern void blk_sync_queue(struct request_queue *q); 801extern void blk_sync_queue(struct request_queue *q);
@@ -958,7 +960,6 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
958extern void blk_cleanup_queue(struct request_queue *); 960extern void blk_cleanup_queue(struct request_queue *);
959extern void blk_queue_make_request(struct request_queue *, make_request_fn *); 961extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
960extern void blk_queue_bounce_limit(struct request_queue *, u64); 962extern void blk_queue_bounce_limit(struct request_queue *, u64);
961extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
962extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); 963extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
963extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); 964extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
964extern void blk_queue_max_segments(struct request_queue *, unsigned short); 965extern void blk_queue_max_segments(struct request_queue *, unsigned short);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index de464e6683b6..83d1926c61e4 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -40,6 +40,7 @@ struct bpf_map {
40 struct user_struct *user; 40 struct user_struct *user;
41 const struct bpf_map_ops *ops; 41 const struct bpf_map_ops *ops;
42 struct work_struct work; 42 struct work_struct work;
43 atomic_t usercnt;
43}; 44};
44 45
45struct bpf_map_type_list { 46struct bpf_map_type_list {
@@ -167,8 +168,10 @@ struct bpf_prog *bpf_prog_get(u32 ufd);
167void bpf_prog_put(struct bpf_prog *prog); 168void bpf_prog_put(struct bpf_prog *prog);
168void bpf_prog_put_rcu(struct bpf_prog *prog); 169void bpf_prog_put_rcu(struct bpf_prog *prog);
169 170
170struct bpf_map *bpf_map_get(u32 ufd); 171struct bpf_map *bpf_map_get_with_uref(u32 ufd);
171struct bpf_map *__bpf_map_get(struct fd f); 172struct bpf_map *__bpf_map_get(struct fd f);
173void bpf_map_inc(struct bpf_map *map, bool uref);
174void bpf_map_put_with_uref(struct bpf_map *map);
172void bpf_map_put(struct bpf_map *map); 175void bpf_map_put(struct bpf_map *map);
173 176
174extern int sysctl_unprivileged_bpf_disabled; 177extern int sysctl_unprivileged_bpf_disabled;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 60d44b26276d..06b77f9dd3f2 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -90,7 +90,6 @@ enum {
90 */ 90 */
91struct cgroup_file { 91struct cgroup_file {
92 /* do not access any fields from outside cgroup core */ 92 /* do not access any fields from outside cgroup core */
93 struct list_head node; /* anchored at css->files */
94 struct kernfs_node *kn; 93 struct kernfs_node *kn;
95}; 94};
96 95
@@ -134,9 +133,6 @@ struct cgroup_subsys_state {
134 */ 133 */
135 u64 serial_nr; 134 u64 serial_nr;
136 135
137 /* all cgroup_files associated with this css */
138 struct list_head files;
139
140 /* percpu_ref killing and RCU release */ 136 /* percpu_ref killing and RCU release */
141 struct rcu_head rcu_head; 137 struct rcu_head rcu_head;
142 struct work_struct destroy_work; 138 struct work_struct destroy_work;
@@ -426,12 +422,9 @@ struct cgroup_subsys {
426 void (*css_reset)(struct cgroup_subsys_state *css); 422 void (*css_reset)(struct cgroup_subsys_state *css);
427 void (*css_e_css_changed)(struct cgroup_subsys_state *css); 423 void (*css_e_css_changed)(struct cgroup_subsys_state *css);
428 424
429 int (*can_attach)(struct cgroup_subsys_state *css, 425 int (*can_attach)(struct cgroup_taskset *tset);
430 struct cgroup_taskset *tset); 426 void (*cancel_attach)(struct cgroup_taskset *tset);
431 void (*cancel_attach)(struct cgroup_subsys_state *css, 427 void (*attach)(struct cgroup_taskset *tset);
432 struct cgroup_taskset *tset);
433 void (*attach)(struct cgroup_subsys_state *css,
434 struct cgroup_taskset *tset);
435 int (*can_fork)(struct task_struct *task, void **priv_p); 428 int (*can_fork)(struct task_struct *task, void **priv_p);
436 void (*cancel_fork)(struct task_struct *task, void *priv); 429 void (*cancel_fork)(struct task_struct *task, void *priv);
437 void (*fork)(struct task_struct *task, void *priv); 430 void (*fork)(struct task_struct *task, void *priv);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 22e3754f89c5..cb91b44f5f78 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -88,6 +88,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
88int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); 88int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
89int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); 89int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
90int cgroup_rm_cftypes(struct cftype *cfts); 90int cgroup_rm_cftypes(struct cftype *cfts);
91void cgroup_file_notify(struct cgroup_file *cfile);
91 92
92char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen); 93char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
93int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); 94int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
@@ -119,8 +120,10 @@ struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state
119struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos, 120struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
120 struct cgroup_subsys_state *css); 121 struct cgroup_subsys_state *css);
121 122
122struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset); 123struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
123struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset); 124 struct cgroup_subsys_state **dst_cssp);
125struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
126 struct cgroup_subsys_state **dst_cssp);
124 127
125void css_task_iter_start(struct cgroup_subsys_state *css, 128void css_task_iter_start(struct cgroup_subsys_state *css,
126 struct css_task_iter *it); 129 struct css_task_iter *it);
@@ -235,30 +238,39 @@ void css_task_iter_end(struct css_task_iter *it);
235/** 238/**
236 * cgroup_taskset_for_each - iterate cgroup_taskset 239 * cgroup_taskset_for_each - iterate cgroup_taskset
237 * @task: the loop cursor 240 * @task: the loop cursor
241 * @dst_css: the destination css
238 * @tset: taskset to iterate 242 * @tset: taskset to iterate
239 * 243 *
240 * @tset may contain multiple tasks and they may belong to multiple 244 * @tset may contain multiple tasks and they may belong to multiple
241 * processes. When there are multiple tasks in @tset, if a task of a 245 * processes.
242 * process is in @tset, all tasks of the process are in @tset. Also, all 246 *
243 * are guaranteed to share the same source and destination csses. 247 * On the v2 hierarchy, there may be tasks from multiple processes and they
248 * may not share the source or destination csses.
249 *
250 * On traditional hierarchies, when there are multiple tasks in @tset, if a
251 * task of a process is in @tset, all tasks of the process are in @tset.
252 * Also, all are guaranteed to share the same source and destination csses.
244 * 253 *
245 * Iteration is not in any specific order. 254 * Iteration is not in any specific order.
246 */ 255 */
247#define cgroup_taskset_for_each(task, tset) \ 256#define cgroup_taskset_for_each(task, dst_css, tset) \
248 for ((task) = cgroup_taskset_first((tset)); (task); \ 257 for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
249 (task) = cgroup_taskset_next((tset))) 258 (task); \
259 (task) = cgroup_taskset_next((tset), &(dst_css)))
250 260
251/** 261/**
252 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset 262 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
253 * @leader: the loop cursor 263 * @leader: the loop cursor
264 * @dst_css: the destination css
254 * @tset: taskset to iterate 265 * @tset: taskset to iterate
255 * 266 *
256 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset 267 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
257 * may not contain any. 268 * may not contain any.
258 */ 269 */
259#define cgroup_taskset_for_each_leader(leader, tset) \ 270#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \
260 for ((leader) = cgroup_taskset_first((tset)); (leader); \ 271 for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
261 (leader) = cgroup_taskset_next((tset))) \ 272 (leader); \
273 (leader) = cgroup_taskset_next((tset), &(dst_css))) \
262 if ((leader) != (leader)->group_leader) \ 274 if ((leader) != (leader)->group_leader) \
263 ; \ 275 ; \
264 else 276 else
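The signature change is mechanical but shapes every caller: the first/next iterators gain an out-parameter for the destination css, so the for-each macros must take a caller-owned variable and pass its address in both calls. A toy iterator built on the same pattern (all names here are invented):

#include <stdio.h>

struct item { const char *name; int group; };

struct set {
	const struct item *items;
	int n;
	int pos;	/* iteration cursor lives in the set */
};

/* next() hands back the item and reports a second value through an
 * out-parameter, as cgroup_taskset_next() now does with the css. */
static const struct item *set_next(struct set *s, int *group)
{
	if (s->pos >= s->n)
		return NULL;
	*group = s->items[s->pos].group;
	return &s->items[s->pos++];
}

static const struct item *set_first(struct set *s, int *group)
{
	s->pos = 0;
	return set_next(s, group);
}

#define set_for_each(it, grp, s) \
	for ((it) = set_first((s), &(grp)); (it); (it) = set_next((s), &(grp)))

int main(void)
{
	const struct item items[] = { { "a", 1 }, { "b", 2 } };
	struct set s = { items, 2, 0 };
	const struct item *it;
	int grp;

	set_for_each(it, grp, &s)
		printf("%s -> group %d\n", it->name, grp);
	return 0;
}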
@@ -516,19 +528,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
516 pr_cont_kernfs_path(cgrp->kn); 528 pr_cont_kernfs_path(cgrp->kn);
517} 529}
518 530
519/**
520 * cgroup_file_notify - generate a file modified event for a cgroup_file
521 * @cfile: target cgroup_file
522 *
523 * @cfile must have been obtained by setting cftype->file_offset.
524 */
525static inline void cgroup_file_notify(struct cgroup_file *cfile)
526{
527 /* might not have been created due to one of the CFTYPE selector flags */
528 if (cfile->kn)
529 kernfs_notify(cfile->kn);
530}
531
532#else /* !CONFIG_CGROUPS */ 531#else /* !CONFIG_CGROUPS */
533 532
534struct cgroup_subsys_state; 533struct cgroup_subsys_state;
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index a8a335b7fce0..758a029011b1 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -197,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro
197int configfs_register_subsystem(struct configfs_subsystem *subsys); 197int configfs_register_subsystem(struct configfs_subsystem *subsys);
198void configfs_unregister_subsystem(struct configfs_subsystem *subsys); 198void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
199 199
200int configfs_register_group(struct config_group *parent_group,
201 struct config_group *group);
202void configfs_unregister_group(struct config_group *group);
203
204struct config_group *
205configfs_register_default_group(struct config_group *parent_group,
206 const char *name,
207 struct config_item_type *item_type);
208void configfs_unregister_default_group(struct config_group *group);
209
200/* These functions can sleep and can alloc with GFP_KERNEL */ 210/* These functions can sleep and can alloc with GFP_KERNEL */
201/* WARNING: These cannot be called underneath configfs callbacks!! */ 211/* WARNING: These cannot be called underneath configfs callbacks!! */
202int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target); 212int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ef4c5b1a860f..177c7680c1a8 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -77,6 +77,7 @@ struct cpufreq_policy {
77 unsigned int suspend_freq; /* freq to set during suspend */ 77 unsigned int suspend_freq; /* freq to set during suspend */
78 78
79 unsigned int policy; /* see above */ 79 unsigned int policy; /* see above */
80 unsigned int last_policy; /* policy before unplug */
80 struct cpufreq_governor *governor; /* see below */ 81 struct cpufreq_governor *governor; /* see below */
81 void *governor_data; 82 void *governor_data;
82 bool governor_enabled; /* governor start/stop flag */ 83 bool governor_enabled; /* governor start/stop flag */
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h
index cc92268af89a..6ac3cad9aef1 100644
--- a/include/linux/dns_resolver.h
+++ b/include/linux/dns_resolver.h
@@ -27,7 +27,7 @@
27#ifdef __KERNEL__ 27#ifdef __KERNEL__
28 28
29extern int dns_query(const char *type, const char *name, size_t namelen, 29extern int dns_query(const char *type, const char *name, size_t namelen,
30 const char *options, char **_result, time_t *_expiry); 30 const char *options, char **_result, time64_t *_expiry);
31 31
32#endif /* KERNEL */ 32#endif /* KERNEL */
33 33
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 7be22da321f3..a4cf57cd0f75 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -29,7 +29,11 @@
29/* A few generic types ... taken from ses-2 */ 29/* A few generic types ... taken from ses-2 */
30enum enclosure_component_type { 30enum enclosure_component_type {
31 ENCLOSURE_COMPONENT_DEVICE = 0x01, 31 ENCLOSURE_COMPONENT_DEVICE = 0x01,
32 ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07,
33 ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14,
34 ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15,
32 ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, 35 ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
36 ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18,
33}; 37};
34 38
35/* ses-2 common element status */ 39/* ses-2 common element status */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 6523109e136d..8942af0813e3 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -271,7 +271,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
271 271
272static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) 272static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
273{ 273{
274 return gfp_flags & __GFP_DIRECT_RECLAIM; 274 return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
275} 275}
276 276
277#ifdef CONFIG_HIGHMEM 277#ifdef CONFIG_HIGHMEM
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 0ef2a97ccdb5..402753bccafa 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -227,7 +227,7 @@ struct ipv6_pinfo {
227 struct ipv6_ac_socklist *ipv6_ac_list; 227 struct ipv6_ac_socklist *ipv6_ac_list;
228 struct ipv6_fl_socklist __rcu *ipv6_fl_list; 228 struct ipv6_fl_socklist __rcu *ipv6_fl_list;
229 229
230 struct ipv6_txoptions *opt; 230 struct ipv6_txoptions __rcu *opt;
231 struct sk_buff *pktoptions; 231 struct sk_buff *pktoptions;
232 struct sk_buff *rxpmtu; 232 struct sk_buff *rxpmtu;
233 struct inet6_cork cork; 233 struct inet6_cork cork;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index c9ae0c6ec050..d5d798b35c1f 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -330,6 +330,7 @@ struct rdists {
330}; 330};
331 331
332struct irq_domain; 332struct irq_domain;
333struct device_node;
333int its_cpu_init(void); 334int its_cpu_init(void);
334int its_init(struct device_node *node, struct rdists *rdists, 335int its_init(struct device_node *node, struct rdists *rdists,
335 struct irq_domain *domain); 336 struct irq_domain *domain);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 8dde55974f18..0536524bb9eb 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -5,7 +5,7 @@
5 * Jump label support 5 * Jump label support
6 * 6 *
7 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com> 7 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
8 * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
9 * 9 *
10 * DEPRECATED API: 10 * DEPRECATED API:
11 * 11 *
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index d0a1f99e24e3..4894c6888bc6 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -25,7 +25,7 @@
25 25
26#ifdef CONFIG_DEBUG_KMEMLEAK 26#ifdef CONFIG_DEBUG_KMEMLEAK
27 27
28extern void kmemleak_init(void) __ref; 28extern void kmemleak_init(void) __init;
29extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, 29extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
30 gfp_t gfp) __ref; 30 gfp_t gfp) __ref;
31extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size, 31extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 484604d184be..e15828fd71f1 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -19,7 +19,6 @@
19#include <linux/atomic.h> 19#include <linux/atomic.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/spinlock.h>
23 22
24struct kref { 23struct kref {
25 atomic_t refcount; 24 atomic_t refcount;
@@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
99 return kref_sub(kref, 1, release); 98 return kref_sub(kref, 1, release);
100} 99}
101 100
102/**
103 * kref_put_spinlock_irqsave - decrement refcount for object.
104 * @kref: object.
105 * @release: pointer to the function that will clean up the object when the
106 * last reference to the object is released.
107 * This pointer is required, and it is not acceptable to pass kfree
108 * in as this function.
109 * @lock: lock to take in release case
110 *
111 * Behaves identical to kref_put with one exception. If the reference count
112 * drops to zero, the lock will be taken atomically wrt dropping the reference
113 * count. The release function has to call spin_unlock() without _irqrestore.
114 */
115static inline int kref_put_spinlock_irqsave(struct kref *kref,
116 void (*release)(struct kref *kref),
117 spinlock_t *lock)
118{
119 unsigned long flags;
120
121 WARN_ON(release == NULL);
122 if (atomic_add_unless(&kref->refcount, -1, 1))
123 return 0;
124 spin_lock_irqsave(lock, flags);
125 if (atomic_dec_and_test(&kref->refcount)) {
126 release(kref);
127 local_irq_restore(flags);
128 return 1;
129 }
130 spin_unlock_irqrestore(lock, flags);
131 return 0;
132}
133
134static inline int kref_put_mutex(struct kref *kref, 101static inline int kref_put_mutex(struct kref *kref,
135 void (*release)(struct kref *kref), 102 void (*release)(struct kref *kref),
136 struct mutex *lock) 103 struct mutex *lock)
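For context on the deleted helper: kref_put_spinlock_irqsave() implemented "decrement, and take the lock atomically with the drop to zero", the same shape as the kref_put_mutex() left in place. A userspace analogue using C11 atomics, with a pthread mutex standing in for the irqsave spinlock (the refcount is assumed valid, i.e. at least 1 on entry):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Fast path: decrement unless this could be the last reference. Slow
 * path: take the lock first, so a true drop to zero happens with the
 * lock held and the caller can free the object before unlocking. */
static bool ref_dec_and_lock(atomic_int *ref, pthread_mutex_t *lock)
{
	int old = atomic_load(ref);

	while (old > 1) {
		if (atomic_compare_exchange_weak(ref, &old, old - 1))
			return false;	/* not the last reference */
	}
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(ref, 1) == 1)
		return true;		/* last ref: caller frees, then unlocks */
	pthread_mutex_unlock(lock);
	return false;
}

int main(void)
{
	atomic_int ref = 2;
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	if (ref_dec_and_lock(&ref, &lock))	/* 2 -> 1: fast path */
		pthread_mutex_unlock(&lock);
	if (ref_dec_and_lock(&ref, &lock)) {	/* 1 -> 0: lock taken */
		/* release the object here */
		pthread_mutex_unlock(&lock);
	}
	return 0;
}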
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5706a2108f0a..c923350ca20a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -460,6 +460,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
460 (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ 460 (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
461 idx++) 461 idx++)
462 462
463static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
464{
465 struct kvm_vcpu *vcpu;
466 int i;
467
468 kvm_for_each_vcpu(i, vcpu, kvm)
469 if (vcpu->vcpu_id == id)
470 return vcpu;
471 return NULL;
472}
473
463#define kvm_for_each_memslot(memslot, slots) \ 474#define kvm_for_each_memslot(memslot, slots) \
464 for (memslot = &slots->memslots[0]; \ 475 for (memslot = &slots->memslots[0]; \
465 memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ 476 memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
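The new kvm_get_vcpu_by_id() exists because a vcpu's array index (what kvm_get_vcpu() takes) and its user-assigned vcpu_id are distinct namespaces that merely often coincide, so lookup by id has to be a linear scan. In miniature:

#include <assert.h>
#include <stddef.h>

struct vcpu { int vcpu_id; };

static struct vcpu *get_vcpu_by_id(struct vcpu *vcpus, size_t n, int id)
{
	/* Sparse or reordered ids mean vcpus[id] would be wrong; scan. */
	for (size_t i = 0; i < n; i++)
		if (vcpus[i].vcpu_id == id)
			return &vcpus[i];
	return NULL;
}

int main(void)
{
	struct vcpu v[] = { { .vcpu_id = 0 }, { .vcpu_id = 4 } };

	assert(get_vcpu_by_id(v, 2, 4) == &v[1]); /* id 4 lives at index 1 */
	assert(get_vcpu_by_id(v, 2, 1) == NULL);
	return 0;
}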
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 83577f8fd15b..600c1e0626a5 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -210,6 +210,7 @@ enum {
210 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ 210 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
211 /* (doesn't imply presence) */ 211 /* (doesn't imply presence) */
212 ATA_FLAG_SATA = (1 << 1), 212 ATA_FLAG_SATA = (1 << 1),
213 ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
213 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ 214 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
214 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ 215 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
215 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ 216 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 69c9057e1ab8..034117b3be5f 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -50,15 +50,21 @@ enum {
50 NVM_IO_DUAL_ACCESS = 0x1, 50 NVM_IO_DUAL_ACCESS = 0x1,
51 NVM_IO_QUAD_ACCESS = 0x2, 51 NVM_IO_QUAD_ACCESS = 0x2,
52 52
53 /* NAND Access Modes */
53 NVM_IO_SUSPEND = 0x80, 54 NVM_IO_SUSPEND = 0x80,
54 NVM_IO_SLC_MODE = 0x100, 55 NVM_IO_SLC_MODE = 0x100,
55 NVM_IO_SCRAMBLE_DISABLE = 0x200, 56 NVM_IO_SCRAMBLE_DISABLE = 0x200,
57
58 /* Block Types */
59 NVM_BLK_T_FREE = 0x0,
60 NVM_BLK_T_BAD = 0x1,
61 NVM_BLK_T_DEV = 0x2,
62 NVM_BLK_T_HOST = 0x4,
56}; 63};
57 64
58struct nvm_id_group { 65struct nvm_id_group {
59 u8 mtype; 66 u8 mtype;
60 u8 fmtype; 67 u8 fmtype;
61 u16 res16;
62 u8 num_ch; 68 u8 num_ch;
63 u8 num_lun; 69 u8 num_lun;
64 u8 num_pln; 70 u8 num_pln;
@@ -74,9 +80,9 @@ struct nvm_id_group {
74 u32 tbet; 80 u32 tbet;
75 u32 tbem; 81 u32 tbem;
76 u32 mpos; 82 u32 mpos;
83 u32 mccap;
77 u16 cpar; 84 u16 cpar;
78 u8 res[913]; 85};
79} __packed;
80 86
81struct nvm_addr_format { 87struct nvm_addr_format {
82 u8 ch_offset; 88 u8 ch_offset;
@@ -91,19 +97,15 @@ struct nvm_addr_format {
91 u8 pg_len; 97 u8 pg_len;
92 u8 sect_offset; 98 u8 sect_offset;
93 u8 sect_len; 99 u8 sect_len;
94 u8 res[4];
95}; 100};
96 101
97struct nvm_id { 102struct nvm_id {
98 u8 ver_id; 103 u8 ver_id;
99 u8 vmnt; 104 u8 vmnt;
100 u8 cgrps; 105 u8 cgrps;
101 u8 res[5];
102 u32 cap; 106 u32 cap;
103 u32 dom; 107 u32 dom;
104 struct nvm_addr_format ppaf; 108 struct nvm_addr_format ppaf;
105 u8 ppat;
106 u8 resv[224];
107 struct nvm_id_group groups[4]; 109 struct nvm_id_group groups[4];
108} __packed; 110} __packed;
109 111
@@ -123,39 +125,28 @@ struct nvm_tgt_instance {
123#define NVM_VERSION_MINOR 0 125#define NVM_VERSION_MINOR 0
124#define NVM_VERSION_PATCH 0 126#define NVM_VERSION_PATCH 0
125 127
126#define NVM_SEC_BITS (8)
127#define NVM_PL_BITS (6)
128#define NVM_PG_BITS (16)
129#define NVM_BLK_BITS (16) 128#define NVM_BLK_BITS (16)
130#define NVM_LUN_BITS (10) 129#define NVM_PG_BITS (16)
130#define NVM_SEC_BITS (8)
131#define NVM_PL_BITS (8)
132#define NVM_LUN_BITS (8)
131#define NVM_CH_BITS (8) 133#define NVM_CH_BITS (8)
132 134
133struct ppa_addr { 135struct ppa_addr {
136 /* Generic structure for all addresses */
134 union { 137 union {
135 /* Channel-based PPA format in nand 4x2x2x2x8x10 */
136 struct {
137 u64 ch : 4;
138 u64 sec : 2; /* 4 sectors per page */
139 u64 pl : 2; /* 4 planes per LUN */
140 u64 lun : 2; /* 4 LUNs per channel */
141 u64 pg : 8; /* 256 pages per block */
142 u64 blk : 10;/* 1024 blocks per plane */
143 u64 resved : 36;
144 } chnl;
145
146 /* Generic structure for all addresses */
147 struct { 138 struct {
139 u64 blk : NVM_BLK_BITS;
140 u64 pg : NVM_PG_BITS;
148 u64 sec : NVM_SEC_BITS; 141 u64 sec : NVM_SEC_BITS;
149 u64 pl : NVM_PL_BITS; 142 u64 pl : NVM_PL_BITS;
150 u64 pg : NVM_PG_BITS;
151 u64 blk : NVM_BLK_BITS;
152 u64 lun : NVM_LUN_BITS; 143 u64 lun : NVM_LUN_BITS;
153 u64 ch : NVM_CH_BITS; 144 u64 ch : NVM_CH_BITS;
154 } g; 145 } g;
155 146
156 u64 ppa; 147 u64 ppa;
157 }; 148 };
158} __packed; 149};
159 150
160struct nvm_rq { 151struct nvm_rq {
161 struct nvm_tgt_instance *ins; 152 struct nvm_tgt_instance *ins;
@@ -191,18 +182,18 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
191struct nvm_block; 182struct nvm_block;
192 183
193typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); 184typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
194typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *); 185typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
195typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); 186typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
196typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, 187typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
197 nvm_l2p_update_fn *, void *); 188 nvm_l2p_update_fn *, void *);
198typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int, 189typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
199 nvm_bb_update_fn *, void *); 190 nvm_bb_update_fn *, void *);
200typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); 191typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
201typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); 192typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
202typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *); 193typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
203typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *); 194typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
204typedef void (nvm_destroy_dma_pool_fn)(void *); 195typedef void (nvm_destroy_dma_pool_fn)(void *);
205typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t, 196typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
206 dma_addr_t *); 197 dma_addr_t *);
207typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); 198typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
208 199
@@ -210,7 +201,7 @@ struct nvm_dev_ops {
210 nvm_id_fn *identity; 201 nvm_id_fn *identity;
211 nvm_get_l2p_tbl_fn *get_l2p_tbl; 202 nvm_get_l2p_tbl_fn *get_l2p_tbl;
212 nvm_op_bb_tbl_fn *get_bb_tbl; 203 nvm_op_bb_tbl_fn *get_bb_tbl;
213 nvm_op_set_bb_fn *set_bb; 204 nvm_op_set_bb_fn *set_bb_tbl;
214 205
215 nvm_submit_io_fn *submit_io; 206 nvm_submit_io_fn *submit_io;
216 nvm_erase_blk_fn *erase_block; 207 nvm_erase_blk_fn *erase_block;
@@ -220,7 +211,7 @@ struct nvm_dev_ops {
220 nvm_dev_dma_alloc_fn *dev_dma_alloc; 211 nvm_dev_dma_alloc_fn *dev_dma_alloc;
221 nvm_dev_dma_free_fn *dev_dma_free; 212 nvm_dev_dma_free_fn *dev_dma_free;
222 213
223 uint8_t max_phys_sect; 214 unsigned int max_phys_sect;
224}; 215};
225 216
226struct nvm_lun { 217struct nvm_lun {
@@ -229,7 +220,9 @@ struct nvm_lun {
229 int lun_id; 220 int lun_id;
230 int chnl_id; 221 int chnl_id;
231 222
223 unsigned int nr_inuse_blocks; /* Number of used blocks */
232 unsigned int nr_free_blocks; /* Number of unused blocks */ 224 unsigned int nr_free_blocks; /* Number of unused blocks */
225 unsigned int nr_bad_blocks; /* Number of bad blocks */
233 struct nvm_block *blocks; 226 struct nvm_block *blocks;
234 227
235 spinlock_t lock; 228 spinlock_t lock;
@@ -263,8 +256,7 @@ struct nvm_dev {
263 int blks_per_lun; 256 int blks_per_lun;
264 int sec_size; 257 int sec_size;
265 int oob_size; 258 int oob_size;
266 int addr_mode; 259 struct nvm_addr_format ppaf;
267 struct nvm_addr_format addr_format;
268 260
269 /* Calculated/Cached values. These do not reflect the actual usable 261 /* Calculated/Cached values. These do not reflect the actual usable
270 * blocks at run-time. 262 * blocks at run-time.
@@ -290,118 +282,45 @@ struct nvm_dev {
290 char name[DISK_NAME_LEN]; 282 char name[DISK_NAME_LEN];
291}; 283};
292 284
293/* fallback conversion */ 285static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
294static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev, 286 struct ppa_addr r)
295 struct ppa_addr r)
296{ 287{
297 struct ppa_addr l; 288 struct ppa_addr l;
298 289
299 l.ppa = r.g.sec + 290 l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
300 r.g.pg * dev->sec_per_pg + 291 l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
301 r.g.blk * (dev->pgs_per_blk * 292 l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
302 dev->sec_per_pg) + 293 l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
303 r.g.lun * (dev->blks_per_lun * 294 l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
304 dev->pgs_per_blk * 295 l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;
305 dev->sec_per_pg) +
306 r.g.ch * (dev->blks_per_lun *
307 dev->pgs_per_blk *
308 dev->luns_per_chnl *
309 dev->sec_per_pg);
310 296
311 return l; 297 return l;
312} 298}
313 299
314/* fallback conversion */ 300static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
315static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev, 301 struct ppa_addr r)
316 struct ppa_addr r)
317{ 302{
318 struct ppa_addr l; 303 struct ppa_addr l;
319 int secs, pgs, blks, luns;
320 sector_t ppa = r.ppa;
321
322 l.ppa = 0;
323
324 div_u64_rem(ppa, dev->sec_per_pg, &secs);
325 l.g.sec = secs;
326 304
327 sector_div(ppa, dev->sec_per_pg); 305 /*
328 div_u64_rem(ppa, dev->sec_per_blk, &pgs); 306 * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
329 l.g.pg = pgs; 307 */
330 308 l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
331 sector_div(ppa, dev->pgs_per_blk); 309 (((1 << dev->ppaf.blk_len) - 1));
332 div_u64_rem(ppa, dev->blks_per_lun, &blks); 310 l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) &
333 l.g.blk = blks; 311 (((1 << dev->ppaf.pg_len) - 1));
334 312 l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) &
335 sector_div(ppa, dev->blks_per_lun); 313 (((1 << dev->ppaf.sect_len) - 1));
336 div_u64_rem(ppa, dev->luns_per_chnl, &luns); 314 l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) &
337 l.g.lun = luns; 315 (((1 << dev->ppaf.pln_len) - 1));
338 316 l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) &
339 sector_div(ppa, dev->luns_per_chnl); 317 (((1 << dev->ppaf.lun_len) - 1));
340 l.g.ch = ppa; 318 l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) &
341 319 (((1 << dev->ppaf.ch_len) - 1));
342 return l;
343}
344
345static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r)
346{
347 struct ppa_addr l;
348
349 l.ppa = 0;
350
351 l.chnl.sec = r.g.sec;
352 l.chnl.pl = r.g.pl;
353 l.chnl.pg = r.g.pg;
354 l.chnl.blk = r.g.blk;
355 l.chnl.lun = r.g.lun;
356 l.chnl.ch = r.g.ch;
357
358 return l;
359}
360
361static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
362{
363 struct ppa_addr l;
364
365 l.ppa = 0;
366
367 l.g.sec = r.chnl.sec;
368 l.g.pl = r.chnl.pl;
369 l.g.pg = r.chnl.pg;
370 l.g.blk = r.chnl.blk;
371 l.g.lun = r.chnl.lun;
372 l.g.ch = r.chnl.ch;
373 320
374 return l; 321 return l;
375} 322}
376 323
377static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
378 struct ppa_addr gppa)
379{
380 switch (dev->addr_mode) {
381 case NVM_ADDRMODE_LINEAR:
382 return __linear_to_generic_addr(dev, gppa);
383 case NVM_ADDRMODE_CHANNEL:
384 return __chnl_to_generic_addr(gppa);
385 default:
386 BUG();
387 }
388 return gppa;
389}
390
391static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
392 struct ppa_addr gppa)
393{
394 switch (dev->addr_mode) {
395 case NVM_ADDRMODE_LINEAR:
396 return __generic_to_linear_addr(dev, gppa);
397 case NVM_ADDRMODE_CHANNEL:
398 return __generic_to_chnl_addr(gppa);
399 default:
400 BUG();
401 }
402 return gppa;
403}
404
405static inline int ppa_empty(struct ppa_addr ppa_addr) 324static inline int ppa_empty(struct ppa_addr ppa_addr)
406{ 325{
407 return (ppa_addr.ppa == ADDR_EMPTY); 326 return (ppa_addr.ppa == ADDR_EMPTY);
@@ -468,7 +387,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
468typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, 387typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
469 unsigned long); 388 unsigned long);
470typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); 389typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
471typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *); 390typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
472 391
473struct nvmm_type { 392struct nvmm_type {
474 const char *name; 393 const char *name;
@@ -492,7 +411,7 @@ struct nvmm_type {
492 nvmm_get_lun_fn *get_lun; 411 nvmm_get_lun_fn *get_lun;
493 412
494 /* Statistics */ 413 /* Statistics */
495 nvmm_free_blocks_print_fn *free_blocks_print; 414 nvmm_lun_info_print_fn *lun_info_print;
496 struct list_head list; 415 struct list_head list;
497}; 416};
498 417
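
The per-mode linear/channel converters disappear: a device now publishes its physical address layout in dev->ppaf, and the single generic_to_dev_addr()/dev_to_generic_addr() pair shifts and masks each field by the offsets and lengths the device reported. A minimal sketch of packing a generic address into the device layout, assuming dev->ppaf was already filled from the identify data:

    struct ppa_addr generic, dev_fmt;

    generic.ppa   = 0;
    generic.g.ch  = 1;
    generic.g.lun = 2;
    generic.g.pl  = 0;
    generic.g.blk = 100;
    generic.g.pg  = 7;
    generic.g.sec = 3;

    /* shift every field into the position this device expects */
    dev_fmt = generic_to_dev_addr(dev, generic);
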
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 70400dc7660f..c57e424d914b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -2,7 +2,7 @@
2 * Runtime locking correctness validator 2 * Runtime locking correctness validator
3 * 3 *
4 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 4 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
6 * 6 *
7 * see Documentation/locking/lockdep-design.txt for more details. 7 * see Documentation/locking/lockdep-design.txt for more details.
8 */ 8 */
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index e6982ac3200d..a57f0dfb6db7 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -16,6 +16,7 @@
16#define MARVELL_PHY_ID_88E1318S 0x01410e90 16#define MARVELL_PHY_ID_88E1318S 0x01410e90
17#define MARVELL_PHY_ID_88E1116R 0x01410e40 17#define MARVELL_PHY_ID_88E1116R 0x01410e40
18#define MARVELL_PHY_ID_88E1510 0x01410dd0 18#define MARVELL_PHY_ID_88E1510 0x01410dd0
19#define MARVELL_PHY_ID_88E1540 0x01410eb0
19#define MARVELL_PHY_ID_88E3016 0x01410e60 20#define MARVELL_PHY_ID_88E3016 0x01410e60
20 21
21/* struct phy_device dev_flags definitions */ 22/* struct phy_device dev_flags definitions */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7501626ab529..d3133be12d92 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -427,6 +427,17 @@ enum {
427}; 427};
428 428
429enum { 429enum {
430 /*
431 * Max wqe size for rdma read is 512 bytes, so this
432 * limits our max_sge_rd as the wqe needs to fit:
433 * - ctrl segment (16 bytes)
434 * - rdma segment (16 bytes)
435 * - scatter elements (16 bytes each)
436 */
437 MLX4_MAX_SGE_RD = (512 - 16 - 16) / 16
438};
439
440enum {
430 MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14, 441 MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14,
431 MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15, 442 MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15,
432 MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16, 443 MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index dd2097455a2e..1565324eb620 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -453,26 +453,28 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
453 u8 lro_cap[0x1]; 453 u8 lro_cap[0x1];
454 u8 lro_psh_flag[0x1]; 454 u8 lro_psh_flag[0x1];
455 u8 lro_time_stamp[0x1]; 455 u8 lro_time_stamp[0x1];
456 u8 reserved_0[0x6]; 456 u8 reserved_0[0x3];
457 u8 self_lb_en_modifiable[0x1];
458 u8 reserved_1[0x2];
457 u8 max_lso_cap[0x5]; 459 u8 max_lso_cap[0x5];
458 u8 reserved_1[0x4]; 460 u8 reserved_2[0x4];
459 u8 rss_ind_tbl_cap[0x4]; 461 u8 rss_ind_tbl_cap[0x4];
460 u8 reserved_2[0x3]; 462 u8 reserved_3[0x3];
461 u8 tunnel_lso_const_out_ip_id[0x1]; 463 u8 tunnel_lso_const_out_ip_id[0x1];
462 u8 reserved_3[0x2]; 464 u8 reserved_4[0x2];
463 u8 tunnel_statless_gre[0x1]; 465 u8 tunnel_statless_gre[0x1];
464 u8 tunnel_stateless_vxlan[0x1]; 466 u8 tunnel_stateless_vxlan[0x1];
465 467
466 u8 reserved_4[0x20]; 468 u8 reserved_5[0x20];
467 469
468 u8 reserved_5[0x10]; 470 u8 reserved_6[0x10];
469 u8 lro_min_mss_size[0x10]; 471 u8 lro_min_mss_size[0x10];
470 472
471 u8 reserved_6[0x120]; 473 u8 reserved_7[0x120];
472 474
473 u8 lro_timer_supported_periods[4][0x20]; 475 u8 lro_timer_supported_periods[4][0x20];
474 476
475 u8 reserved_7[0x600]; 477 u8 reserved_8[0x600];
476}; 478};
477 479
478struct mlx5_ifc_roce_cap_bits { 480struct mlx5_ifc_roce_cap_bits {
@@ -4051,9 +4053,11 @@ struct mlx5_ifc_modify_tis_in_bits {
4051}; 4053};
4052 4054
4053struct mlx5_ifc_modify_tir_bitmask_bits { 4055struct mlx5_ifc_modify_tir_bitmask_bits {
4054 u8 reserved[0x20]; 4056 u8 reserved_0[0x20];
4055 4057
4056 u8 reserved1[0x1f]; 4058 u8 reserved_1[0x1b];
4059 u8 self_lb_en[0x1];
4060 u8 reserved_2[0x3];
4057 u8 lro[0x1]; 4061 u8 lro[0x1];
4058}; 4062};
4059 4063
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 877ef226f90f..772362adf471 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -1,6 +1,7 @@
1#ifndef LINUX_MM_DEBUG_H 1#ifndef LINUX_MM_DEBUG_H
2#define LINUX_MM_DEBUG_H 1 2#define LINUX_MM_DEBUG_H 1
3 3
4#include <linux/bug.h>
4#include <linux/stringify.h> 5#include <linux/stringify.h>
5 6
6struct page; 7struct page;
diff --git a/include/linux/net.h b/include/linux/net.h
index 70ac5e28e6b7..0b4ac7da583a 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -34,8 +34,12 @@ struct inode;
34struct file; 34struct file;
35struct net; 35struct net;
36 36
37#define SOCK_ASYNC_NOSPACE 0 37/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
38#define SOCK_ASYNC_WAITDATA 1 38 * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
39 * Eventually all flags will be in sk->sk_wq_flags.
40 */
41#define SOCKWQ_ASYNC_NOSPACE 0
42#define SOCKWQ_ASYNC_WAITDATA 1
39#define SOCK_NOSPACE 2 43#define SOCK_NOSPACE 2
40#define SOCK_PASSCRED 3 44#define SOCK_PASSCRED 3
41#define SOCK_PASSSEC 4 45#define SOCK_PASSSEC 4
@@ -89,6 +93,7 @@ struct socket_wq {
89 /* Note: wait MUST be first field of socket_wq */ 93 /* Note: wait MUST be first field of socket_wq */
90 wait_queue_head_t wait; 94 wait_queue_head_t wait;
91 struct fasync_struct *fasync_list; 95 struct fasync_struct *fasync_list;
96 unsigned long flags; /* %SOCKWQ_ASYNC_NOSPACE, etc */
92 struct rcu_head rcu; 97 struct rcu_head rcu;
93} ____cacheline_aligned_in_smp; 98} ____cacheline_aligned_in_smp;
94 99
@@ -96,7 +101,7 @@ struct socket_wq {
96 * struct socket - general BSD socket 101 * struct socket - general BSD socket
97 * @state: socket state (%SS_CONNECTED, etc) 102 * @state: socket state (%SS_CONNECTED, etc)
98 * @type: socket type (%SOCK_STREAM, etc) 103 * @type: socket type (%SOCK_STREAM, etc)
99 * @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc) 104 * @flags: socket flags (%SOCK_NOSPACE, etc)
100 * @ops: protocol specific socket operations 105 * @ops: protocol specific socket operations
101 * @file: File back pointer for gc 106 * @file: File back pointer for gc
102 * @sk: internal networking protocol agnostic socket representation 107 * @sk: internal networking protocol agnostic socket representation
@@ -202,7 +207,7 @@ enum {
202 SOCK_WAKE_URG, 207 SOCK_WAKE_URG,
203}; 208};
204 209
205int sock_wake_async(struct socket *sk, int how, int band); 210int sock_wake_async(struct socket_wq *sk_wq, int how, int band);
206int sock_register(const struct net_proto_family *fam); 211int sock_register(const struct net_proto_family *fam);
207void sock_unregister(int family); 212void sock_unregister(int family);
208int __sock_create(struct net *net, int family, int type, int proto, 213int __sock_create(struct net *net, int family, int type, int proto,
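
Because the async flags now live in the RCU-managed socket_wq rather than in struct socket, readers must reach them through sk->sk_wq under rcu_read_lock(). A hedged sketch of testing the no-space bit, with sk_async_nospace() as a hypothetical helper:

    #include <net/sock.h>

    static bool sk_async_nospace(const struct sock *sk)
    {
            struct socket_wq *wq;
            bool set = false;

            rcu_read_lock();
            wq = rcu_dereference(sk->sk_wq);
            if (wq)
                    set = test_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
            rcu_read_unlock();
            return set;
    }
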
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d20891465247..3143c847bddb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1398,7 +1398,8 @@ enum netdev_priv_flags {
1398 * @dma: DMA channel 1398 * @dma: DMA channel
1399 * @mtu: Interface MTU value 1399 * @mtu: Interface MTU value
1400 * @type: Interface hardware type 1400 * @type: Interface hardware type
1401 * @hard_header_len: Hardware header length 1401 * @hard_header_len: Hardware header length, which means that this is the
1402 * minimum size of a packet.
1402 * 1403 *
1403 * @needed_headroom: Extra headroom the hardware may need, but not in all 1404 * @needed_headroom: Extra headroom the hardware may need, but not in all
1404 * cases can this be guaranteed 1405 * cases can this be guaranteed
@@ -2068,20 +2069,23 @@ struct pcpu_sw_netstats {
2068 struct u64_stats_sync syncp; 2069 struct u64_stats_sync syncp;
2069}; 2070};
2070 2071
2071#define netdev_alloc_pcpu_stats(type) \ 2072#define __netdev_alloc_pcpu_stats(type, gfp) \
2072({ \ 2073({ \
2073 typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \ 2074 typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2074 if (pcpu_stats) { \ 2075 if (pcpu_stats) { \
2075 int __cpu; \ 2076 int __cpu; \
2076 for_each_possible_cpu(__cpu) { \ 2077 for_each_possible_cpu(__cpu) { \
2077 typeof(type) *stat; \ 2078 typeof(type) *stat; \
2078 stat = per_cpu_ptr(pcpu_stats, __cpu); \ 2079 stat = per_cpu_ptr(pcpu_stats, __cpu); \
2079 u64_stats_init(&stat->syncp); \ 2080 u64_stats_init(&stat->syncp); \
2080 } \ 2081 } \
2081 } \ 2082 } \
2082 pcpu_stats; \ 2083 pcpu_stats; \
2083}) 2084})
2084 2085
2086#define netdev_alloc_pcpu_stats(type) \
2087 __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
2088
2085#include <linux/notifier.h> 2089#include <linux/notifier.h>
2086 2090
2087/* netdevice notifier chain. Please remember to update the rtnetlink 2091/* netdevice notifier chain. Please remember to update the rtnetlink
@@ -3854,6 +3858,11 @@ static inline bool netif_is_bridge_master(const struct net_device *dev)
3854 return dev->priv_flags & IFF_EBRIDGE; 3858 return dev->priv_flags & IFF_EBRIDGE;
3855} 3859}
3856 3860
3861static inline bool netif_is_bridge_port(const struct net_device *dev)
3862{
3863 return dev->priv_flags & IFF_BRIDGE_PORT;
3864}
3865
3857static inline bool netif_is_ovs_master(const struct net_device *dev) 3866static inline bool netif_is_ovs_master(const struct net_device *dev)
3858{ 3867{
3859 return dev->priv_flags & IFF_OPENVSWITCH; 3868 return dev->priv_flags & IFF_OPENVSWITCH;
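
Splitting the per-cpu stats allocator out into __netdev_alloc_pcpu_stats() lets callers in atomic context supply their own gfp mask, while netdev_alloc_pcpu_stats() keeps the old GFP_KERNEL behaviour. A usage sketch:

    struct pcpu_sw_netstats __percpu *stats;

    stats = __netdev_alloc_pcpu_stats(struct pcpu_sw_netstats, GFP_ATOMIC);
    if (!stats)
            return -ENOMEM;         /* each per-cpu syncp arrives initialized */
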
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 48bb01edcf30..0e1f433cc4b7 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -421,7 +421,7 @@ extern void ip_set_free(void *members);
421extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr); 421extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
422extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr); 422extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
423extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], 423extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
424 size_t len); 424 size_t len, size_t align);
425extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], 425extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
426 struct ip_set_ext *ext); 426 struct ip_set_ext *ext);
427 427
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 249d1bb01e03..5646b24bfc64 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -14,7 +14,7 @@ struct nfnl_callback {
14 int (*call_rcu)(struct sock *nl, struct sk_buff *skb, 14 int (*call_rcu)(struct sock *nl, struct sk_buff *skb,
15 const struct nlmsghdr *nlh, 15 const struct nlmsghdr *nlh,
16 const struct nlattr * const cda[]); 16 const struct nlattr * const cda[]);
17 int (*call_batch)(struct sock *nl, struct sk_buff *skb, 17 int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb,
18 const struct nlmsghdr *nlh, 18 const struct nlmsghdr *nlh,
19 const struct nlattr * const cda[]); 19 const struct nlattr * const cda[]);
20 const struct nla_policy *policy; /* netlink attribute policy */ 20 const struct nla_policy *policy; /* netlink attribute policy */
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 187feabe557c..5fcd375ef175 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -5,10 +5,13 @@
5#include <linux/netdevice.h> 5#include <linux/netdevice.h>
6 6
7#ifdef CONFIG_NETFILTER_INGRESS 7#ifdef CONFIG_NETFILTER_INGRESS
8static inline int nf_hook_ingress_active(struct sk_buff *skb) 8static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
9{ 9{
10 return nf_hook_list_active(&skb->dev->nf_hooks_ingress, 10#ifdef HAVE_JUMP_LABEL
11 NFPROTO_NETDEV, NF_NETDEV_INGRESS); 11 if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
12 return false;
13#endif
14 return !list_empty(&skb->dev->nf_hooks_ingress);
12} 15}
13 16
14static inline int nf_hook_ingress(struct sk_buff *skb) 17static inline int nf_hook_ingress(struct sk_buff *skb)
@@ -16,8 +19,8 @@ static inline int nf_hook_ingress(struct sk_buff *skb)
16 struct nf_hook_state state; 19 struct nf_hook_state state;
17 20
18 nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress, 21 nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
19 NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL, 22 NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
20 skb->dev, NULL, dev_net(skb->dev), NULL); 23 skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
21 return nf_hook_slow(skb, &state); 24 return nf_hook_slow(skb, &state);
22} 25}
23 26
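
nf_hook_ingress_active() now bails out through the nf_hooks_needed static key before it ever touches the per-device list, so the common no-hooks case costs a patched-out jump rather than a pointer chase (the HAVE_JUMP_LABEL guard falls back to the plain list check where jump labels are unavailable). A generic sketch of the same jump-label pattern, with my_feature_key as a hypothetical key:

    #include <linux/jump_label.h>

    static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

    static bool my_feature_active(void)
    {
            /* compiles to a nop until someone enables the key */
            if (!static_key_false(&my_feature_key))
                    return false;
            return true;            /* slow path: the feature is really on */
    }

Registration code would pair static_key_slow_inc() on hook add with static_key_slow_dec() on hook removal.
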
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 570d630f98ae..11bbae44f4cb 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -251,6 +251,7 @@ struct nfs4_layoutget {
251 struct nfs4_layoutget_res res; 251 struct nfs4_layoutget_res res;
252 struct rpc_cred *cred; 252 struct rpc_cred *cred;
253 gfp_t gfp_flags; 253 gfp_t gfp_flags;
254 long timeout;
254}; 255};
255 256
256struct nfs4_getdeviceinfo_args { 257struct nfs4_getdeviceinfo_args {
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index 36112cdd665a..b90d8ec57c1f 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -80,7 +80,7 @@ static inline int of_dma_router_register(struct device_node *np,
80static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, 80static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
81 const char *name) 81 const char *name)
82{ 82{
83 return NULL; 83 return ERR_PTR(-ENODEV);
84} 84}
85 85
86static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, 86static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
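
The stub now returns ERR_PTR(-ENODEV) instead of NULL, so callers can treat the built-in and stubbed configurations uniformly with a single IS_ERR() test. A usage sketch:

    struct dma_chan *chan = of_dma_request_slave_channel(np, "rx");

    if (IS_ERR(chan))
            return PTR_ERR(chan);   /* -ENODEV when OF DMA support is absent */
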
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 039f2eec49ce..1e0deb8e8494 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -46,12 +46,14 @@ extern int of_irq_get(struct device_node *dev, int index);
46extern int of_irq_get_byname(struct device_node *dev, const char *name); 46extern int of_irq_get_byname(struct device_node *dev, const char *name);
47extern int of_irq_to_resource_table(struct device_node *dev, 47extern int of_irq_to_resource_table(struct device_node *dev,
48 struct resource *res, int nr_irqs); 48 struct resource *res, int nr_irqs);
49extern struct device_node *of_irq_find_parent(struct device_node *child);
49extern struct irq_domain *of_msi_get_domain(struct device *dev, 50extern struct irq_domain *of_msi_get_domain(struct device *dev,
50 struct device_node *np, 51 struct device_node *np,
51 enum irq_domain_bus_token token); 52 enum irq_domain_bus_token token);
52extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, 53extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
53 u32 rid); 54 u32 rid);
54extern void of_msi_configure(struct device *dev, struct device_node *np); 55extern void of_msi_configure(struct device *dev, struct device_node *np);
56u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
55#else 57#else
56static inline int of_irq_count(struct device_node *dev) 58static inline int of_irq_count(struct device_node *dev)
57{ 59{
@@ -70,6 +72,11 @@ static inline int of_irq_to_resource_table(struct device_node *dev,
70{ 72{
71 return 0; 73 return 0;
72} 74}
75static inline struct device_node *of_irq_find_parent(struct device_node *child)
76{
77 return NULL;
78}
79
73static inline struct irq_domain *of_msi_get_domain(struct device *dev, 80static inline struct irq_domain *of_msi_get_domain(struct device *dev,
74 struct device_node *np, 81 struct device_node *np,
75 enum irq_domain_bus_token token) 82 enum irq_domain_bus_token token)
@@ -84,6 +91,11 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev
84static inline void of_msi_configure(struct device *dev, struct device_node *np) 91static inline void of_msi_configure(struct device *dev, struct device_node *np)
85{ 92{
86} 93}
94static inline u32 of_msi_map_rid(struct device *dev,
95 struct device_node *msi_np, u32 rid_in)
96{
97 return rid_in;
98}
87#endif 99#endif
88 100
89#if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC) 101#if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC)
@@ -93,7 +105,6 @@ static inline void of_msi_configure(struct device *dev, struct device_node *np)
93 * so declare it here regardless of the CONFIG_OF_IRQ setting. 105 * so declare it here regardless of the CONFIG_OF_IRQ setting.
94 */ 106 */
95extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); 107extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
96u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
97 108
98#else /* !CONFIG_OF && !CONFIG_SPARC */ 109#else /* !CONFIG_OF && !CONFIG_SPARC */
99static inline unsigned int irq_of_parse_and_map(struct device_node *dev, 110static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
@@ -101,12 +112,6 @@ static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
101{ 112{
102 return 0; 113 return 0;
103} 114}
104
105static inline u32 of_msi_map_rid(struct device *dev,
106 struct device_node *msi_np, u32 rid_in)
107{
108 return rid_in;
109}
110#endif /* !CONFIG_OF */ 115#endif /* !CONFIG_OF */
111 116
112#endif /* __OF_IRQ_H */ 117#endif /* __OF_IRQ_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e828e7b4afec..6ae25aae88fd 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -412,9 +412,18 @@ struct pci_host_bridge {
412 void (*release_fn)(struct pci_host_bridge *); 412 void (*release_fn)(struct pci_host_bridge *);
413 void *release_data; 413 void *release_data;
414 unsigned int ignore_reset_delay:1; /* for entire hierarchy */ 414 unsigned int ignore_reset_delay:1; /* for entire hierarchy */
415 /* Resource alignment requirements */
416 resource_size_t (*align_resource)(struct pci_dev *dev,
417 const struct resource *res,
418 resource_size_t start,
419 resource_size_t size,
420 resource_size_t align);
415}; 421};
416 422
417#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) 423#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
424
425struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
426
418void pci_set_host_bridge_release(struct pci_host_bridge *bridge, 427void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
419 void (*release_fn)(struct pci_host_bridge *), 428 void (*release_fn)(struct pci_host_bridge *),
420 void *release_data); 429 void *release_data);
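
Hanging align_resource off struct pci_host_bridge replaces a single global hook with a per-bridge policy, and the new pci_find_host_bridge() gives resource code a way to reach it. A hedged sketch of a host controller driver installing a callback, with my_align_resource() as a hypothetical implementation:

    static resource_size_t my_align_resource(struct pci_dev *dev,
                                             const struct resource *res,
                                             resource_size_t start,
                                             resource_size_t size,
                                             resource_size_t align)
    {
            /* e.g. round BAR placement the way this fabric requires */
            return start;
    }

    /* during host bridge setup: */
    struct pci_host_bridge *bridge = pci_find_host_bridge(bus);

    bridge->align_resource = my_align_resource;
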
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d841d33bcdc9..f9828a48f16a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -697,9 +697,11 @@ struct perf_cgroup {
697 * if there is no cgroup event for the current CPU context. 697 * if there is no cgroup event for the current CPU context.
698 */ 698 */
699static inline struct perf_cgroup * 699static inline struct perf_cgroup *
700perf_cgroup_from_task(struct task_struct *task) 700perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
701{ 701{
702 return container_of(task_css(task, perf_event_cgrp_id), 702 return container_of(task_css_check(task, perf_event_cgrp_id,
703 ctx ? lockdep_is_held(&ctx->lock)
704 : true),
703 struct perf_cgroup, css); 705 struct perf_cgroup, css);
704} 706}
705#endif /* CONFIG_CGROUP_PERF */ 707#endif /* CONFIG_CGROUP_PERF */
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index e2878baeb90e..4299f4ba03bd 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -72,7 +72,7 @@ struct edma_soc_info {
72 struct edma_rsv_info *rsv; 72 struct edma_rsv_info *rsv;
73 73
74 /* List of channels allocated for memcpy, terminated with -1 */ 74 /* List of channels allocated for memcpy, terminated with -1 */
75 s16 *memcpy_channels; 75 s32 *memcpy_channels;
76 76
77 s8 (*queue_priority_mapping)[2]; 77 s8 (*queue_priority_mapping)[2];
78 const s16 (*xbar_chans)[2]; 78 const s16 (*xbar_chans)[2];
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 5440f64d2942..21221338ad18 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * FLoating proportions 2 * FLoating proportions
3 * 3 *
4 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 4 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
5 * 5 *
6 * This file contains the public data structure and API definitions. 6 * This file contains the public data structure and API definitions.
7 */ 7 */
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 6a4347639c03..1d1ba2c5ee7a 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -9,6 +9,8 @@
9#ifndef __COMMON_HSI__ 9#ifndef __COMMON_HSI__
10#define __COMMON_HSI__ 10#define __COMMON_HSI__
11 11
12#define CORE_SPQE_PAGE_SIZE_BYTES 4096
13
12#define FW_MAJOR_VERSION 8 14#define FW_MAJOR_VERSION 8
13#define FW_MINOR_VERSION 4 15#define FW_MINOR_VERSION 4
14#define FW_REVISION_VERSION 2 16#define FW_REVISION_VERSION 2
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index b920c3605c46..41b9049b57e2 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -111,7 +111,8 @@ static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
111 used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) - 111 used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
112 (u32)p_chain->cons_idx; 112 (u32)p_chain->cons_idx;
113 if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) 113 if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
114 used -= (used / p_chain->elem_per_page); 114 used -= p_chain->prod_idx / p_chain->elem_per_page -
115 p_chain->cons_idx / p_chain->elem_per_page;
115 116
116 return p_chain->capacity - used; 117 return p_chain->capacity - used;
117} 118}
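
In next-pointer mode every page ends in a link element, so the number of entries consumed must grow by one for each page boundary sitting between consumer and producer; dividing the raw in-flight count, as the old code did, undercounts whenever the indices sit mid-page. A worked example as a sketch:

    /* elem_per_page = 8, cons_idx = 7, prod_idx = 10, so used = 3:
     *   old: used -= 3 / 8          subtracts 0 and misses the link
     *                               element at the end of page 0
     *   new: used -= 10/8 - 7/8     subtracts 1 for the page boundary
     *                               that was actually crossed
     */
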
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 843ceca9a21e..e50b31d18462 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -19,6 +19,7 @@
19 19
20#include <linux/atomic.h> 20#include <linux/atomic.h>
21#include <linux/compiler.h> 21#include <linux/compiler.h>
22#include <linux/err.h>
22#include <linux/errno.h> 23#include <linux/errno.h>
23#include <linux/jhash.h> 24#include <linux/jhash.h>
24#include <linux/list_nulls.h> 25#include <linux/list_nulls.h>
@@ -339,10 +340,11 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
339int rhashtable_init(struct rhashtable *ht, 340int rhashtable_init(struct rhashtable *ht,
340 const struct rhashtable_params *params); 341 const struct rhashtable_params *params);
341 342
342int rhashtable_insert_slow(struct rhashtable *ht, const void *key, 343struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
343 struct rhash_head *obj, 344 const void *key,
344 struct bucket_table *old_tbl); 345 struct rhash_head *obj,
345int rhashtable_insert_rehash(struct rhashtable *ht); 346 struct bucket_table *old_tbl);
347int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
346 348
347int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); 349int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
348void rhashtable_walk_exit(struct rhashtable_iter *iter); 350void rhashtable_walk_exit(struct rhashtable_iter *iter);
@@ -598,9 +600,11 @@ restart:
598 600
599 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); 601 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
600 if (unlikely(new_tbl)) { 602 if (unlikely(new_tbl)) {
601 err = rhashtable_insert_slow(ht, key, obj, new_tbl); 603 tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
602 if (err == -EAGAIN) 604 if (!IS_ERR_OR_NULL(tbl))
603 goto slow_path; 605 goto slow_path;
606
607 err = PTR_ERR(tbl);
604 goto out; 608 goto out;
605 } 609 }
606 610
@@ -611,7 +615,7 @@ restart:
611 if (unlikely(rht_grow_above_100(ht, tbl))) { 615 if (unlikely(rht_grow_above_100(ht, tbl))) {
612slow_path: 616slow_path:
613 spin_unlock_bh(lock); 617 spin_unlock_bh(lock);
614 err = rhashtable_insert_rehash(ht); 618 err = rhashtable_insert_rehash(ht, tbl);
615 rcu_read_unlock(); 619 rcu_read_unlock();
616 if (err) 620 if (err)
617 return err; 621 return err;
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index 80af3cd35ae4..72ce932c69b2 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -71,7 +71,7 @@ struct scpi_ops {
71 int (*sensor_get_value)(u16, u32 *); 71 int (*sensor_get_value)(u16, u32 *);
72}; 72};
73 73
74#if IS_ENABLED(CONFIG_ARM_SCPI_PROTOCOL) 74#if IS_REACHABLE(CONFIG_ARM_SCPI_PROTOCOL)
75struct scpi_ops *get_scpi_ops(void); 75struct scpi_ops *get_scpi_ops(void);
76#else 76#else
77static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } 77static inline struct scpi_ops *get_scpi_ops(void) { return NULL; }
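
IS_ENABLED() is true for =m as well as =y, so a built-in caller could end up referencing get_scpi_ops() in a module it cannot link against; IS_REACHABLE() is true only when the symbol is actually linkable, i.e. SCPI built in, or both SCPI and the caller built as modules. A hedged caller sketch using the sensor op from this header:

    static int read_soc_temp(u16 sensor, u32 *val)
    {
            struct scpi_ops *ops = get_scpi_ops();

            if (!ops)
                    return -ENXIO;  /* stub path: SCPI is not reachable */
            return ops->sensor_get_value(sensor, val);
    }
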
diff --git a/include/linux/signal.h b/include/linux/signal.h
index ab1e0392b5ac..92557bbce7e7 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
239extern void set_current_blocked(sigset_t *); 239extern void set_current_blocked(sigset_t *);
240extern void __set_current_blocked(const sigset_t *); 240extern void __set_current_blocked(const sigset_t *);
241extern int show_unhandled_signals; 241extern int show_unhandled_signals;
242extern int sigsuspend(sigset_t *);
243 242
244struct sigaction { 243struct sigaction {
245#ifndef __ARCH_HAS_IRIX_SIGACTION 244#ifndef __ARCH_HAS_IRIX_SIGACTION
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7c82e3b307a3..2037a861e367 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -158,6 +158,24 @@ size_t ksize(const void *);
158#endif 158#endif
159 159
160/* 160/*
161 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
162 * Intended for arches that get misalignment faults even for 64 bit integer
163 * aligned buffers.
164 */
165#ifndef ARCH_SLAB_MINALIGN
166#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
167#endif
168
169/*
170 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
171 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
172 * aligned pointers.
173 */
174#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
175#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
176#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
177
178/*
161 * Kmalloc array related definitions 179 * Kmalloc array related definitions
162 */ 180 */
163 181
@@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size)
286} 304}
287#endif /* !CONFIG_SLOB */ 305#endif /* !CONFIG_SLOB */
288 306
289void *__kmalloc(size_t size, gfp_t flags); 307void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
290void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags); 308void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
291void kmem_cache_free(struct kmem_cache *, void *); 309void kmem_cache_free(struct kmem_cache *, void *);
292 310
293/* 311/*
@@ -298,11 +316,11 @@ void kmem_cache_free(struct kmem_cache *, void *);
298 * Note that interrupts must be enabled when calling these functions. 316 * Note that interrupts must be enabled when calling these functions.
299 */ 317 */
300void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); 318void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
301bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); 319int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
302 320
303#ifdef CONFIG_NUMA 321#ifdef CONFIG_NUMA
304void *__kmalloc_node(size_t size, gfp_t flags, int node); 322void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
305void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 323void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
306#else 324#else
307static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) 325static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
308{ 326{
@@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f
316#endif 334#endif
317 335
318#ifdef CONFIG_TRACING 336#ifdef CONFIG_TRACING
319extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t); 337extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
320 338
321#ifdef CONFIG_NUMA 339#ifdef CONFIG_NUMA
322extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, 340extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
323 gfp_t gfpflags, 341 gfp_t gfpflags,
324 int node, size_t size); 342 int node, size_t size) __assume_slab_alignment;
325#else 343#else
326static __always_inline void * 344static __always_inline void *
327kmem_cache_alloc_node_trace(struct kmem_cache *s, 345kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
354} 372}
355#endif /* CONFIG_TRACING */ 373#endif /* CONFIG_TRACING */
356 374
357extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); 375extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
358 376
359#ifdef CONFIG_TRACING 377#ifdef CONFIG_TRACING
360extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order); 378extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
361#else 379#else
362static __always_inline void * 380static __always_inline void *
363kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) 381kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
@@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
482 return __kmalloc_node(size, flags, node); 500 return __kmalloc_node(size, flags, node);
483} 501}
484 502
485/*
486 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
487 * Intended for arches that get misalignment faults even for 64 bit integer
488 * aligned buffers.
489 */
490#ifndef ARCH_SLAB_MINALIGN
491#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
492#endif
493
494struct memcg_cache_array { 503struct memcg_cache_array {
495 struct rcu_head rcu; 504 struct rcu_head rcu;
496 struct kmem_cache *entries[0]; 505 struct kmem_cache *entries[0];
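
The __assume_kmalloc_alignment/__assume_slab_alignment/__assume_page_alignment markers wrap the compiler's assume_aligned function attribute, letting the optimizer treat allocator return values as already aligned and drop runtime fixups; moving ARCH_SLAB_MINALIGN up makes it visible to those definitions. A rough sketch of what the marker expands to, assuming a compiler that supports the attribute:

    /* promises the returned pointer is 16-byte aligned */
    void *my_alloc(unsigned long n) __attribute__((assume_aligned(16)));

    /* the compiler may then emit aligned vector loads on my_alloc()'s
     * result without re-checking the low bits at run time
     */
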
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 0adedca24c5b..0e1b1540597a 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -99,7 +99,7 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
99 * grabbing every spinlock (and more). So the "read" side to such a 99 * grabbing every spinlock (and more). So the "read" side to such a
100 * lock is anything which disables preemption. 100 * lock is anything which disables preemption.
101 */ 101 */
102#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP) 102#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
103 103
104/** 104/**
105 * stop_machine: freeze the machine on all CPUs and run this function 105 * stop_machine: freeze the machine on all CPUs and run this function
@@ -118,7 +118,7 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
118 118
119int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data, 119int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
120 const struct cpumask *cpus); 120 const struct cpumask *cpus);
121#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */ 121#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
122 122
123static inline int stop_machine(cpu_stop_fn_t fn, void *data, 123static inline int stop_machine(cpu_stop_fn_t fn, void *data,
124 const struct cpumask *cpus) 124 const struct cpumask *cpus)
@@ -137,5 +137,5 @@ static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
137 return stop_machine(fn, data, cpus); 137 return stop_machine(fn, data, cpus);
138} 138}
139 139
140#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */ 140#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
141#endif /* _LINUX_STOP_MACHINE */ 141#endif /* _LINUX_STOP_MACHINE */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index a156b82dd14c..c2b66a277e98 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename,
524asmlinkage long sys_lchown(const char __user *filename, 524asmlinkage long sys_lchown(const char __user *filename,
525 uid_t user, gid_t group); 525 uid_t user, gid_t group);
526asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group); 526asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
527#ifdef CONFIG_UID16 527#ifdef CONFIG_HAVE_UID16
528asmlinkage long sys_chown16(const char __user *filename, 528asmlinkage long sys_chown16(const char __user *filename,
529 old_uid_t user, old_gid_t group); 529 old_uid_t user, old_gid_t group);
530asmlinkage long sys_lchown16(const char __user *filename, 530asmlinkage long sys_lchown16(const char __user *filename,
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 4014a59828fc..613c29bd6baf 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -438,7 +438,8 @@ static inline void thermal_zone_device_unregister(
438static inline int thermal_zone_bind_cooling_device( 438static inline int thermal_zone_bind_cooling_device(
439 struct thermal_zone_device *tz, int trip, 439 struct thermal_zone_device *tz, int trip,
440 struct thermal_cooling_device *cdev, 440 struct thermal_cooling_device *cdev,
441 unsigned long upper, unsigned long lower) 441 unsigned long upper, unsigned long lower,
442 unsigned int weight)
442{ return -ENODEV; } 443{ return -ENODEV; }
443static inline int thermal_zone_unbind_cooling_device( 444static inline int thermal_zone_unbind_cooling_device(
444 struct thermal_zone_device *tz, int trip, 445 struct thermal_zone_device *tz, int trip,
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 5b04b0a5375b..5e31f1b99037 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -607,7 +607,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
607 607
608/* tty_audit.c */ 608/* tty_audit.c */
609#ifdef CONFIG_AUDIT 609#ifdef CONFIG_AUDIT
610extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, 610extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
611 size_t size, unsigned icanon); 611 size_t size, unsigned icanon);
612extern void tty_audit_exit(void); 612extern void tty_audit_exit(void);
613extern void tty_audit_fork(struct signal_struct *sig); 613extern void tty_audit_fork(struct signal_struct *sig);
@@ -615,8 +615,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
615extern void tty_audit_push(struct tty_struct *tty); 615extern void tty_audit_push(struct tty_struct *tty);
616extern int tty_audit_push_current(void); 616extern int tty_audit_push_current(void);
617#else 617#else
618static inline void tty_audit_add_data(struct tty_struct *tty, 618static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
619 unsigned char *data, size_t size, unsigned icanon) 619 size_t size, unsigned icanon)
620{ 620{
621} 621}
622static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) 622static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
diff --git a/include/linux/types.h b/include/linux/types.h
index 70d8500bddf1..70dd3dfde631 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -35,7 +35,7 @@ typedef __kernel_gid16_t gid16_t;
35 35
36typedef unsigned long uintptr_t; 36typedef unsigned long uintptr_t;
37 37
38#ifdef CONFIG_UID16 38#ifdef CONFIG_HAVE_UID16
39/* This is defined by include/asm-{arch}/posix_types.h */ 39/* This is defined by include/asm-{arch}/posix_types.h */
40typedef __kernel_old_uid_t old_uid_t; 40typedef __kernel_old_uid_t old_uid_t;
41typedef __kernel_old_gid_t old_gid_t; 41typedef __kernel_old_gid_t old_gid_t;
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 0bdc72f36905..4a29c75b146e 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -21,7 +21,7 @@
21 * Authors: 21 * Authors:
22 * Srikar Dronamraju 22 * Srikar Dronamraju
23 * Jim Keniston 23 * Jim Keniston
24 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 24 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
25 */ 25 */
26 26
27#include <linux/errno.h> 27#include <linux/errno.h>
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 9948c874e3f1..1d0043dc34e4 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -47,4 +47,7 @@
47/* device generates spurious wakeup, ignore remote wakeup capability */ 47/* device generates spurious wakeup, ignore remote wakeup capability */
48#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9) 48#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9)
49 49
50/* device can't handle Link Power Management */
51#define USB_QUIRK_NO_LPM BIT(10)
52
50#endif /* __LINUX_USB_QUIRKS_H */ 53#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 610a86a892b8..ddb440975382 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -44,9 +44,6 @@ struct vfio_device_ops {
44 void (*request)(void *device_data, unsigned int count); 44 void (*request)(void *device_data, unsigned int count);
45}; 45};
46 46
47extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
48extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev);
49
50extern int vfio_add_group_dev(struct device *dev, 47extern int vfio_add_group_dev(struct device *dev,
51 const struct vfio_device_ops *ops, 48 const struct vfio_device_ops *ops,
52 void *device_data); 49 void *device_data);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 1e1bf9f963a9..513b36f04dfd 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -145,7 +145,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
145 list_del(&old->task_list); 145 list_del(&old->task_list);
146} 146}
147 147
148typedef int wait_bit_action_f(struct wait_bit_key *); 148typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
149void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 149void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
150void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); 150void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
151void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 151void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
@@ -960,10 +960,10 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
960 } while (0) 960 } while (0)
961 961
962 962
963extern int bit_wait(struct wait_bit_key *); 963extern int bit_wait(struct wait_bit_key *, int);
964extern int bit_wait_io(struct wait_bit_key *); 964extern int bit_wait_io(struct wait_bit_key *, int);
965extern int bit_wait_timeout(struct wait_bit_key *); 965extern int bit_wait_timeout(struct wait_bit_key *, int);
966extern int bit_wait_io_timeout(struct wait_bit_key *); 966extern int bit_wait_io_timeout(struct wait_bit_key *, int);
967 967
968/** 968/**
969 * wait_on_bit - wait for a bit to be cleared 969 * wait_on_bit - wait for a bit to be cleared
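
Threading the wait mode through to the bit-wait action lets the action itself honour TASK_INTERRUPTIBLE and TASK_KILLABLE waits by checking pending signals against that mode. A sketch of an action written against the new signature, with my_bit_wait() as a hypothetical name:

    #include <linux/sched.h>
    #include <linux/wait.h>

    static int my_bit_wait(struct wait_bit_key *key, int mode)
    {
            schedule();
            if (signal_pending_state(mode, current))
                    return -EINTR;  /* abort killable/interruptible waits */
            return 0;
    }
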
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index b36d837c701e..2a91a0561a47 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -62,6 +62,7 @@ struct unix_sock {
62#define UNIX_GC_CANDIDATE 0 62#define UNIX_GC_CANDIDATE 0
63#define UNIX_GC_MAYBE_CYCLE 1 63#define UNIX_GC_MAYBE_CYCLE 1
64 struct socket_wq peer_wq; 64 struct socket_wq peer_wq;
65 wait_queue_t peer_wake;
65}; 66};
66 67
67static inline struct unix_sock *unix_sk(const struct sock *sk) 68static inline struct unix_sock *unix_sk(const struct sock *sk)
diff --git a/include/net/dst.h b/include/net/dst.h
index 1279f9b09791..c7329dcd90cc 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -322,6 +322,39 @@ static inline void skb_dst_force(struct sk_buff *skb)
322 } 322 }
323} 323}
324 324
325/**
326 * dst_hold_safe - Take a reference on a dst if possible
327 * @dst: pointer to dst entry
328 *
329 * This helper returns false if it could not safely
330 * take a reference on a dst.
331 */
332static inline bool dst_hold_safe(struct dst_entry *dst)
333{
334 if (dst->flags & DST_NOCACHE)
335 return atomic_inc_not_zero(&dst->__refcnt);
336 dst_hold(dst);
337 return true;
338}
339
340/**
341 * skb_dst_force_safe - makes sure skb dst is refcounted
342 * @skb: buffer
343 *
344 * If dst is not yet refcounted and not destroyed, grab a ref on it.
345 */
346static inline void skb_dst_force_safe(struct sk_buff *skb)
347{
348 if (skb_dst_is_noref(skb)) {
349 struct dst_entry *dst = skb_dst(skb);
350
351 if (!dst_hold_safe(dst))
352 dst = NULL;
353
354 skb->_skb_refdst = (unsigned long)dst;
355 }
356}
357
325 358
326/** 359/**
327 * __skb_tunnel_rx - prepare skb for rx reinsert 360 * __skb_tunnel_rx - prepare skb for rx reinsert
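
An uncached (DST_NOCACHE) dst is freed as soon as its refcount reaches zero, so a plain dst_hold() could resurrect an entry that is already on its way out; atomic_inc_not_zero() makes the grab fail cleanly instead. A hedged caller sketch for stashing a route away from packet context, with sk as a hypothetical socket:

    struct dst_entry *dst = skb_dst(skb);

    if (dst && dst_hold_safe(dst))
            sk->sk_rx_dst = dst;    /* the socket now owns a reference */
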
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 2134e6d815bc..625bdf95d673 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -210,18 +210,37 @@ struct inet_sock {
210#define IP_CMSG_ORIGDSTADDR BIT(6) 210#define IP_CMSG_ORIGDSTADDR BIT(6)
211#define IP_CMSG_CHECKSUM BIT(7) 211#define IP_CMSG_CHECKSUM BIT(7)
212 212
213/* SYNACK messages might be attached to request sockets. 213/**
214 * sk_to_full_sk - Access to a full socket
215 * @sk: pointer to a socket
216 *
217 * SYNACK messages might be attached to request sockets.
214 * Some places want to reach the listener in this case. 218 * Some places want to reach the listener in this case.
215 */ 219 */
216static inline struct sock *skb_to_full_sk(const struct sk_buff *skb) 220static inline struct sock *sk_to_full_sk(struct sock *sk)
217{ 221{
218 struct sock *sk = skb->sk; 222#ifdef CONFIG_INET
219
220 if (sk && sk->sk_state == TCP_NEW_SYN_RECV) 223 if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
221 sk = inet_reqsk(sk)->rsk_listener; 224 sk = inet_reqsk(sk)->rsk_listener;
225#endif
226 return sk;
227}
228
229/* sk_to_full_sk() variant with a const argument */
230static inline const struct sock *sk_const_to_full_sk(const struct sock *sk)
231{
232#ifdef CONFIG_INET
233 if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
234 sk = ((const struct request_sock *)sk)->rsk_listener;
235#endif
222 return sk; 236 return sk;
223} 237}
224 238
239static inline struct sock *skb_to_full_sk(const struct sk_buff *skb)
240{
241 return sk_to_full_sk(skb->sk);
242}
243
225static inline struct inet_sock *inet_sk(const struct sock *sk) 244static inline struct inet_sock *inet_sk(const struct sock *sk)
226{ 245{
227 return (struct inet_sock *)sk; 246 return (struct inet_sock *)sk;
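
skb->sk on a SYNACK points at a request socket, so code that wants listener state must first hop to rsk_listener; sk_to_full_sk() and its const variant centralize that hop, and skb_to_full_sk() keeps the skb-based entry point with the same semantics as before. A usage sketch, with mark as a hypothetical output:

    const struct sock *sk = skb_to_full_sk(skb);
    u32 mark = 0;

    if (sk)
            mark = sk->sk_mark;     /* the listener's mark, even for a SYNACK */
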
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 4a6009d4486b..235c7811a86a 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -78,6 +78,7 @@ void inet_initpeers(void) __init;
78static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip) 78static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
79{ 79{
80 iaddr->a4.addr = ip; 80 iaddr->a4.addr = ip;
81 iaddr->a4.vif = 0;
81 iaddr->family = AF_INET; 82 iaddr->family = AF_INET;
82} 83}
83 84
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index aaf9700fc9e5..fb961a576abe 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -167,7 +167,8 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
167 167
168static inline u32 rt6_get_cookie(const struct rt6_info *rt) 168static inline u32 rt6_get_cookie(const struct rt6_info *rt)
169{ 169{
170 if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE)) 170 if (rt->rt6i_flags & RTF_PCPU ||
171 (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
171 rt = (struct rt6_info *)(rt->dst.from); 172 rt = (struct rt6_info *)(rt->dst.from);
172 173
173 return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; 174 return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2bfb2ad2fab1..877f682989b8 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -133,27 +133,18 @@ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
133/* 133/*
134 * Store a destination cache entry in a socket 134 * Store a destination cache entry in a socket
135 */ 135 */
136static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst, 136static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
137 const struct in6_addr *daddr, 137 const struct in6_addr *daddr,
138 const struct in6_addr *saddr) 138 const struct in6_addr *saddr)
139{ 139{
140 struct ipv6_pinfo *np = inet6_sk(sk); 140 struct ipv6_pinfo *np = inet6_sk(sk);
141 struct rt6_info *rt = (struct rt6_info *) dst;
142 141
142 np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst);
143 sk_setup_caps(sk, dst); 143 sk_setup_caps(sk, dst);
144 np->daddr_cache = daddr; 144 np->daddr_cache = daddr;
145#ifdef CONFIG_IPV6_SUBTREES 145#ifdef CONFIG_IPV6_SUBTREES
146 np->saddr_cache = saddr; 146 np->saddr_cache = saddr;
147#endif 147#endif
148 np->dst_cookie = rt6_get_cookie(rt);
149}
150
151static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
152 struct in6_addr *daddr, struct in6_addr *saddr)
153{
154 spin_lock(&sk->sk_dst_lock);
155 __ip6_dst_store(sk, dst, daddr, saddr);
156 spin_unlock(&sk->sk_dst_lock);
157} 148}
158 149
159static inline bool ipv6_unicast_destination(const struct sk_buff *skb) 150static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index aaee6fa02cf1..ff788b665277 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -90,11 +90,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
90 err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); 90 err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
91 91
92 if (net_xmit_eval(err) == 0) { 92 if (net_xmit_eval(err) == 0) {
93 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 93 struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
94 u64_stats_update_begin(&tstats->syncp); 94 u64_stats_update_begin(&tstats->syncp);
95 tstats->tx_bytes += pkt_len; 95 tstats->tx_bytes += pkt_len;
96 tstats->tx_packets++; 96 tstats->tx_packets++;
97 u64_stats_update_end(&tstats->syncp); 97 u64_stats_update_end(&tstats->syncp);
98 put_cpu_ptr(tstats);
98 } else { 99 } else {
99 stats->tx_errors++; 100 stats->tx_errors++;
100 stats->tx_aborted_errors++; 101 stats->tx_aborted_errors++;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index f6dafec9102c..62a750a6a8f8 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -287,12 +287,13 @@ static inline void iptunnel_xmit_stats(int err,
287 struct pcpu_sw_netstats __percpu *stats) 287 struct pcpu_sw_netstats __percpu *stats)
288{ 288{
289 if (err > 0) { 289 if (err > 0) {
290 struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats); 290 struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);
291 291
292 u64_stats_update_begin(&tstats->syncp); 292 u64_stats_update_begin(&tstats->syncp);
293 tstats->tx_bytes += err; 293 tstats->tx_bytes += err;
294 tstats->tx_packets++; 294 tstats->tx_packets++;
295 u64_stats_update_end(&tstats->syncp); 295 u64_stats_update_end(&tstats->syncp);
296 put_cpu_ptr(tstats);
296 } else if (err < 0) { 297 } else if (err < 0) {
297 err_stats->tx_errors++; 298 err_stats->tx_errors++;
298 err_stats->tx_aborted_errors++; 299 err_stats->tx_aborted_errors++;
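Both tunnel-stats hunks (here and in ip6_tunnel.h above) replace this_cpu_ptr() with the get_cpu_ptr()/put_cpu_ptr() pair. The u64_stats_update_begin()/end() section is a per-CPU sequence-counter write side; if the writer could be preempted inside it and a second task entered the same section on that CPU, the counter would be left odd and the statistics torn. get_cpu_ptr() disables preemption for exactly that window. A rough single-structure sketch of the sequence-counter idea (sequentially consistent atomics for simplicity; names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

struct stats {
	atomic_uint seq;		/* odd while an update is in flight */
	_Atomic uint64_t tx_bytes;
	_Atomic uint64_t tx_packets;
};

/* Writer: must be exclusive per struct, which in the kernel is what
 * get_cpu_ptr()'s preempt_disable() guarantees for per-CPU stats. */
static void stats_add(struct stats *s, uint64_t bytes)
{
	atomic_fetch_add(&s->seq, 1);		/* begin: seq becomes odd */
	atomic_fetch_add(&s->tx_bytes, bytes);
	atomic_fetch_add(&s->tx_packets, 1);
	atomic_fetch_add(&s->seq, 1);		/* end: seq even again */
}

/* Reader: retries while it observes an update in flight. */
static uint64_t stats_read_bytes(struct stats *s)
{
	unsigned int begin, end;
	uint64_t v;

	do {
		begin = atomic_load(&s->seq);
		v = atomic_load(&s->tx_bytes);
		end = atomic_load(&s->seq);
	} while ((begin & 1) || begin != end);
	return v;
}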
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index e1a10b0ac0b0..9a5c9f013784 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -205,6 +205,7 @@ extern rwlock_t ip6_ra_lock;
205 */ 205 */
206 206
207struct ipv6_txoptions { 207struct ipv6_txoptions {
208 atomic_t refcnt;
208 /* Length of this structure */ 209 /* Length of this structure */
209 int tot_len; 210 int tot_len;
210 211
@@ -217,7 +218,7 @@ struct ipv6_txoptions {
217 struct ipv6_opt_hdr *dst0opt; 218 struct ipv6_opt_hdr *dst0opt;
218 struct ipv6_rt_hdr *srcrt; /* Routing Header */ 219 struct ipv6_rt_hdr *srcrt; /* Routing Header */
219 struct ipv6_opt_hdr *dst1opt; 220 struct ipv6_opt_hdr *dst1opt;
220 221 struct rcu_head rcu;
221 /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */ 222 /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
222}; 223};
223 224
@@ -252,6 +253,24 @@ struct ipv6_fl_socklist {
252 struct rcu_head rcu; 253 struct rcu_head rcu;
253}; 254};
254 255
256static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
257{
258 struct ipv6_txoptions *opt;
259
260 rcu_read_lock();
261 opt = rcu_dereference(np->opt);
262 if (opt && !atomic_inc_not_zero(&opt->refcnt))
263 opt = NULL;
264 rcu_read_unlock();
265 return opt;
266}
267
268static inline void txopt_put(struct ipv6_txoptions *opt)
269{
270 if (opt && atomic_dec_and_test(&opt->refcnt))
271 kfree_rcu(opt, rcu);
272}
273
255struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label); 274struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
256struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, 275struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
257 struct ip6_flowlabel *fl, 276 struct ip6_flowlabel *fl,
@@ -490,6 +509,7 @@ struct ip6_create_arg {
490 u32 user; 509 u32 user;
491 const struct in6_addr *src; 510 const struct in6_addr *src;
492 const struct in6_addr *dst; 511 const struct in6_addr *dst;
512 int iif;
493 u8 ecn; 513 u8 ecn;
494}; 514};
495 515
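The ipv6.h changes above convert per-socket tx options from a pointer guarded by the (now removed) socket dst lock into an RCU-managed, reference-counted object: txopt_get() hands out an option block only when it can raise the refcount from a non-zero value, and txopt_put() frees the block via kfree_rcu() on the final drop. A stripped-down sketch of that get/put lifetime (no real RCU here, so the load-then-reference window that RCU closes in the kernel is only noted in comments; names are illustrative):

#include <stdatomic.h>
#include <stdlib.h>

struct txopts {
	atomic_int refcnt;
	int tot_len;
};

/* txopt_get() analogue: a failed increment-from-zero means the block is
 * already being torn down, so behave as if no options were set. In the
 * kernel, rcu_read_lock() keeps the block from being freed between the
 * pointer load and the refcount attempt. */
static struct txopts *opts_get(struct txopts *_Atomic *slot)
{
	struct txopts *o = atomic_load(slot);
	int old;

	if (!o)
		return NULL;
	old = atomic_load(&o->refcnt);
	do {
		if (old == 0)
			return NULL;
	} while (!atomic_compare_exchange_weak(&o->refcnt, &old, old + 1));
	return o;
}

/* txopt_put() analogue: plain free() stands in for kfree_rcu(). */
static void opts_put(struct txopts *o)
{
	if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);
}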
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 82045fca388b..760bc4d5a2cf 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -2003,8 +2003,10 @@ enum ieee80211_hw_flags {
2003 * it shouldn't be set. 2003 * it shouldn't be set.
2004 * 2004 *
2005 * @max_tx_aggregation_subframes: maximum number of subframes in an 2005 * @max_tx_aggregation_subframes: maximum number of subframes in an
2006 * aggregate an HT driver will transmit, used by the peer as a 2006 * aggregate an HT driver will transmit. Though ADDBA will advertise
2007 * hint to size its reorder buffer. 2007 * a constant value of 64 as some older APs can crash if the window
2008 * size is smaller (an example is LinkSys WRT120N with FW v1.0.07
2009 * build 002 Jun 18 2012).
2008 * 2010 *
2009 * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX 2011 * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX
2010 * (if %IEEE80211_HW_QUEUE_CONTROL is set) 2012 * (if %IEEE80211_HW_QUEUE_CONTROL is set)
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index bf3937431030..2d8edaad29cb 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -181,8 +181,7 @@ void ndisc_cleanup(void);
181int ndisc_rcv(struct sk_buff *skb); 181int ndisc_rcv(struct sk_buff *skb);
182 182
183void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, 183void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
184 const struct in6_addr *daddr, const struct in6_addr *saddr, 184 const struct in6_addr *daddr, const struct in6_addr *saddr);
185 struct sk_buff *oskb);
186 185
187void ndisc_send_rs(struct net_device *dev, 186void ndisc_send_rs(struct net_device *dev,
188 const struct in6_addr *saddr, const struct in6_addr *daddr); 187 const struct in6_addr *saddr, const struct in6_addr *daddr);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index c9149cc0a02d..4bd7508bedc9 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -618,6 +618,8 @@ struct nft_expr_ops {
618 void (*eval)(const struct nft_expr *expr, 618 void (*eval)(const struct nft_expr *expr,
619 struct nft_regs *regs, 619 struct nft_regs *regs,
620 const struct nft_pktinfo *pkt); 620 const struct nft_pktinfo *pkt);
621 int (*clone)(struct nft_expr *dst,
622 const struct nft_expr *src);
621 unsigned int size; 623 unsigned int size;
622 624
623 int (*init)(const struct nft_ctx *ctx, 625 int (*init)(const struct nft_ctx *ctx,
@@ -660,10 +662,20 @@ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
660int nft_expr_dump(struct sk_buff *skb, unsigned int attr, 662int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
661 const struct nft_expr *expr); 663 const struct nft_expr *expr);
662 664
663static inline void nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) 665static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
664{ 666{
667 int err;
668
665 __module_get(src->ops->type->owner); 669 __module_get(src->ops->type->owner);
666 memcpy(dst, src, src->ops->size); 670 if (src->ops->clone) {
671 dst->ops = src->ops;
672 err = src->ops->clone(dst, src);
673 if (err < 0)
674 return err;
675 } else {
676 memcpy(dst, src, src->ops->size);
677 }
678 return 0;
667} 679}
668 680
669/** 681/**
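The nf_tables hunk turns expression cloning from an unconditional memcpy() into an operation that can fail: expression types that hold references or per-CPU state supply a clone callback, everything else still falls back to the flat copy. The shape of that pattern in isolation (types and names are illustrative, and the private area size is an assumption):

#include <string.h>

struct expr;

struct expr_ops {
	unsigned int size;	/* total expression size, assumed <= sizeof(struct expr) */
	/* optional deep-copy hook for types with refcounts or per-CPU data */
	int (*clone)(struct expr *dst, const struct expr *src);
};

struct expr {
	const struct expr_ops *ops;
	unsigned char data[32];		/* per-type private area */
};

static int expr_clone(struct expr *dst, const struct expr *src)
{
	if (src->ops->clone) {
		dst->ops = src->ops;
		return src->ops->clone(dst, src);	/* may return -errno */
	}
	/* plain-old-data expression: a byte copy is sufficient */
	memcpy(dst, src, src->ops->size);
	return 0;
}

Callers of nft_expr_clone() must now check the return value, which is the point of adding the ->clone() operation.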
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 4c79ce8c1f92..b2a8e6338576 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -61,6 +61,9 @@ struct Qdisc {
61 */ 61 */
62#define TCQ_F_WARN_NONWC (1 << 16) 62#define TCQ_F_WARN_NONWC (1 << 16)
63#define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */ 63#define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */
64#define TCQ_F_NOPARENT 0x40 /* root of its hierarchy :
65 * qdisc_tree_decrease_qlen() should stop.
66 */
64 u32 limit; 67 u32 limit;
65 const struct Qdisc_ops *ops; 68 const struct Qdisc_ops *ops;
66 struct qdisc_size_table __rcu *stab; 69 struct qdisc_size_table __rcu *stab;
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 495c87e367b3..eea9bdeecba2 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -775,10 +775,10 @@ struct sctp_transport {
775 hb_sent:1, 775 hb_sent:1,
776 776
777 /* Is the Path MTU update pending on this transport */ 777 /* Is the Path MTU update pending on this transport */
778 pmtu_pending:1; 778 pmtu_pending:1,
779 779
780 /* Has this transport moved the ctsn since we last sacked */ 780 /* Has this transport moved the ctsn since we last sacked */
781 __u32 sack_generation; 781 sack_generation:1;
782 u32 dst_cookie; 782 u32 dst_cookie;
783 783
784 struct flowi fl; 784 struct flowi fl;
@@ -1482,19 +1482,20 @@ struct sctp_association {
1482 prsctp_capable:1, /* Can peer do PR-SCTP? */ 1482 prsctp_capable:1, /* Can peer do PR-SCTP? */
1483 auth_capable:1; /* Is peer doing SCTP-AUTH? */ 1483 auth_capable:1; /* Is peer doing SCTP-AUTH? */
1484 1484
1485 /* Ack State : This flag indicates if the next received 1485 /* sack_needed : This flag indicates if the next received
1486 * : packet is to be responded to with a 1486 * : packet is to be responded to with a
1487 * : SACK. This is initializedto 0. When a packet 1487 * : SACK. This is initialized to 0. When a packet
1488 * : is received it is incremented. If this value 1488 * : is received sack_cnt is incremented. If this value
1489 * : reaches 2 or more, a SACK is sent and the 1489 * : reaches 2 or more, a SACK is sent and the
1490 * : value is reset to 0. Note: This is used only 1490 * : value is reset to 0. Note: This is used only
1491 * : when no DATA chunks are received out of 1491 * : when no DATA chunks are received out of
1492 * : order. When DATA chunks are out of order, 1492 * : order. When DATA chunks are out of order,
1493 * : SACK's are not delayed (see Section 6). 1493 * : SACK's are not delayed (see Section 6).
1494 */ 1494 */
1495 __u8 sack_needed; /* Do we need to sack the peer? */ 1495 __u8 sack_needed:1, /* Do we need to sack the peer? */
1496 sack_generation:1,
1497 zero_window_announced:1;
1496 __u32 sack_cnt; 1498 __u32 sack_cnt;
1497 __u32 sack_generation;
1498 1499
1499 __u32 adaptation_ind; /* Adaptation Code point. */ 1500 __u32 adaptation_ind; /* Adaptation Code point. */
1500 1501
diff --git a/include/net/sock.h b/include/net/sock.h
index bbf7c2cf15b4..14d3c0734007 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -254,7 +254,6 @@ struct cg_proto;
254 * @sk_wq: sock wait queue and async head 254 * @sk_wq: sock wait queue and async head
255 * @sk_rx_dst: receive input route used by early demux 255 * @sk_rx_dst: receive input route used by early demux
256 * @sk_dst_cache: destination cache 256 * @sk_dst_cache: destination cache
257 * @sk_dst_lock: destination cache lock
258 * @sk_policy: flow policy 257 * @sk_policy: flow policy
259 * @sk_receive_queue: incoming packets 258 * @sk_receive_queue: incoming packets
260 * @sk_wmem_alloc: transmit queue bytes committed 259 * @sk_wmem_alloc: transmit queue bytes committed
@@ -384,14 +383,16 @@ struct sock {
384 int sk_rcvbuf; 383 int sk_rcvbuf;
385 384
386 struct sk_filter __rcu *sk_filter; 385 struct sk_filter __rcu *sk_filter;
387 struct socket_wq __rcu *sk_wq; 386 union {
388 387 struct socket_wq __rcu *sk_wq;
388 struct socket_wq *sk_wq_raw;
389 };
389#ifdef CONFIG_XFRM 390#ifdef CONFIG_XFRM
390 struct xfrm_policy *sk_policy[2]; 391 struct xfrm_policy __rcu *sk_policy[2];
391#endif 392#endif
392 struct dst_entry *sk_rx_dst; 393 struct dst_entry *sk_rx_dst;
393 struct dst_entry __rcu *sk_dst_cache; 394 struct dst_entry __rcu *sk_dst_cache;
394 spinlock_t sk_dst_lock; 395 /* Note: 32bit hole on 64bit arches */
395 atomic_t sk_wmem_alloc; 396 atomic_t sk_wmem_alloc;
396 atomic_t sk_omem_alloc; 397 atomic_t sk_omem_alloc;
397 int sk_sndbuf; 398 int sk_sndbuf;
@@ -403,6 +404,7 @@ struct sock {
403 sk_userlocks : 4, 404 sk_userlocks : 4,
404 sk_protocol : 8, 405 sk_protocol : 8,
405 sk_type : 16; 406 sk_type : 16;
407#define SK_PROTOCOL_MAX U8_MAX
406 kmemcheck_bitfield_end(flags); 408 kmemcheck_bitfield_end(flags);
407 int sk_wmem_queued; 409 int sk_wmem_queued;
408 gfp_t sk_allocation; 410 gfp_t sk_allocation;
@@ -739,6 +741,8 @@ enum sock_flags {
739 SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ 741 SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
740}; 742};
741 743
744#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
745
742static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) 746static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
743{ 747{
744 nsk->sk_flags = osk->sk_flags; 748 nsk->sk_flags = osk->sk_flags;
@@ -813,7 +817,7 @@ void sk_stream_write_space(struct sock *sk);
813static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) 817static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
814{ 818{
815 /* dont let skb dst not refcounted, we are going to leave rcu lock */ 819 /* dont let skb dst not refcounted, we are going to leave rcu lock */
816 skb_dst_force(skb); 820 skb_dst_force_safe(skb);
817 821
818 if (!sk->sk_backlog.tail) 822 if (!sk->sk_backlog.tail)
819 sk->sk_backlog.head = skb; 823 sk->sk_backlog.head = skb;
@@ -2005,10 +2009,27 @@ static inline unsigned long sock_wspace(struct sock *sk)
2005 return amt; 2009 return amt;
2006} 2010}
2007 2011
2008static inline void sk_wake_async(struct sock *sk, int how, int band) 2012/* Note:
2013 * We use sk->sk_wq_raw, from contexts knowing this
2014 * pointer is not NULL and cannot disappear/change.
2015 */
2016static inline void sk_set_bit(int nr, struct sock *sk)
2017{
2018 set_bit(nr, &sk->sk_wq_raw->flags);
2019}
2020
2021static inline void sk_clear_bit(int nr, struct sock *sk)
2022{
2023 clear_bit(nr, &sk->sk_wq_raw->flags);
2024}
2025
2026static inline void sk_wake_async(const struct sock *sk, int how, int band)
2009{ 2027{
2010 if (sock_flag(sk, SOCK_FASYNC)) 2028 if (sock_flag(sk, SOCK_FASYNC)) {
2011 sock_wake_async(sk->sk_socket, how, band); 2029 rcu_read_lock();
2030 sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
2031 rcu_read_unlock();
2032 }
2012} 2033}
2013 2034
2014/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might 2035/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
@@ -2226,6 +2247,31 @@ static inline bool sk_listener(const struct sock *sk)
2226 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); 2247 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
2227} 2248}
2228 2249
2250/**
2251 * sk_state_load - read sk->sk_state for lockless contexts
2252 * @sk: socket pointer
2253 *
2254 * Paired with sk_state_store(). Used in places we do not hold socket lock :
2255 * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
2256 */
2257static inline int sk_state_load(const struct sock *sk)
2258{
2259 return smp_load_acquire(&sk->sk_state);
2260}
2261
2262/**
2263 * sk_state_store - update sk->sk_state
2264 * @sk: socket pointer
2265 * @newstate: new state
2266 *
2267 * Paired with sk_state_load(). Should be used in contexts where
2268 * state change might impact lockless readers.
2269 */
2270static inline void sk_state_store(struct sock *sk, int newstate)
2271{
2272 smp_store_release(&sk->sk_state, newstate);
2273}
2274
2229void sock_enable_timestamp(struct sock *sk, int flag); 2275void sock_enable_timestamp(struct sock *sk, int flag);
2230int sock_get_timestamp(struct sock *, struct timeval __user *); 2276int sock_get_timestamp(struct sock *, struct timeval __user *);
2231int sock_get_timestampns(struct sock *, struct timespec __user *); 2277int sock_get_timestampns(struct sock *, struct timespec __user *);
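sk_state_load() and sk_state_store() pair smp_load_acquire() with smp_store_release(). In C11 terms (a user-space analogue; sk_state here is a plain global rather than a socket field):

#include <stdatomic.h>

static _Atomic int sk_state;

/* Reader side: acquire guarantees that everything the writer published
 * before the state change is visible once the new state is observed. */
static int state_load(void)
{
	return atomic_load_explicit(&sk_state, memory_order_acquire);
}

/* Writer side: release publishes all prior writes together with the new
 * state, so lockless readers such as tcp_poll() or tcp_get_info() see a
 * consistent socket once they observe, say, an established state. */
static void state_store(int newstate)
{
	atomic_store_explicit(&sk_state, newstate, memory_order_release);
}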
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index bc865e244efe..1d22ce9f352e 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -323,7 +323,7 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
323 struct net_device *filter_dev, 323 struct net_device *filter_dev,
324 int idx) 324 int idx)
325{ 325{
326 return -EOPNOTSUPP; 326 return idx;
327} 327}
328 328
329static inline void switchdev_port_fwd_mark_set(struct net_device *dev, 329static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index c1c899c3a51b..e289ada6adf6 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -79,7 +79,7 @@ struct vxlanhdr {
79}; 79};
80 80
81/* VXLAN header flags. */ 81/* VXLAN header flags. */
82#define VXLAN_HF_RCO BIT(24) 82#define VXLAN_HF_RCO BIT(21)
83#define VXLAN_HF_VNI BIT(27) 83#define VXLAN_HF_VNI BIT(27)
84#define VXLAN_HF_GBP BIT(31) 84#define VXLAN_HF_GBP BIT(31)
85 85
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 4a9c21f9b4ea..d6f6e5006ee9 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -548,6 +548,7 @@ struct xfrm_policy {
548 u16 family; 548 u16 family;
549 struct xfrm_sec_ctx *security; 549 struct xfrm_sec_ctx *security;
550 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH]; 550 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
551 struct rcu_head rcu;
551}; 552};
552 553
553static inline struct net *xp_net(const struct xfrm_policy *xp) 554static inline struct net *xp_net(const struct xfrm_policy *xp)
@@ -1141,12 +1142,14 @@ static inline int xfrm6_route_forward(struct sk_buff *skb)
1141 return xfrm_route_forward(skb, AF_INET6); 1142 return xfrm_route_forward(skb, AF_INET6);
1142} 1143}
1143 1144
1144int __xfrm_sk_clone_policy(struct sock *sk); 1145int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
1145 1146
1146static inline int xfrm_sk_clone_policy(struct sock *sk) 1147static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1147{ 1148{
1148 if (unlikely(sk->sk_policy[0] || sk->sk_policy[1])) 1149 sk->sk_policy[0] = NULL;
1149 return __xfrm_sk_clone_policy(sk); 1150 sk->sk_policy[1] = NULL;
1151 if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
1152 return __xfrm_sk_clone_policy(sk, osk);
1150 return 0; 1153 return 0;
1151} 1154}
1152 1155
@@ -1154,12 +1157,16 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1154 1157
1155static inline void xfrm_sk_free_policy(struct sock *sk) 1158static inline void xfrm_sk_free_policy(struct sock *sk)
1156{ 1159{
1157 if (unlikely(sk->sk_policy[0] != NULL)) { 1160 struct xfrm_policy *pol;
1158 xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX); 1161
1162 pol = rcu_dereference_protected(sk->sk_policy[0], 1);
1163 if (unlikely(pol != NULL)) {
1164 xfrm_policy_delete(pol, XFRM_POLICY_MAX);
1159 sk->sk_policy[0] = NULL; 1165 sk->sk_policy[0] = NULL;
1160 } 1166 }
1161 if (unlikely(sk->sk_policy[1] != NULL)) { 1167 pol = rcu_dereference_protected(sk->sk_policy[1], 1);
1162 xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1); 1168 if (unlikely(pol != NULL)) {
1169 xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
1163 sk->sk_policy[1] = NULL; 1170 sk->sk_policy[1] = NULL;
1164 } 1171 }
1165} 1172}
@@ -1169,7 +1176,7 @@ void xfrm_garbage_collect(struct net *net);
1169#else 1176#else
1170 1177
1171static inline void xfrm_sk_free_policy(struct sock *sk) {} 1178static inline void xfrm_sk_free_policy(struct sock *sk) {}
1172static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; } 1179static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
1173static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; } 1180static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1174static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; } 1181static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1175static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb) 1182static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 188df91d5851..ec9b44dd3d80 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -237,6 +237,8 @@ struct ib_vendor_mad {
237 u8 data[IB_MGMT_VENDOR_DATA]; 237 u8 data[IB_MGMT_VENDOR_DATA];
238}; 238};
239 239
240#define IB_MGMT_CLASSPORTINFO_ATTR_ID cpu_to_be16(0x0001)
241
240struct ib_class_port_info { 242struct ib_class_port_info {
241 u8 base_version; 243 u8 base_version;
242 u8 class_version; 244 u8 class_version;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9a68a19532ba..120da1d7f57e 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1271,6 +1271,7 @@ struct ib_uobject {
1271 int id; /* index into kernel idr */ 1271 int id; /* index into kernel idr */
1272 struct kref ref; 1272 struct kref ref;
1273 struct rw_semaphore mutex; /* protects .live */ 1273 struct rw_semaphore mutex; /* protects .live */
1274 struct rcu_head rcu; /* kfree_rcu() overhead */
1274 int live; 1275 int live;
1275}; 1276};
1276 1277
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index ed527121031d..fcfa3d7f5e7e 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -668,6 +668,9 @@ struct Scsi_Host {
668 unsigned use_blk_mq:1; 668 unsigned use_blk_mq:1;
669 unsigned use_cmd_list:1; 669 unsigned use_cmd_list:1;
670 670
671 /* Host responded with short (<36 bytes) INQUIRY result */
672 unsigned short_inquiry:1;
673
671 /* 674 /*
672 * Optional work queue to be utilized by the transport 675 * Optional work queue to be utilized by the transport
673 */ 676 */
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index 2ae8812d7b1a..94dc6a9772e0 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -93,6 +93,9 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
93#define AZX_REG_HSW_EM4 0x100c 93#define AZX_REG_HSW_EM4 0x100c
94#define AZX_REG_HSW_EM5 0x1010 94#define AZX_REG_HSW_EM5 0x1010
95 95
96/* Skylake/Broxton display HD-A controller Extended Mode registers */
97#define AZX_REG_SKL_EM4L 0x1040
98
96/* PCI space */ 99/* PCI space */
97#define AZX_PCIREG_TCSEL 0x44 100#define AZX_PCIREG_TCSEL 0x44
98 101
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 7855cfe46b69..95a937eafb79 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -398,6 +398,7 @@ int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
398int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm, 398int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
399 const struct snd_soc_dapm_route *route, int num); 399 const struct snd_soc_dapm_route *route, int num);
400void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w); 400void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
401void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm);
401 402
402/* dapm events */ 403/* dapm events */
403void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream, 404void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 0a2c74008e53..aabf0aca0171 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -474,7 +474,7 @@ struct se_cmd {
474 struct completion cmd_wait_comp; 474 struct completion cmd_wait_comp;
475 const struct target_core_fabric_ops *se_tfo; 475 const struct target_core_fabric_ops *se_tfo;
476 sense_reason_t (*execute_cmd)(struct se_cmd *); 476 sense_reason_t (*execute_cmd)(struct se_cmd *);
477 sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool); 477 sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
478 void *protocol_data; 478 void *protocol_data;
479 479
480 unsigned char *t_task_cdb; 480 unsigned char *t_task_cdb;
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 628e6e64c2fb..c2e5d6cb34e3 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -186,6 +186,7 @@ header-y += if_tunnel.h
186header-y += if_vlan.h 186header-y += if_vlan.h
187header-y += if_x25.h 187header-y += if_x25.h
188header-y += igmp.h 188header-y += igmp.h
189header-y += ila.h
189header-y += in6.h 190header-y += in6.h
190header-y += inet_diag.h 191header-y += inet_diag.h
191header-y += in.h 192header-y += in.h
diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h
index 654bae3f1a38..5e6296160361 100644
--- a/include/uapi/linux/nfs.h
+++ b/include/uapi/linux/nfs.h
@@ -33,17 +33,6 @@
33 33
34#define NFS_PIPE_DIRNAME "nfs" 34#define NFS_PIPE_DIRNAME "nfs"
35 35
36/* NFS ioctls */
37/* Let's follow btrfs lead on CLONE to avoid messing userspace */
38#define NFS_IOC_CLONE _IOW(0x94, 9, int)
39#define NFS_IOC_CLONE_RANGE _IOW(0x94, 13, int)
40
41struct nfs_ioctl_clone_range_args {
42 __s64 src_fd;
43 __u64 src_off, count;
44 __u64 dst_off;
45};
46
47/* 36/*
48 * NFS stats. The good thing with these values is that NFSv3 errors are 37 * NFS stats. The good thing with these values is that NFSv3 errors are
49 * a superset of NFSv2 errors (with the exception of NFSERR_WFLUSH which 38 * a superset of NFSv2 errors (with the exception of NFSERR_WFLUSH which
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 28ccedd000f5..a27222d5b413 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -628,7 +628,7 @@ struct ovs_action_hash {
628 * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the 628 * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
629 * mask, the corresponding bit in the value is copied to the connection 629 * mask, the corresponding bit in the value is copied to the connection
630 * tracking mark field in the connection. 630 * tracking mark field in the connection.
631 * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN 631 * @OVS_CT_ATTR_LABELS: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN
632 * mask. For each bit set in the mask, the corresponding bit in the value is 632 * mask. For each bit set in the mask, the corresponding bit in the value is
633 * copied to the connection tracking label field in the connection. 633 * copied to the connection tracking label field in the connection.
634 * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. 634 * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 751b69f858c8..9fd7b5d8df2f 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -39,13 +39,6 @@
39#define VFIO_SPAPR_TCE_v2_IOMMU 7 39#define VFIO_SPAPR_TCE_v2_IOMMU 7
40 40
41/* 41/*
42 * The No-IOMMU IOMMU offers no translation or isolation for devices and
43 * supports no ioctls outside of VFIO_CHECK_EXTENSION. Use of VFIO's No-IOMMU
44 * code will taint the host kernel and should be used with extreme caution.
45 */
46#define VFIO_NOIOMMU_IOMMU 8
47
48/*
49 * The IOCTL interface is designed for extensibility by embedding the 42 * The IOCTL interface is designed for extensibility by embedding the
50 * structure length (argsz) and flags into structures passed between 43 * structure length (argsz) and flags into structures passed between
51 * kernel and userspace. We therefore use the _IO() macro for these 44 * kernel and userspace. We therefore use the _IO() macro for these
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 85dedca3dcfb..eeba75395f7d 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -343,7 +343,6 @@ struct ipu_client_platformdata {
343 int di; 343 int di;
344 int dc; 344 int dc;
345 int dp; 345 int dp;
346 int dmfc;
347 int dma[2]; 346 int dma[2];
348}; 347};
349 348
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 7d28aff605c7..7dc685b4057d 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -181,6 +181,20 @@ struct __name##_back_ring { \
181#define RING_GET_REQUEST(_r, _idx) \ 181#define RING_GET_REQUEST(_r, _idx) \
182 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) 182 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
183 183
184/*
185 * Get a local copy of a request.
186 *
187 * Use this in preference to RING_GET_REQUEST() so all processing is
188 * done on a local copy that cannot be modified by the other end.
189 *
190 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
191 * to be ineffective where _req is a struct which consists of only bitfields.
192 */
193#define RING_COPY_REQUEST(_r, _idx, _req) do { \
194 /* Use volatile to force the copy into _req. */ \
195 *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
196} while (0)
197
184#define RING_GET_RESPONSE(_r, _idx) \ 198#define RING_GET_RESPONSE(_r, _idx) \
185 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) 199 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
186 200
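RING_COPY_REQUEST() exists because the ring is shared with a possibly hostile peer: validating a request in place and then using it is a time-of-check/time-of-use bug, since the other end can rewrite the slot between the check and the use. The core trick reduced to a sketch (struct and names are illustrative):

struct req {
	unsigned int op;
	unsigned long gref;
};

/* 'shared' is writable by the other ring end at any time. Copying
 * through a pointer to volatile forces one load of each field into a
 * private copy; all validation and use then happens on 'local', which
 * the peer cannot touch afterwards. */
static void copy_request(struct req *local, struct req *shared)
{
	*local = *(volatile struct req *)shared;
}

The comment in the hunk about GCC bug 58145 is the caveat: for a struct consisting only of bitfields the compiler may not honor the volatile copy.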
diff --git a/init/Kconfig b/init/Kconfig
index c24b6f767bf0..235c7a2c0d20 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -2030,13 +2030,6 @@ config INIT_ALL_POSSIBLE
2030 it was better to provide this option than to break all the archs 2030 it was better to provide this option than to break all the archs
2031 and have several arch maintainers pursuing me down dark alleys. 2031 and have several arch maintainers pursuing me down dark alleys.
2032 2032
2033config STOP_MACHINE
2034 bool
2035 default y
2036 depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
2037 help
2038 Need stop_machine() primitive.
2039
2040source "block/Kconfig" 2033source "block/Kconfig"
2041 2034
2042config PREEMPT_NOTIFIERS 2035config PREEMPT_NOTIFIERS
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 3f4c99e06c6b..b0799bced518 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -28,11 +28,17 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
28 attr->value_size == 0) 28 attr->value_size == 0)
29 return ERR_PTR(-EINVAL); 29 return ERR_PTR(-EINVAL);
30 30
31 if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
32 /* if value_size is bigger, the user space won't be able to
33 * access the elements.
34 */
35 return ERR_PTR(-E2BIG);
36
31 elem_size = round_up(attr->value_size, 8); 37 elem_size = round_up(attr->value_size, 8);
32 38
33 /* check round_up into zero and u32 overflow */ 39 /* check round_up into zero and u32 overflow */
34 if (elem_size == 0 || 40 if (elem_size == 0 ||
35 attr->max_entries > (U32_MAX - sizeof(*array)) / elem_size) 41 attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size)
36 return ERR_PTR(-ENOMEM); 42 return ERR_PTR(-ENOMEM);
37 43
38 array_size = sizeof(*array) + attr->max_entries * elem_size; 44 array_size = sizeof(*array) + attr->max_entries * elem_size;
@@ -105,7 +111,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
105 /* all elements already exist */ 111 /* all elements already exist */
106 return -EEXIST; 112 return -EEXIST;
107 113
108 memcpy(array->value + array->elem_size * index, value, array->elem_size); 114 memcpy(array->value + array->elem_size * index, value, map->value_size);
109 return 0; 115 return 0;
110} 116}
111 117
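The arraymap hunk adds two bounds before the existing size computation: value_size is capped so elements stay kmalloc-able and accessible from user space, and max_entries is checked against (U32_MAX - PAGE_SIZE - header) / elem_size so the later page-count accounting cannot overflow. The same overflow-safe sizing as a standalone function (constants and names are illustrative, not the kernel's limits):

#include <stdint.h>
#include <stddef.h>

static int map_size_ok(uint32_t value_size, uint32_t max_entries,
		       size_t header, size_t page_size, uint64_t *size_out)
{
	uint64_t elem_size = (value_size + 7ULL) & ~7ULL;	/* round_up(x, 8) */

	/* reject zero-size elements and anything whose total would not
	 * fit in u32 page accounting */
	if (elem_size == 0 ||
	    max_entries > (UINT32_MAX - page_size - header) / elem_size)
		return -1;

	*size_out = header + max_entries * elem_size;
	return 0;
}

Dividing first instead of multiplying is what keeps the check itself free of overflow.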
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 19909b22b4f8..34777b3746fa 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -64,12 +64,35 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
64 */ 64 */
65 goto free_htab; 65 goto free_htab;
66 66
67 err = -ENOMEM; 67 if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
68 MAX_BPF_STACK - sizeof(struct htab_elem))
69 /* if value_size is bigger, the user space won't be able to
70 * access the elements via bpf syscall. This check also makes
71 * sure that the elem_size doesn't overflow and it's
72 * kmalloc-able later in htab_map_update_elem()
73 */
74 goto free_htab;
75
76 htab->elem_size = sizeof(struct htab_elem) +
77 round_up(htab->map.key_size, 8) +
78 htab->map.value_size;
79
68 /* prevent zero size kmalloc and check for u32 overflow */ 80 /* prevent zero size kmalloc and check for u32 overflow */
69 if (htab->n_buckets == 0 || 81 if (htab->n_buckets == 0 ||
70 htab->n_buckets > U32_MAX / sizeof(struct hlist_head)) 82 htab->n_buckets > U32_MAX / sizeof(struct hlist_head))
71 goto free_htab; 83 goto free_htab;
72 84
85 if ((u64) htab->n_buckets * sizeof(struct hlist_head) +
86 (u64) htab->elem_size * htab->map.max_entries >=
87 U32_MAX - PAGE_SIZE)
88 /* make sure page count doesn't overflow */
89 goto free_htab;
90
91 htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
92 htab->elem_size * htab->map.max_entries,
93 PAGE_SIZE) >> PAGE_SHIFT;
94
95 err = -ENOMEM;
73 htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head), 96 htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head),
74 GFP_USER | __GFP_NOWARN); 97 GFP_USER | __GFP_NOWARN);
75 98
@@ -85,13 +108,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
85 raw_spin_lock_init(&htab->lock); 108 raw_spin_lock_init(&htab->lock);
86 htab->count = 0; 109 htab->count = 0;
87 110
88 htab->elem_size = sizeof(struct htab_elem) +
89 round_up(htab->map.key_size, 8) +
90 htab->map.value_size;
91
92 htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
93 htab->elem_size * htab->map.max_entries,
94 PAGE_SIZE) >> PAGE_SHIFT;
95 return &htab->map; 111 return &htab->map;
96 112
97free_htab: 113free_htab:
@@ -222,7 +238,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
222 WARN_ON_ONCE(!rcu_read_lock_held()); 238 WARN_ON_ONCE(!rcu_read_lock_held());
223 239
224 /* allocate new element outside of lock */ 240 /* allocate new element outside of lock */
225 l_new = kmalloc(htab->elem_size, GFP_ATOMIC); 241 l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
226 if (!l_new) 242 if (!l_new)
227 return -ENOMEM; 243 return -ENOMEM;
228 244
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index be6d726e31c9..5a8a797d50b7 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -34,7 +34,7 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
34 atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt); 34 atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
35 break; 35 break;
36 case BPF_TYPE_MAP: 36 case BPF_TYPE_MAP:
37 atomic_inc(&((struct bpf_map *)raw)->refcnt); 37 bpf_map_inc(raw, true);
38 break; 38 break;
39 default: 39 default:
40 WARN_ON_ONCE(1); 40 WARN_ON_ONCE(1);
@@ -51,7 +51,7 @@ static void bpf_any_put(void *raw, enum bpf_type type)
51 bpf_prog_put(raw); 51 bpf_prog_put(raw);
52 break; 52 break;
53 case BPF_TYPE_MAP: 53 case BPF_TYPE_MAP:
54 bpf_map_put(raw); 54 bpf_map_put_with_uref(raw);
55 break; 55 break;
56 default: 56 default:
57 WARN_ON_ONCE(1); 57 WARN_ON_ONCE(1);
@@ -64,7 +64,7 @@ static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
64 void *raw; 64 void *raw;
65 65
66 *type = BPF_TYPE_MAP; 66 *type = BPF_TYPE_MAP;
67 raw = bpf_map_get(ufd); 67 raw = bpf_map_get_with_uref(ufd);
68 if (IS_ERR(raw)) { 68 if (IS_ERR(raw)) {
69 *type = BPF_TYPE_PROG; 69 *type = BPF_TYPE_PROG;
70 raw = bpf_prog_get(ufd); 70 raw = bpf_prog_get(ufd);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0d3313d02a7e..3b39550d8485 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -82,6 +82,14 @@ static void bpf_map_free_deferred(struct work_struct *work)
82 map->ops->map_free(map); 82 map->ops->map_free(map);
83} 83}
84 84
85static void bpf_map_put_uref(struct bpf_map *map)
86{
87 if (atomic_dec_and_test(&map->usercnt)) {
88 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
89 bpf_fd_array_map_clear(map);
90 }
91}
92
85/* decrement map refcnt and schedule it for freeing via workqueue 93/* decrement map refcnt and schedule it for freeing via workqueue
86 * (underlying map implementation ops->map_free() might sleep) 94 * (underlying map implementation ops->map_free() might sleep)
87 */ 95 */
@@ -93,17 +101,15 @@ void bpf_map_put(struct bpf_map *map)
93 } 101 }
94} 102}
95 103
96static int bpf_map_release(struct inode *inode, struct file *filp) 104void bpf_map_put_with_uref(struct bpf_map *map)
97{ 105{
98 struct bpf_map *map = filp->private_data; 106 bpf_map_put_uref(map);
99
100 if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
101 /* prog_array stores refcnt-ed bpf_prog pointers
102 * release them all when user space closes prog_array_fd
103 */
104 bpf_fd_array_map_clear(map);
105
106 bpf_map_put(map); 107 bpf_map_put(map);
108}
109
110static int bpf_map_release(struct inode *inode, struct file *filp)
111{
112 bpf_map_put_with_uref(filp->private_data);
107 return 0; 113 return 0;
108} 114}
109 115
@@ -142,6 +148,7 @@ static int map_create(union bpf_attr *attr)
142 return PTR_ERR(map); 148 return PTR_ERR(map);
143 149
144 atomic_set(&map->refcnt, 1); 150 atomic_set(&map->refcnt, 1);
151 atomic_set(&map->usercnt, 1);
145 152
146 err = bpf_map_charge_memlock(map); 153 err = bpf_map_charge_memlock(map);
147 if (err) 154 if (err)
@@ -174,7 +181,14 @@ struct bpf_map *__bpf_map_get(struct fd f)
174 return f.file->private_data; 181 return f.file->private_data;
175} 182}
176 183
177struct bpf_map *bpf_map_get(u32 ufd) 184void bpf_map_inc(struct bpf_map *map, bool uref)
185{
186 atomic_inc(&map->refcnt);
187 if (uref)
188 atomic_inc(&map->usercnt);
189}
190
191struct bpf_map *bpf_map_get_with_uref(u32 ufd)
178{ 192{
179 struct fd f = fdget(ufd); 193 struct fd f = fdget(ufd);
180 struct bpf_map *map; 194 struct bpf_map *map;
@@ -183,7 +197,7 @@ struct bpf_map *bpf_map_get(u32 ufd)
183 if (IS_ERR(map)) 197 if (IS_ERR(map))
184 return map; 198 return map;
185 199
186 atomic_inc(&map->refcnt); 200 bpf_map_inc(map, true);
187 fdput(f); 201 fdput(f);
188 202
189 return map; 203 return map;
@@ -226,7 +240,7 @@ static int map_lookup_elem(union bpf_attr *attr)
226 goto free_key; 240 goto free_key;
227 241
228 err = -ENOMEM; 242 err = -ENOMEM;
229 value = kmalloc(map->value_size, GFP_USER); 243 value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
230 if (!value) 244 if (!value)
231 goto free_key; 245 goto free_key;
232 246
@@ -285,7 +299,7 @@ static int map_update_elem(union bpf_attr *attr)
285 goto free_key; 299 goto free_key;
286 300
287 err = -ENOMEM; 301 err = -ENOMEM;
288 value = kmalloc(map->value_size, GFP_USER); 302 value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
289 if (!value) 303 if (!value)
290 goto free_key; 304 goto free_key;
291 305
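The syscall.c changes split map lifetime across two counters: refcnt pins the object itself, while the new usercnt tracks user-visible handles (fds and bpffs pins). When the last user handle goes away, a prog_array map drops the programs it holds even though in-kernel users may still hold refcnt references, breaking the reference cycle between a tail-call map and the programs stored in it. Schematically (stubs stand in for the real teardown; names are illustrative):

#include <stdatomic.h>

struct map {
	atomic_int refcnt;	/* object lifetime */
	atomic_int usercnt;	/* userspace-visible handles */
};

static void map_clear_user_contents(struct map *m) { /* drop held progs */ }
static void map_free(struct map *m) { /* release the object itself */ }

static void map_inc(struct map *m, _Bool uref)
{
	atomic_fetch_add(&m->refcnt, 1);
	if (uref)
		atomic_fetch_add(&m->usercnt, 1);
}

static void map_put_with_uref(struct map *m)
{
	/* last user handle: user-held contents go now, the object may outlive */
	if (atomic_fetch_sub(&m->usercnt, 1) == 1)
		map_clear_user_contents(m);
	if (atomic_fetch_sub(&m->refcnt, 1) == 1)
		map_free(m);
}

The verifier side correspondingly takes bpf_map_inc(map, false): a program's internal reference must not keep the map's user-held contents alive.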
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c6073056badf..a7945d10b378 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2021,8 +2021,7 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
2021 * will be used by the valid program until it's unloaded 2021 * will be used by the valid program until it's unloaded
2022 * and all maps are released in free_bpf_prog_info() 2022 * and all maps are released in free_bpf_prog_info()
2023 */ 2023 */
2024 atomic_inc(&map->refcnt); 2024 bpf_map_inc(map, false);
2025
2026 fdput(f); 2025 fdput(f);
2027next_insn: 2026next_insn:
2028 insn++; 2027 insn++;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f1603c153890..470f6536b9e8 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -98,6 +98,12 @@ static DEFINE_SPINLOCK(css_set_lock);
98static DEFINE_SPINLOCK(cgroup_idr_lock); 98static DEFINE_SPINLOCK(cgroup_idr_lock);
99 99
100/* 100/*
101 * Protects cgroup_file->kn for !self csses. It synchronizes notifications
102 * against file removal/re-creation across css hiding.
103 */
104static DEFINE_SPINLOCK(cgroup_file_kn_lock);
105
106/*
101 * Protects cgroup_subsys->release_agent_path. Modifying it also requires 107 * Protects cgroup_subsys->release_agent_path. Modifying it also requires
102 * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock. 108 * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
103 */ 109 */
@@ -754,9 +760,11 @@ static void put_css_set_locked(struct css_set *cset)
754 if (!atomic_dec_and_test(&cset->refcount)) 760 if (!atomic_dec_and_test(&cset->refcount))
755 return; 761 return;
756 762
757 /* This css_set is dead. unlink it and release cgroup refcounts */ 763 /* This css_set is dead. unlink it and release cgroup and css refs */
758 for_each_subsys(ss, ssid) 764 for_each_subsys(ss, ssid) {
759 list_del(&cset->e_cset_node[ssid]); 765 list_del(&cset->e_cset_node[ssid]);
766 css_put(cset->subsys[ssid]);
767 }
760 hash_del(&cset->hlist); 768 hash_del(&cset->hlist);
761 css_set_count--; 769 css_set_count--;
762 770
@@ -1056,9 +1064,13 @@ static struct css_set *find_css_set(struct css_set *old_cset,
1056 key = css_set_hash(cset->subsys); 1064 key = css_set_hash(cset->subsys);
1057 hash_add(css_set_table, &cset->hlist, key); 1065 hash_add(css_set_table, &cset->hlist, key);
1058 1066
1059 for_each_subsys(ss, ssid) 1067 for_each_subsys(ss, ssid) {
1068 struct cgroup_subsys_state *css = cset->subsys[ssid];
1069
1060 list_add_tail(&cset->e_cset_node[ssid], 1070 list_add_tail(&cset->e_cset_node[ssid],
1061 &cset->subsys[ssid]->cgroup->e_csets[ssid]); 1071 &css->cgroup->e_csets[ssid]);
1072 css_get(css);
1073 }
1062 1074
1063 spin_unlock_bh(&css_set_lock); 1075 spin_unlock_bh(&css_set_lock);
1064 1076
@@ -1393,6 +1405,16 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
1393 char name[CGROUP_FILE_NAME_MAX]; 1405 char name[CGROUP_FILE_NAME_MAX];
1394 1406
1395 lockdep_assert_held(&cgroup_mutex); 1407 lockdep_assert_held(&cgroup_mutex);
1408
1409 if (cft->file_offset) {
1410 struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
1411 struct cgroup_file *cfile = (void *)css + cft->file_offset;
1412
1413 spin_lock_irq(&cgroup_file_kn_lock);
1414 cfile->kn = NULL;
1415 spin_unlock_irq(&cgroup_file_kn_lock);
1416 }
1417
1396 kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name)); 1418 kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
1397} 1419}
1398 1420
@@ -1856,7 +1878,6 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
1856 1878
1857 INIT_LIST_HEAD(&cgrp->self.sibling); 1879 INIT_LIST_HEAD(&cgrp->self.sibling);
1858 INIT_LIST_HEAD(&cgrp->self.children); 1880 INIT_LIST_HEAD(&cgrp->self.children);
1859 INIT_LIST_HEAD(&cgrp->self.files);
1860 INIT_LIST_HEAD(&cgrp->cset_links); 1881 INIT_LIST_HEAD(&cgrp->cset_links);
1861 INIT_LIST_HEAD(&cgrp->pidlists); 1882 INIT_LIST_HEAD(&cgrp->pidlists);
1862 mutex_init(&cgrp->pidlist_mutex); 1883 mutex_init(&cgrp->pidlist_mutex);
@@ -2216,6 +2237,9 @@ struct cgroup_taskset {
2216 struct list_head src_csets; 2237 struct list_head src_csets;
2217 struct list_head dst_csets; 2238 struct list_head dst_csets;
2218 2239
2240 /* the subsys currently being processed */
2241 int ssid;
2242
2219 /* 2243 /*
2220 * Fields for cgroup_taskset_*() iteration. 2244 * Fields for cgroup_taskset_*() iteration.
2221 * 2245 *
@@ -2278,25 +2302,29 @@ static void cgroup_taskset_add(struct task_struct *task,
2278/** 2302/**
2279 * cgroup_taskset_first - reset taskset and return the first task 2303 * cgroup_taskset_first - reset taskset and return the first task
2280 * @tset: taskset of interest 2304 * @tset: taskset of interest
2305 * @dst_cssp: output variable for the destination css
2281 * 2306 *
2282 * @tset iteration is initialized and the first task is returned. 2307 * @tset iteration is initialized and the first task is returned.
2283 */ 2308 */
2284struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset) 2309struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
2310 struct cgroup_subsys_state **dst_cssp)
2285{ 2311{
2286 tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node); 2312 tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
2287 tset->cur_task = NULL; 2313 tset->cur_task = NULL;
2288 2314
2289 return cgroup_taskset_next(tset); 2315 return cgroup_taskset_next(tset, dst_cssp);
2290} 2316}
2291 2317
2292/** 2318/**
2293 * cgroup_taskset_next - iterate to the next task in taskset 2319 * cgroup_taskset_next - iterate to the next task in taskset
2294 * @tset: taskset of interest 2320 * @tset: taskset of interest
2321 * @dst_cssp: output variable for the destination css
2295 * 2322 *
2296 * Return the next task in @tset. Iteration must have been initialized 2323 * Return the next task in @tset. Iteration must have been initialized
2297 * with cgroup_taskset_first(). 2324 * with cgroup_taskset_first().
2298 */ 2325 */
2299struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset) 2326struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
2327 struct cgroup_subsys_state **dst_cssp)
2300{ 2328{
2301 struct css_set *cset = tset->cur_cset; 2329 struct css_set *cset = tset->cur_cset;
2302 struct task_struct *task = tset->cur_task; 2330 struct task_struct *task = tset->cur_task;
@@ -2311,6 +2339,18 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
2311 if (&task->cg_list != &cset->mg_tasks) { 2339 if (&task->cg_list != &cset->mg_tasks) {
2312 tset->cur_cset = cset; 2340 tset->cur_cset = cset;
2313 tset->cur_task = task; 2341 tset->cur_task = task;
2342
2343 /*
2344 * This function may be called both before and
2345 * after cgroup_taskset_migrate(). The two cases
2346 * can be distinguished by looking at whether @cset
2347 * has its ->mg_dst_cset set.
2348 */
2349 if (cset->mg_dst_cset)
2350 *dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
2351 else
2352 *dst_cssp = cset->subsys[tset->ssid];
2353
2314 return task; 2354 return task;
2315 } 2355 }
2316 2356
@@ -2346,7 +2386,8 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
2346 /* check that we can legitimately attach to the cgroup */ 2386 /* check that we can legitimately attach to the cgroup */
2347 for_each_e_css(css, i, dst_cgrp) { 2387 for_each_e_css(css, i, dst_cgrp) {
2348 if (css->ss->can_attach) { 2388 if (css->ss->can_attach) {
2349 ret = css->ss->can_attach(css, tset); 2389 tset->ssid = i;
2390 ret = css->ss->can_attach(tset);
2350 if (ret) { 2391 if (ret) {
2351 failed_css = css; 2392 failed_css = css;
2352 goto out_cancel_attach; 2393 goto out_cancel_attach;
@@ -2379,9 +2420,12 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
2379 */ 2420 */
2380 tset->csets = &tset->dst_csets; 2421 tset->csets = &tset->dst_csets;
2381 2422
2382 for_each_e_css(css, i, dst_cgrp) 2423 for_each_e_css(css, i, dst_cgrp) {
2383 if (css->ss->attach) 2424 if (css->ss->attach) {
2384 css->ss->attach(css, tset); 2425 tset->ssid = i;
2426 css->ss->attach(tset);
2427 }
2428 }
2385 2429
2386 ret = 0; 2430 ret = 0;
2387 goto out_release_tset; 2431 goto out_release_tset;
@@ -2390,8 +2434,10 @@ out_cancel_attach:
2390 for_each_e_css(css, i, dst_cgrp) { 2434 for_each_e_css(css, i, dst_cgrp) {
2391 if (css == failed_css) 2435 if (css == failed_css)
2392 break; 2436 break;
2393 if (css->ss->cancel_attach) 2437 if (css->ss->cancel_attach) {
2394 css->ss->cancel_attach(css, tset); 2438 tset->ssid = i;
2439 css->ss->cancel_attach(tset);
2440 }
2395 } 2441 }
2396out_release_tset: 2442out_release_tset:
2397 spin_lock_bh(&css_set_lock); 2443 spin_lock_bh(&css_set_lock);
@@ -3313,9 +3359,9 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
3313 if (cft->file_offset) { 3359 if (cft->file_offset) {
3314 struct cgroup_file *cfile = (void *)css + cft->file_offset; 3360 struct cgroup_file *cfile = (void *)css + cft->file_offset;
3315 3361
3316 kernfs_get(kn); 3362 spin_lock_irq(&cgroup_file_kn_lock);
3317 cfile->kn = kn; 3363 cfile->kn = kn;
3318 list_add(&cfile->node, &css->files); 3364 spin_unlock_irq(&cgroup_file_kn_lock);
3319 } 3365 }
3320 3366
3321 return 0; 3367 return 0;
@@ -3553,6 +3599,22 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3553} 3599}
3554 3600
3555/** 3601/**
3602 * cgroup_file_notify - generate a file modified event for a cgroup_file
3603 * @cfile: target cgroup_file
3604 *
3605 * @cfile must have been obtained by setting cftype->file_offset.
3606 */
3607void cgroup_file_notify(struct cgroup_file *cfile)
3608{
3609 unsigned long flags;
3610
3611 spin_lock_irqsave(&cgroup_file_kn_lock, flags);
3612 if (cfile->kn)
3613 kernfs_notify(cfile->kn);
3614 spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
3615}
3616
3617/**
3556 * cgroup_task_count - count the number of tasks in a cgroup. 3618 * cgroup_task_count - count the number of tasks in a cgroup.
3557 * @cgrp: the cgroup in question 3619 * @cgrp: the cgroup in question
3558 * 3620 *
@@ -4613,13 +4675,9 @@ static void css_free_work_fn(struct work_struct *work)
4613 container_of(work, struct cgroup_subsys_state, destroy_work); 4675 container_of(work, struct cgroup_subsys_state, destroy_work);
4614 struct cgroup_subsys *ss = css->ss; 4676 struct cgroup_subsys *ss = css->ss;
4615 struct cgroup *cgrp = css->cgroup; 4677 struct cgroup *cgrp = css->cgroup;
4616 struct cgroup_file *cfile;
4617 4678
4618 percpu_ref_exit(&css->refcnt); 4679 percpu_ref_exit(&css->refcnt);
4619 4680
4620 list_for_each_entry(cfile, &css->files, node)
4621 kernfs_put(cfile->kn);
4622
4623 if (ss) { 4681 if (ss) {
4624 /* css free path */ 4682 /* css free path */
4625 int id = css->id; 4683 int id = css->id;
@@ -4724,7 +4782,6 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
4724 css->ss = ss; 4782 css->ss = ss;
4725 INIT_LIST_HEAD(&css->sibling); 4783 INIT_LIST_HEAD(&css->sibling);
4726 INIT_LIST_HEAD(&css->children); 4784 INIT_LIST_HEAD(&css->children);
4727 INIT_LIST_HEAD(&css->files);
4728 css->serial_nr = css_serial_nr_next++; 4785 css->serial_nr = css_serial_nr_next++;
4729 4786
4730 if (cgroup_parent(cgrp)) { 4787 if (cgroup_parent(cgrp)) {
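Among the cgroup.c changes above, cgroup_file->kn is now cleared under the new cgroup_file_kn_lock when a file is removed, and cgroup_file_notify() takes the same lock, so a notification can never poke a stale kernfs node (this is what lets the old kernfs_get()/kernfs_put() pinning and the css->files list go away). The pattern reduced to its core (a pthread mutex standing in for the spinlock; names are illustrative):

#include <pthread.h>
#include <stddef.h>

struct file_handle {
	void *kn;	/* NULL once the backing file has been removed */
};

static pthread_mutex_t kn_lock = PTHREAD_MUTEX_INITIALIZER;

static void do_notify(void *kn) { /* stands in for kernfs_notify() */ }

/* "is the file still there?" and "notify it" become one atomic step
 * with respect to removal, which clears ->kn under the same lock */
static void file_notify(struct file_handle *f)
{
	pthread_mutex_lock(&kn_lock);
	if (f->kn)
		do_notify(f->kn);
	pthread_mutex_unlock(&kn_lock);
}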
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index f1b30ad5dc6d..2d3df82c54f2 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -155,12 +155,10 @@ static void freezer_css_free(struct cgroup_subsys_state *css)
155 * @freezer->lock. freezer_attach() makes the new tasks conform to the 155 * @freezer->lock. freezer_attach() makes the new tasks conform to the
156 * current state and all following state changes can see the new tasks. 156 * current state and all following state changes can see the new tasks.
157 */ 157 */
158static void freezer_attach(struct cgroup_subsys_state *new_css, 158static void freezer_attach(struct cgroup_taskset *tset)
159 struct cgroup_taskset *tset)
160{ 159{
161 struct freezer *freezer = css_freezer(new_css);
162 struct task_struct *task; 160 struct task_struct *task;
163 bool clear_frozen = false; 161 struct cgroup_subsys_state *new_css;
164 162
165 mutex_lock(&freezer_mutex); 163 mutex_lock(&freezer_mutex);
166 164
@@ -174,22 +172,21 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
174 * current state before executing the following - !frozen tasks may 172 * current state before executing the following - !frozen tasks may
175 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one. 173 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
176 */ 174 */
177 cgroup_taskset_for_each(task, tset) { 175 cgroup_taskset_for_each(task, new_css, tset) {
176 struct freezer *freezer = css_freezer(new_css);
177
178 if (!(freezer->state & CGROUP_FREEZING)) { 178 if (!(freezer->state & CGROUP_FREEZING)) {
179 __thaw_task(task); 179 __thaw_task(task);
180 } else { 180 } else {
181 freeze_task(task); 181 freeze_task(task);
182 freezer->state &= ~CGROUP_FROZEN; 182 /* clear FROZEN and propagate upwards */
183 clear_frozen = true; 183 while (freezer && (freezer->state & CGROUP_FROZEN)) {
184 freezer->state &= ~CGROUP_FROZEN;
185 freezer = parent_freezer(freezer);
186 }
184 } 187 }
185 } 188 }
186 189
187 /* propagate FROZEN clearing upwards */
188 while (clear_frozen && (freezer = parent_freezer(freezer))) {
189 freezer->state &= ~CGROUP_FROZEN;
190 clear_frozen = freezer->state & CGROUP_FREEZING;
191 }
192
193 mutex_unlock(&freezer_mutex); 190 mutex_unlock(&freezer_mutex);
194} 191}
195 192
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
index cdd8df4e991c..b50d5a167fda 100644
--- a/kernel/cgroup_pids.c
+++ b/kernel/cgroup_pids.c
@@ -106,7 +106,7 @@ static void pids_uncharge(struct pids_cgroup *pids, int num)
106{ 106{
107 struct pids_cgroup *p; 107 struct pids_cgroup *p;
108 108
109 for (p = pids; p; p = parent_pids(p)) 109 for (p = pids; parent_pids(p); p = parent_pids(p))
110 pids_cancel(p, num); 110 pids_cancel(p, num);
111} 111}
112 112
@@ -123,7 +123,7 @@ static void pids_charge(struct pids_cgroup *pids, int num)
123{ 123{
124 struct pids_cgroup *p; 124 struct pids_cgroup *p;
125 125
126 for (p = pids; p; p = parent_pids(p)) 126 for (p = pids; parent_pids(p); p = parent_pids(p))
127 atomic64_add(num, &p->counter); 127 atomic64_add(num, &p->counter);
128} 128}
129 129
@@ -140,7 +140,7 @@ static int pids_try_charge(struct pids_cgroup *pids, int num)
140{ 140{
141 struct pids_cgroup *p, *q; 141 struct pids_cgroup *p, *q;
142 142
143 for (p = pids; p; p = parent_pids(p)) { 143 for (p = pids; parent_pids(p); p = parent_pids(p)) {
144 int64_t new = atomic64_add_return(num, &p->counter); 144 int64_t new = atomic64_add_return(num, &p->counter);
145 145
146 /* 146 /*
@@ -162,13 +162,13 @@ revert:
162 return -EAGAIN; 162 return -EAGAIN;
163} 163}
164 164
165static int pids_can_attach(struct cgroup_subsys_state *css, 165static int pids_can_attach(struct cgroup_taskset *tset)
166 struct cgroup_taskset *tset)
167{ 166{
168 struct pids_cgroup *pids = css_pids(css);
169 struct task_struct *task; 167 struct task_struct *task;
168 struct cgroup_subsys_state *dst_css;
170 169
171 cgroup_taskset_for_each(task, tset) { 170 cgroup_taskset_for_each(task, dst_css, tset) {
171 struct pids_cgroup *pids = css_pids(dst_css);
172 struct cgroup_subsys_state *old_css; 172 struct cgroup_subsys_state *old_css;
173 struct pids_cgroup *old_pids; 173 struct pids_cgroup *old_pids;
174 174
@@ -187,13 +187,13 @@ static int pids_can_attach(struct cgroup_subsys_state *css,
187 return 0; 187 return 0;
188} 188}
189 189
190static void pids_cancel_attach(struct cgroup_subsys_state *css, 190static void pids_cancel_attach(struct cgroup_taskset *tset)
191 struct cgroup_taskset *tset)
192{ 191{
193 struct pids_cgroup *pids = css_pids(css);
194 struct task_struct *task; 192 struct task_struct *task;
193 struct cgroup_subsys_state *dst_css;
195 194
196 cgroup_taskset_for_each(task, tset) { 195 cgroup_taskset_for_each(task, dst_css, tset) {
196 struct pids_cgroup *pids = css_pids(dst_css);
197 struct cgroup_subsys_state *old_css; 197 struct cgroup_subsys_state *old_css;
198 struct pids_cgroup *old_pids; 198 struct pids_cgroup *old_pids;
199 199
@@ -205,65 +205,28 @@ static void pids_cancel_attach(struct cgroup_subsys_state *css,
205 } 205 }
206} 206}
207 207
208/*
209 * task_css_check(true) in pids_can_fork() and pids_cancel_fork() relies
210 * on threadgroup_change_begin() held by the copy_process().
211 */
208static int pids_can_fork(struct task_struct *task, void **priv_p) 212static int pids_can_fork(struct task_struct *task, void **priv_p)
209{ 213{
210 struct cgroup_subsys_state *css; 214 struct cgroup_subsys_state *css;
211 struct pids_cgroup *pids; 215 struct pids_cgroup *pids;
212 int err;
213 216
214 /* 217 css = task_css_check(current, pids_cgrp_id, true);
215 * Use the "current" task_css for the pids subsystem as the tentative
216 * css. It is possible we will charge the wrong hierarchy, in which
217 * case we will forcefully revert/reapply the charge on the right
218 * hierarchy after it is committed to the task proper.
219 */
220 css = task_get_css(current, pids_cgrp_id);
221 pids = css_pids(css); 218 pids = css_pids(css);
222 219 return pids_try_charge(pids, 1);
223 err = pids_try_charge(pids, 1);
224 if (err)
225 goto err_css_put;
226
227 *priv_p = css;
228 return 0;
229
230err_css_put:
231 css_put(css);
232 return err;
233} 220}
234 221
235static void pids_cancel_fork(struct task_struct *task, void *priv) 222static void pids_cancel_fork(struct task_struct *task, void *priv)
236{ 223{
237 struct cgroup_subsys_state *css = priv;
238 struct pids_cgroup *pids = css_pids(css);
239
240 pids_uncharge(pids, 1);
241 css_put(css);
242}
243
244static void pids_fork(struct task_struct *task, void *priv)
245{
246 struct cgroup_subsys_state *css; 224 struct cgroup_subsys_state *css;
247 struct cgroup_subsys_state *old_css = priv;
248 struct pids_cgroup *pids; 225 struct pids_cgroup *pids;
249 struct pids_cgroup *old_pids = css_pids(old_css);
250 226
251 css = task_get_css(task, pids_cgrp_id); 227 css = task_css_check(current, pids_cgrp_id, true);
252 pids = css_pids(css); 228 pids = css_pids(css);
253 229 pids_uncharge(pids, 1);
254 /*
255 * If the association has changed, we have to revert and reapply the
256 * charge/uncharge on the wrong hierarchy to the current one. Since
257 * the association can only change due to an organisation event, its
258 * okay for us to ignore the limit in this case.
259 */
260 if (pids != old_pids) {
261 pids_uncharge(old_pids, 1);
262 pids_charge(pids, 1);
263 }
264
265 css_put(css);
266 css_put(old_css);
267} 230}
268 231
269static void pids_free(struct task_struct *task) 232static void pids_free(struct task_struct *task)
@@ -335,6 +298,7 @@ static struct cftype pids_files[] = {
335 { 298 {
336 .name = "current", 299 .name = "current",
337 .read_s64 = pids_current_read, 300 .read_s64 = pids_current_read,
301 .flags = CFTYPE_NOT_ON_ROOT,
338 }, 302 },
339 { } /* terminate */ 303 { } /* terminate */
340}; 304};
@@ -346,7 +310,6 @@ struct cgroup_subsys pids_cgrp_subsys = {
346 .cancel_attach = pids_cancel_attach, 310 .cancel_attach = pids_cancel_attach,
347 .can_fork = pids_can_fork, 311 .can_fork = pids_can_fork,
348 .cancel_fork = pids_cancel_fork, 312 .cancel_fork = pids_cancel_fork,
349 .fork = pids_fork,
350 .free = pids_free, 313 .free = pids_free,
351 .legacy_cftypes = pids_files, 314 .legacy_cftypes = pids_files,
352 .dfl_cftypes = pids_files, 315 .dfl_cftypes = pids_files,
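With the separate pids_fork() reapply step gone, the charge taken in pids_can_fork() must land on the same css the child is committed to. The added comment states the invariant this relies on: copy_process() holds threadgroup_change_begin(), which pins current's css association, so task_css_check(current, pids_cgrp_id, true) can be used without taking a reference. A condensed sketch of the charge/uncharge pairing, assuming that lock is held as the comment says:

	/* both sides resolve the css the same way, under the threadgroup lock */
	css = task_css_check(current, pids_cgrp_id, true);
	pids = css_pids(css);

	pids_try_charge(pids, 1);	/* pids_can_fork() */
	pids_uncharge(pids, 1);		/* pids_cancel_fork() on failure */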
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 10ae73611d80..02a8ea5c9963 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1429,15 +1429,16 @@ static int fmeter_getrate(struct fmeter *fmp)
1429static struct cpuset *cpuset_attach_old_cs; 1429static struct cpuset *cpuset_attach_old_cs;
1430 1430
1431/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ 1431/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
1432static int cpuset_can_attach(struct cgroup_subsys_state *css, 1432static int cpuset_can_attach(struct cgroup_taskset *tset)
1433 struct cgroup_taskset *tset)
1434{ 1433{
1435 struct cpuset *cs = css_cs(css); 1434 struct cgroup_subsys_state *css;
1435 struct cpuset *cs;
1436 struct task_struct *task; 1436 struct task_struct *task;
1437 int ret; 1437 int ret;
1438 1438
1439 /* used later by cpuset_attach() */ 1439 /* used later by cpuset_attach() */
1440 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset)); 1440 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
1441 cs = css_cs(css);
1441 1442
1442 mutex_lock(&cpuset_mutex); 1443 mutex_lock(&cpuset_mutex);
1443 1444
@@ -1447,7 +1448,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
1447 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) 1448 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
1448 goto out_unlock; 1449 goto out_unlock;
1449 1450
1450 cgroup_taskset_for_each(task, tset) { 1451 cgroup_taskset_for_each(task, css, tset) {
1451 ret = task_can_attach(task, cs->cpus_allowed); 1452 ret = task_can_attach(task, cs->cpus_allowed);
1452 if (ret) 1453 if (ret)
1453 goto out_unlock; 1454 goto out_unlock;
@@ -1467,9 +1468,14 @@ out_unlock:
1467 return ret; 1468 return ret;
1468} 1469}
1469 1470
1470static void cpuset_cancel_attach(struct cgroup_subsys_state *css, 1471static void cpuset_cancel_attach(struct cgroup_taskset *tset)
1471 struct cgroup_taskset *tset)
1472{ 1472{
1473 struct cgroup_subsys_state *css;
1474 struct cpuset *cs;
1475
1476 cgroup_taskset_first(tset, &css);
1477 cs = css_cs(css);
1478
1473 mutex_lock(&cpuset_mutex); 1479 mutex_lock(&cpuset_mutex);
1474 css_cs(css)->attach_in_progress--; 1480 css_cs(css)->attach_in_progress--;
1475 mutex_unlock(&cpuset_mutex); 1481 mutex_unlock(&cpuset_mutex);
@@ -1482,16 +1488,19 @@ static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
1482 */ 1488 */
1483static cpumask_var_t cpus_attach; 1489static cpumask_var_t cpus_attach;
1484 1490
1485static void cpuset_attach(struct cgroup_subsys_state *css, 1491static void cpuset_attach(struct cgroup_taskset *tset)
1486 struct cgroup_taskset *tset)
1487{ 1492{
1488 /* static buf protected by cpuset_mutex */ 1493 /* static buf protected by cpuset_mutex */
1489 static nodemask_t cpuset_attach_nodemask_to; 1494 static nodemask_t cpuset_attach_nodemask_to;
1490 struct task_struct *task; 1495 struct task_struct *task;
1491 struct task_struct *leader; 1496 struct task_struct *leader;
1492 struct cpuset *cs = css_cs(css); 1497 struct cgroup_subsys_state *css;
1498 struct cpuset *cs;
1493 struct cpuset *oldcs = cpuset_attach_old_cs; 1499 struct cpuset *oldcs = cpuset_attach_old_cs;
1494 1500
1501 cgroup_taskset_first(tset, &css);
1502 cs = css_cs(css);
1503
1495 mutex_lock(&cpuset_mutex); 1504 mutex_lock(&cpuset_mutex);
1496 1505
1497 /* prepare for attach */ 1506 /* prepare for attach */
@@ -1502,7 +1511,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
1502 1511
1503 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); 1512 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
1504 1513
1505 cgroup_taskset_for_each(task, tset) { 1514 cgroup_taskset_for_each(task, css, tset) {
1506 /* 1515 /*
1507 * can_attach beforehand should guarantee that this doesn't 1516 * can_attach beforehand should guarantee that this doesn't
1508 * fail. TODO: have a better way to handle failure here 1517 * fail. TODO: have a better way to handle failure here
@@ -1518,7 +1527,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
1518 * sleep and should be moved outside migration path proper. 1527 * sleep and should be moved outside migration path proper.
1519 */ 1528 */
1520 cpuset_attach_nodemask_to = cs->effective_mems; 1529 cpuset_attach_nodemask_to = cs->effective_mems;
1521 cgroup_taskset_for_each_leader(leader, tset) { 1530 cgroup_taskset_for_each_leader(leader, css, tset) {
1522 struct mm_struct *mm = get_task_mm(leader); 1531 struct mm_struct *mm = get_task_mm(leader);
1523 1532
1524 if (mm) { 1533 if (mm) {
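Where a callback needs the destination css before iterating (cpuset_attach() prepares its masks first), cgroup_taskset_first() now reports it through an output parameter. A sketch of that retrieval; example_cancel is a hypothetical name:

	static void example_cancel(struct cgroup_taskset *tset)
	{
		struct cgroup_subsys_state *css;

		/* the first task's destination css doubles as the group's css */
		cgroup_taskset_first(tset, &css);
		/* ... operate on css_cs(css) ... */
	}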
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index d659487254d5..9c418002b8c1 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 * 8 *
9 * For licensing details see kernel-base/COPYING 9 * For licensing details see kernel-base/COPYING
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 36babfd20648..ef2d6ea10736 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 * 8 *
9 * For licensing details see kernel-base/COPYING 9 * For licensing details see kernel-base/COPYING
@@ -435,7 +435,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
435 if (!is_cgroup_event(event)) 435 if (!is_cgroup_event(event))
436 return; 436 return;
437 437
438 cgrp = perf_cgroup_from_task(current); 438 cgrp = perf_cgroup_from_task(current, event->ctx);
439 /* 439 /*
440 * Do not update time when cgroup is not active 440 * Do not update time when cgroup is not active
441 */ 441 */
@@ -458,7 +458,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
458 if (!task || !ctx->nr_cgroups) 458 if (!task || !ctx->nr_cgroups)
459 return; 459 return;
460 460
461 cgrp = perf_cgroup_from_task(task); 461 cgrp = perf_cgroup_from_task(task, ctx);
462 info = this_cpu_ptr(cgrp->info); 462 info = this_cpu_ptr(cgrp->info);
463 info->timestamp = ctx->timestamp; 463 info->timestamp = ctx->timestamp;
464} 464}
@@ -489,7 +489,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
489 * we reschedule only in the presence of cgroup 489 * we reschedule only in the presence of cgroup
490 * constrained events. 490 * constrained events.
491 */ 491 */
492 rcu_read_lock();
493 492
494 list_for_each_entry_rcu(pmu, &pmus, entry) { 493 list_for_each_entry_rcu(pmu, &pmus, entry) {
495 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 494 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -522,8 +521,10 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
522 * set cgrp before ctxsw in to allow 521 * set cgrp before ctxsw in to allow
523 * event_filter_match() to not have to pass 522 * event_filter_match() to not have to pass
524 * task around 523 * task around
524 * we pass the cpuctx->ctx to perf_cgroup_from_task()
 525 * because cgroup events are only per-cpu
525 */ 526 */
526 cpuctx->cgrp = perf_cgroup_from_task(task); 527 cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
527 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); 528 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
528 } 529 }
529 perf_pmu_enable(cpuctx->ctx.pmu); 530 perf_pmu_enable(cpuctx->ctx.pmu);
@@ -531,8 +532,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
531 } 532 }
532 } 533 }
533 534
534 rcu_read_unlock();
535
536 local_irq_restore(flags); 535 local_irq_restore(flags);
537} 536}
538 537
@@ -542,17 +541,20 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
542 struct perf_cgroup *cgrp1; 541 struct perf_cgroup *cgrp1;
543 struct perf_cgroup *cgrp2 = NULL; 542 struct perf_cgroup *cgrp2 = NULL;
544 543
544 rcu_read_lock();
545 /* 545 /*
546 * we come here when we know perf_cgroup_events > 0 546 * we come here when we know perf_cgroup_events > 0
547 * we do not need to pass the ctx here because we know
548 * we are holding the rcu lock
547 */ 549 */
548 cgrp1 = perf_cgroup_from_task(task); 550 cgrp1 = perf_cgroup_from_task(task, NULL);
549 551
550 /* 552 /*
551 * next is NULL when called from perf_event_enable_on_exec() 553 * next is NULL when called from perf_event_enable_on_exec()
552 * that will systematically cause a cgroup_switch() 554 * that will systematically cause a cgroup_switch()
553 */ 555 */
554 if (next) 556 if (next)
555 cgrp2 = perf_cgroup_from_task(next); 557 cgrp2 = perf_cgroup_from_task(next, NULL);
556 558
557 /* 559 /*
558 * only schedule out current cgroup events if we know 560 * only schedule out current cgroup events if we know
@@ -561,6 +563,8 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
561 */ 563 */
562 if (cgrp1 != cgrp2) 564 if (cgrp1 != cgrp2)
563 perf_cgroup_switch(task, PERF_CGROUP_SWOUT); 565 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
566
567 rcu_read_unlock();
564} 568}
565 569
566static inline void perf_cgroup_sched_in(struct task_struct *prev, 570static inline void perf_cgroup_sched_in(struct task_struct *prev,
@@ -569,13 +573,16 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
569 struct perf_cgroup *cgrp1; 573 struct perf_cgroup *cgrp1;
570 struct perf_cgroup *cgrp2 = NULL; 574 struct perf_cgroup *cgrp2 = NULL;
571 575
576 rcu_read_lock();
572 /* 577 /*
573 * we come here when we know perf_cgroup_events > 0 578 * we come here when we know perf_cgroup_events > 0
579 * we do not need to pass the ctx here because we know
580 * we are holding the rcu lock
574 */ 581 */
575 cgrp1 = perf_cgroup_from_task(task); 582 cgrp1 = perf_cgroup_from_task(task, NULL);
576 583
577 /* prev can never be NULL */ 584 /* prev can never be NULL */
578 cgrp2 = perf_cgroup_from_task(prev); 585 cgrp2 = perf_cgroup_from_task(prev, NULL);
579 586
580 /* 587 /*
581 * only need to schedule in cgroup events if we are changing 588 * only need to schedule in cgroup events if we are changing
@@ -584,6 +591,8 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
584 */ 591 */
585 if (cgrp1 != cgrp2) 592 if (cgrp1 != cgrp2)
586 perf_cgroup_switch(task, PERF_CGROUP_SWIN); 593 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
594
595 rcu_read_unlock();
587} 596}
588 597
589static inline int perf_cgroup_connect(int fd, struct perf_event *event, 598static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -4216,7 +4225,14 @@ retry:
4216 goto retry; 4225 goto retry;
4217 } 4226 }
4218 4227
4219 __perf_event_period(&pe); 4228 if (event->attr.freq) {
4229 event->attr.sample_freq = value;
4230 } else {
4231 event->attr.sample_period = value;
4232 event->hw.sample_period = value;
4233 }
4234
4235 local64_set(&event->hw.period_left, 0);
4220 raw_spin_unlock_irq(&ctx->lock); 4236 raw_spin_unlock_irq(&ctx->lock);
4221 4237
4222 return 0; 4238 return 0;
@@ -5667,6 +5683,17 @@ perf_event_aux_ctx(struct perf_event_context *ctx,
5667} 5683}
5668 5684
5669static void 5685static void
5686perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
5687 struct perf_event_context *task_ctx)
5688{
5689 rcu_read_lock();
5690 preempt_disable();
5691 perf_event_aux_ctx(task_ctx, output, data);
5692 preempt_enable();
5693 rcu_read_unlock();
5694}
5695
5696static void
5670perf_event_aux(perf_event_aux_output_cb output, void *data, 5697perf_event_aux(perf_event_aux_output_cb output, void *data,
5671 struct perf_event_context *task_ctx) 5698 struct perf_event_context *task_ctx)
5672{ 5699{
@@ -5675,14 +5702,23 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
5675 struct pmu *pmu; 5702 struct pmu *pmu;
5676 int ctxn; 5703 int ctxn;
5677 5704
5705 /*
5706 * If we have task_ctx != NULL we only notify
5707 * the task context itself. The task_ctx is set
5708 * only for EXIT events before releasing task
5709 * context.
5710 */
5711 if (task_ctx) {
5712 perf_event_aux_task_ctx(output, data, task_ctx);
5713 return;
5714 }
5715
5678 rcu_read_lock(); 5716 rcu_read_lock();
5679 list_for_each_entry_rcu(pmu, &pmus, entry) { 5717 list_for_each_entry_rcu(pmu, &pmus, entry) {
5680 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 5718 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
5681 if (cpuctx->unique_pmu != pmu) 5719 if (cpuctx->unique_pmu != pmu)
5682 goto next; 5720 goto next;
5683 perf_event_aux_ctx(&cpuctx->ctx, output, data); 5721 perf_event_aux_ctx(&cpuctx->ctx, output, data);
5684 if (task_ctx)
5685 goto next;
5686 ctxn = pmu->task_ctx_nr; 5722 ctxn = pmu->task_ctx_nr;
5687 if (ctxn < 0) 5723 if (ctxn < 0)
5688 goto next; 5724 goto next;
@@ -5692,12 +5728,6 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
5692next: 5728next:
5693 put_cpu_ptr(pmu->pmu_cpu_context); 5729 put_cpu_ptr(pmu->pmu_cpu_context);
5694 } 5730 }
5695
5696 if (task_ctx) {
5697 preempt_disable();
5698 perf_event_aux_ctx(task_ctx, output, data);
5699 preempt_enable();
5700 }
5701 rcu_read_unlock(); 5731 rcu_read_unlock();
5702} 5732}
5703 5733
@@ -8787,10 +8817,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
8787 struct perf_event_context *child_ctx, *clone_ctx = NULL; 8817 struct perf_event_context *child_ctx, *clone_ctx = NULL;
8788 unsigned long flags; 8818 unsigned long flags;
8789 8819
8790 if (likely(!child->perf_event_ctxp[ctxn])) { 8820 if (likely(!child->perf_event_ctxp[ctxn]))
8791 perf_event_task(child, NULL, 0);
8792 return; 8821 return;
8793 }
8794 8822
8795 local_irq_save(flags); 8823 local_irq_save(flags);
8796 /* 8824 /*
@@ -8874,6 +8902,14 @@ void perf_event_exit_task(struct task_struct *child)
8874 8902
8875 for_each_task_context_nr(ctxn) 8903 for_each_task_context_nr(ctxn)
8876 perf_event_exit_task_context(child, ctxn); 8904 perf_event_exit_task_context(child, ctxn);
8905
8906 /*
 8907 * perf_event_exit_task_context() calls perf_event_task()
 8908 * with the child's task_ctx, which generates EXIT events for
8909 * child contexts and sets child->perf_event_ctxp[] to NULL.
8910 * At this point we need to send EXIT events to cpu contexts.
8911 */
8912 perf_event_task(child, NULL, 0);
8877} 8913}
8878 8914
8879static void perf_free_event(struct perf_event *event, 8915static void perf_free_event(struct perf_event *event,
@@ -9452,16 +9488,18 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
9452static int __perf_cgroup_move(void *info) 9488static int __perf_cgroup_move(void *info)
9453{ 9489{
9454 struct task_struct *task = info; 9490 struct task_struct *task = info;
9491 rcu_read_lock();
9455 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); 9492 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
9493 rcu_read_unlock();
9456 return 0; 9494 return 0;
9457} 9495}
9458 9496
9459static void perf_cgroup_attach(struct cgroup_subsys_state *css, 9497static void perf_cgroup_attach(struct cgroup_taskset *tset)
9460 struct cgroup_taskset *tset)
9461{ 9498{
9462 struct task_struct *task; 9499 struct task_struct *task;
9500 struct cgroup_subsys_state *css;
9463 9501
9464 cgroup_taskset_for_each(task, tset) 9502 cgroup_taskset_for_each(task, css, tset)
9465 task_function_call(task, __perf_cgroup_move, task); 9503 task_function_call(task, __perf_cgroup_move, task);
9466} 9504}
9467 9505
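Two related perf changes are visible above. First, perf_cgroup_from_task() now takes the context it is called for; callers that already hold the RCU read lock pass NULL, and perf_cgroup_switch() now expects its caller to hold rcu_read_lock() (see __perf_cgroup_move). Second, EXIT-event delivery to CPU contexts moves out of perf_event_exit_task_context() into perf_event_exit_task(), with task_ctx-only notification split into perf_event_aux_task_ctx(). A sketch of the caller-side RCU rule, condensed from the sched_out path:

	rcu_read_lock();
	/* NULL ctx is allowed: the RCU read lock covers the cgroup deref */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(next, NULL);
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
	rcu_read_unlock();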
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index b5d1ea79c595..adfdc0536117 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 * 8 *
9 * For licensing details see kernel-base/COPYING 9 * For licensing details see kernel-base/COPYING
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 4e5e9798aa0c..7dad84913abf 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -19,7 +19,7 @@
19 * Authors: 19 * Authors:
20 * Srikar Dronamraju 20 * Srikar Dronamraju
21 * Jim Keniston 21 * Jim Keniston
22 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 22 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
23 */ 23 */
24 24
25#include <linux/kernel.h> 25#include <linux/kernel.h>
diff --git a/kernel/fork.c b/kernel/fork.c
index f97f2c449f5c..fce002ee3ddf 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1368,8 +1368,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1368 p->real_start_time = ktime_get_boot_ns(); 1368 p->real_start_time = ktime_get_boot_ns();
1369 p->io_context = NULL; 1369 p->io_context = NULL;
1370 p->audit_context = NULL; 1370 p->audit_context = NULL;
1371 if (clone_flags & CLONE_THREAD) 1371 threadgroup_change_begin(current);
1372 threadgroup_change_begin(current);
1373 cgroup_fork(p); 1372 cgroup_fork(p);
1374#ifdef CONFIG_NUMA 1373#ifdef CONFIG_NUMA
1375 p->mempolicy = mpol_dup(p->mempolicy); 1374 p->mempolicy = mpol_dup(p->mempolicy);
@@ -1610,8 +1609,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1610 1609
1611 proc_fork_connector(p); 1610 proc_fork_connector(p);
1612 cgroup_post_fork(p, cgrp_ss_priv); 1611 cgroup_post_fork(p, cgrp_ss_priv);
1613 if (clone_flags & CLONE_THREAD) 1612 threadgroup_change_end(current);
1614 threadgroup_change_end(current);
1615 perf_event_fork(p); 1613 perf_event_fork(p);
1616 1614
1617 trace_task_newtask(p, clone_flags); 1615 trace_task_newtask(p, clone_flags);
@@ -1652,8 +1650,7 @@ bad_fork_cleanup_policy:
1652 mpol_put(p->mempolicy); 1650 mpol_put(p->mempolicy);
1653bad_fork_cleanup_threadgroup_lock: 1651bad_fork_cleanup_threadgroup_lock:
1654#endif 1652#endif
1655 if (clone_flags & CLONE_THREAD) 1653 threadgroup_change_end(current);
1656 threadgroup_change_end(current);
1657 delayacct_tsk_free(p); 1654 delayacct_tsk_free(p);
1658bad_fork_cleanup_count: 1655bad_fork_cleanup_count:
1659 atomic_dec(&p->cred->user->processes); 1656 atomic_dec(&p->cred->user->processes);
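The fork.c hunks make threadgroup_change_begin()/end() unconditional, bracketing every copy_process() call rather than only CLONE_THREAD forks; this is exactly the lock the new pids_can_fork() comment depends on. The resulting shape of the fork path, condensed from the hunks above:

	threadgroup_change_begin(current);
	cgroup_fork(p);
	/* ... the whole fork path runs with the threadgroup lock held ... */
	cgroup_post_fork(p, cgrp_ss_priv);
	threadgroup_change_end(current);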
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index cbf9fb899d92..bcf107ce0854 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 2 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
3 * 3 *
4 * Provides a framework for enqueueing and running callbacks from hardirq 4 * Provides a framework for enqueueing and running callbacks from hardirq
5 * context. The enqueueing is NMI-safe. 5 * context. The enqueueing is NMI-safe.
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index f7dd15d537f9..05254eeb4b4e 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -2,7 +2,7 @@
2 * jump label support 2 * jump label support
3 * 3 *
4 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com> 4 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
5 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com> 5 * Copyright (C) 2011 Peter Zijlstra
6 * 6 *
7 */ 7 */
8#include <linux/memory.h> 8#include <linux/memory.h>
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 6e5344112419..db545cbcdb89 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -294,6 +294,12 @@ static int klp_write_object_relocations(struct module *pmod,
294 294
295 for (reloc = obj->relocs; reloc->name; reloc++) { 295 for (reloc = obj->relocs; reloc->name; reloc++) {
296 if (!klp_is_module(obj)) { 296 if (!klp_is_module(obj)) {
297
298#if defined(CONFIG_RANDOMIZE_BASE)
299 /* If KASLR has been enabled, adjust old value accordingly */
300 if (kaslr_enabled())
301 reloc->val += kaslr_offset();
302#endif
297 ret = klp_verify_vmlinux_symbol(reloc->name, 303 ret = klp_verify_vmlinux_symbol(reloc->name,
298 reloc->val); 304 reloc->val);
299 if (ret) 305 if (ret)
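The livepatch change accounts for KASLR: symbol addresses recorded when the patch module was built are pre-relocation, so they must be shifted by the runtime offset before klp_verify_vmlinux_symbol() compares them. As an illustrative, made-up example: if reloc->val was recorded as 0xffffffff81234567 and the kernel booted with a KASLR offset of 0x200000, the address to verify is 0xffffffff81434567.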
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index deae3907ac1e..60ace56618f6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -6,7 +6,7 @@
6 * Started by Ingo Molnar: 6 * Started by Ingo Molnar:
7 * 7 *
8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
10 * 10 *
11 * this code maps all the lock dependencies as they occur in a live kernel 11 * this code maps all the lock dependencies as they occur in a live kernel
12 * and will warn about the following classes of locking bugs: 12 * and will warn about the following classes of locking bugs:
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index d83d798bef95..dbb61a302548 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -6,7 +6,7 @@
6 * Started by Ingo Molnar: 6 * Started by Ingo Molnar:
7 * 7 *
8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
10 * 10 *
11 * Code for /proc/lockdep and /proc/lockdep_stats: 11 * Code for /proc/lockdep and /proc/lockdep_stats:
12 * 12 *
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index d092a0c9c2d4..05a37857ab55 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -93,10 +93,12 @@ bool osq_lock(struct optimistic_spin_queue *lock)
93 node->cpu = curr; 93 node->cpu = curr;
94 94
95 /* 95 /*
96 * ACQUIRE semantics, pairs with corresponding RELEASE 96 * We need both ACQUIRE (pairs with corresponding RELEASE in
97 * in unlock() uncontended, or fastpath. 97 * unlock() uncontended, or fastpath) and RELEASE (to publish
98 * the node fields we just initialised) semantics when updating
99 * the lock tail.
98 */ 100 */
99 old = atomic_xchg_acquire(&lock->tail, curr); 101 old = atomic_xchg(&lock->tail, curr);
100 if (old == OSQ_UNLOCKED_VAL) 102 if (old == OSQ_UNLOCKED_VAL)
101 return true; 103 return true;
102 104
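The osq_lock change is about ordering strength: publishing the just-initialised node through the lock tail needs RELEASE semantics, and reading the previous tail needs ACQUIRE, so the acquire-only xchg is replaced by the fully ordered atomic_xchg(). Schematically (the initialisation lines are abbreviated from osq_lock()):

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;
	/* fully ordered: RELEASE publishes the init, ACQUIRE orders later reads */
	old = atomic_xchg(&lock->tail, curr);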
diff --git a/kernel/panic.c b/kernel/panic.c
index 4579dbb7ed87..4b150bc0c6c1 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -152,8 +152,11 @@ void panic(const char *fmt, ...)
152 * We may have ended up stopping the CPU holding the lock (in 152 * We may have ended up stopping the CPU holding the lock (in
153 * smp_send_stop()) while still having some valuable data in the console 153 * smp_send_stop()) while still having some valuable data in the console
154 * buffer. Try to acquire the lock then release it regardless of the 154 * buffer. Try to acquire the lock then release it regardless of the
 155 * result. The release will also print the buffers out. 155 * result. The release will also print the buffers out. Lock debugging
 156 * should be disabled to avoid reporting bad unlock balance when
 157 * panic() is not being called from an OOPS.
156 */ 158 */
159 debug_locks_off();
157 console_trylock(); 160 console_trylock();
158 console_unlock(); 161 console_unlock();
159 162
diff --git a/kernel/pid.c b/kernel/pid.c
index ca368793808e..78b3d9f80d44 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -467,7 +467,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
467 rcu_read_lock(); 467 rcu_read_lock();
468 if (type != PIDTYPE_PID) 468 if (type != PIDTYPE_PID)
469 task = task->group_leader; 469 task = task->group_leader;
470 pid = get_pid(task->pids[type].pid); 470 pid = get_pid(rcu_dereference(task->pids[type].pid));
471 rcu_read_unlock(); 471 rcu_read_unlock();
472 return pid; 472 return pid;
473} 473}
@@ -528,7 +528,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
528 if (likely(pid_alive(task))) { 528 if (likely(pid_alive(task))) {
529 if (type != PIDTYPE_PID) 529 if (type != PIDTYPE_PID)
530 task = task->group_leader; 530 task = task->group_leader;
531 nr = pid_nr_ns(task->pids[type].pid, ns); 531 nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns);
532 } 532 }
533 rcu_read_unlock(); 533 rcu_read_unlock();
534 534
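Both pid.c hunks apply the same rule: task->pids[type].pid is an RCU-protected pointer that detach paths can clear concurrently, so even inside rcu_read_lock() it must be loaded with rcu_dereference() rather than a plain read. The pattern, as in get_task_pid() above:

	rcu_read_lock();
	/* RCU-safe load; a plain read could race with detach and free */
	pid = get_pid(rcu_dereference(task->pids[type].pid));
	rcu_read_unlock();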
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c0a205101c23..caf4041f5b0a 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * sched_clock for unstable cpu clocks 2 * sched_clock for unstable cpu clocks
3 * 3 *
4 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 4 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
5 * 5 *
6 * Updates and enhancements: 6 * Updates and enhancements:
7 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com> 7 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4d568ac9319e..732e993b564b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1947,13 +1947,38 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1947 1947
1948#ifdef CONFIG_SMP 1948#ifdef CONFIG_SMP
1949 /* 1949 /*
1950 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
1951 * possible to, falsely, observe p->on_cpu == 0.
1952 *
1953 * One must be running (->on_cpu == 1) in order to remove oneself
1954 * from the runqueue.
1955 *
1956 * [S] ->on_cpu = 1; [L] ->on_rq
1957 * UNLOCK rq->lock
1958 * RMB
1959 * LOCK rq->lock
1960 * [S] ->on_rq = 0; [L] ->on_cpu
1961 *
1962 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
1963 * from the consecutive calls to schedule(); the first switching to our
1964 * task, the second putting it to sleep.
1965 */
1966 smp_rmb();
1967
1968 /*
1950 * If the owning (remote) cpu is still in the middle of schedule() with 1969 * If the owning (remote) cpu is still in the middle of schedule() with
1951 * this task as prev, wait until it's done referencing the task. 1970 * this task as prev, wait until it's done referencing the task.
1952 */ 1971 */
1953 while (p->on_cpu) 1972 while (p->on_cpu)
1954 cpu_relax(); 1973 cpu_relax();
1955 /* 1974 /*
1956 * Pairs with the smp_wmb() in finish_lock_switch(). 1975 * Combined with the control dependency above, we have an effective
1976 * smp_load_acquire() without the need for full barriers.
1977 *
1978 * Pairs with the smp_store_release() in finish_lock_switch().
1979 *
1980 * This ensures that tasks getting woken will be fully ordered against
1981 * their previous state and preserve Program Order.
1957 */ 1982 */
1958 smp_rmb(); 1983 smp_rmb();
1959 1984
@@ -2039,7 +2064,6 @@ out:
2039 */ 2064 */
2040int wake_up_process(struct task_struct *p) 2065int wake_up_process(struct task_struct *p)
2041{ 2066{
2042 WARN_ON(task_is_stopped_or_traced(p));
2043 return try_to_wake_up(p, TASK_NORMAL, 0); 2067 return try_to_wake_up(p, TASK_NORMAL, 0);
2044} 2068}
2045EXPORT_SYMBOL(wake_up_process); 2069EXPORT_SYMBOL(wake_up_process);
@@ -5847,13 +5871,13 @@ static int init_rootdomain(struct root_domain *rd)
5847{ 5871{
5848 memset(rd, 0, sizeof(*rd)); 5872 memset(rd, 0, sizeof(*rd));
5849 5873
5850 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) 5874 if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
5851 goto out; 5875 goto out;
5852 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) 5876 if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
5853 goto free_span; 5877 goto free_span;
5854 if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) 5878 if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
5855 goto free_online; 5879 goto free_online;
5856 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) 5880 if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5857 goto free_dlo_mask; 5881 goto free_dlo_mask;
5858 5882
5859 init_dl_bw(&rd->dl_bw); 5883 init_dl_bw(&rd->dl_bw);
@@ -8217,12 +8241,12 @@ static void cpu_cgroup_fork(struct task_struct *task, void *private)
8217 sched_move_task(task); 8241 sched_move_task(task);
8218} 8242}
8219 8243
8220static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, 8244static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
8221 struct cgroup_taskset *tset)
8222{ 8245{
8223 struct task_struct *task; 8246 struct task_struct *task;
8247 struct cgroup_subsys_state *css;
8224 8248
8225 cgroup_taskset_for_each(task, tset) { 8249 cgroup_taskset_for_each(task, css, tset) {
8226#ifdef CONFIG_RT_GROUP_SCHED 8250#ifdef CONFIG_RT_GROUP_SCHED
8227 if (!sched_rt_can_attach(css_tg(css), task)) 8251 if (!sched_rt_can_attach(css_tg(css), task))
8228 return -EINVAL; 8252 return -EINVAL;
@@ -8235,12 +8259,12 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
8235 return 0; 8259 return 0;
8236} 8260}
8237 8261
8238static void cpu_cgroup_attach(struct cgroup_subsys_state *css, 8262static void cpu_cgroup_attach(struct cgroup_taskset *tset)
8239 struct cgroup_taskset *tset)
8240{ 8263{
8241 struct task_struct *task; 8264 struct task_struct *task;
8265 struct cgroup_subsys_state *css;
8242 8266
8243 cgroup_taskset_for_each(task, tset) 8267 cgroup_taskset_for_each(task, css, tset)
8244 sched_move_task(task); 8268 sched_move_task(task);
8245} 8269}
8246 8270
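Three separate fixes sit in the sched/core hunks. The new smp_rmb() in try_to_wake_up() orders the load of p->on_cpu after p->on_rq; the spin on p->on_cpu plus the existing rmb then behaves as a load-acquire that pairs with the smp_store_release(&prev->on_cpu, 0) in finish_lock_switch() (also touched below). Separately, the root-domain masks switch to zalloc_cpumask_var() so they start zeroed. The release/acquire pair, schematically:

	/* finish_lock_switch() (writer) */
	smp_store_release(&prev->on_cpu, 0);

	/* try_to_wake_up() (reader): control dependency + rmb ~ load-acquire */
	while (p->on_cpu)
		cpu_relax();
	smp_rmb();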
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 26a54461bf59..05de80b48586 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -788,6 +788,9 @@ cputime_t task_gtime(struct task_struct *t)
788 unsigned int seq; 788 unsigned int seq;
789 cputime_t gtime; 789 cputime_t gtime;
790 790
791 if (!context_tracking_is_enabled())
792 return t->gtime;
793
791 do { 794 do {
792 seq = read_seqbegin(&t->vtime_seqlock); 795 seq = read_seqbegin(&t->vtime_seqlock);
793 796
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f04fda8f669c..90e26b11deaa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -17,7 +17,7 @@
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de> 17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18 * 18 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra 19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
21 */ 21 */
22 22
23#include <linux/latencytop.h> 23#include <linux/latencytop.h>
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e3cc16312046..8ec86abe0ea1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -64,7 +64,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
64 raw_spin_unlock(&rt_b->rt_runtime_lock); 64 raw_spin_unlock(&rt_b->rt_runtime_lock);
65} 65}
66 66
67#ifdef CONFIG_SMP 67#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
68static void push_irq_work_func(struct irq_work *work); 68static void push_irq_work_func(struct irq_work *work);
69#endif 69#endif
70 70
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index efd3bfc7e347..b242775bf670 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
1073 * We must ensure this doesn't happen until the switch is completely 1073 * We must ensure this doesn't happen until the switch is completely
1074 * finished. 1074 * finished.
1075 * 1075 *
1076 * In particular, the load of prev->state in finish_task_switch() must
1077 * happen before this.
1078 *
1076 * Pairs with the control dependency and rmb in try_to_wake_up(). 1079 * Pairs with the control dependency and rmb in try_to_wake_up().
1077 */ 1080 */
1078 smp_store_release(&prev->on_cpu, 0); 1081 smp_store_release(&prev->on_cpu, 0);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 052e02672d12..f15d6b6a538a 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -392,7 +392,7 @@ __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
392 do { 392 do {
393 prepare_to_wait(wq, &q->wait, mode); 393 prepare_to_wait(wq, &q->wait, mode);
394 if (test_bit(q->key.bit_nr, q->key.flags)) 394 if (test_bit(q->key.bit_nr, q->key.flags))
395 ret = (*action)(&q->key); 395 ret = (*action)(&q->key, mode);
396 } while (test_bit(q->key.bit_nr, q->key.flags) && !ret); 396 } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
397 finish_wait(wq, &q->wait); 397 finish_wait(wq, &q->wait);
398 return ret; 398 return ret;
@@ -431,7 +431,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
431 prepare_to_wait_exclusive(wq, &q->wait, mode); 431 prepare_to_wait_exclusive(wq, &q->wait, mode);
432 if (!test_bit(q->key.bit_nr, q->key.flags)) 432 if (!test_bit(q->key.bit_nr, q->key.flags))
433 continue; 433 continue;
434 ret = action(&q->key); 434 ret = action(&q->key, mode);
435 if (!ret) 435 if (!ret)
436 continue; 436 continue;
437 abort_exclusive_wait(wq, &q->wait, mode, &q->key); 437 abort_exclusive_wait(wq, &q->wait, mode, &q->key);
@@ -581,44 +581,44 @@ void wake_up_atomic_t(atomic_t *p)
581} 581}
582EXPORT_SYMBOL(wake_up_atomic_t); 582EXPORT_SYMBOL(wake_up_atomic_t);
583 583
584__sched int bit_wait(struct wait_bit_key *word) 584__sched int bit_wait(struct wait_bit_key *word, int mode)
585{ 585{
586 if (signal_pending_state(current->state, current))
587 return 1;
588 schedule(); 586 schedule();
587 if (signal_pending_state(mode, current))
588 return -EINTR;
589 return 0; 589 return 0;
590} 590}
591EXPORT_SYMBOL(bit_wait); 591EXPORT_SYMBOL(bit_wait);
592 592
593__sched int bit_wait_io(struct wait_bit_key *word) 593__sched int bit_wait_io(struct wait_bit_key *word, int mode)
594{ 594{
595 if (signal_pending_state(current->state, current))
596 return 1;
597 io_schedule(); 595 io_schedule();
596 if (signal_pending_state(mode, current))
597 return -EINTR;
598 return 0; 598 return 0;
599} 599}
600EXPORT_SYMBOL(bit_wait_io); 600EXPORT_SYMBOL(bit_wait_io);
601 601
602__sched int bit_wait_timeout(struct wait_bit_key *word) 602__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
603{ 603{
604 unsigned long now = READ_ONCE(jiffies); 604 unsigned long now = READ_ONCE(jiffies);
605 if (signal_pending_state(current->state, current))
606 return 1;
607 if (time_after_eq(now, word->timeout)) 605 if (time_after_eq(now, word->timeout))
608 return -EAGAIN; 606 return -EAGAIN;
609 schedule_timeout(word->timeout - now); 607 schedule_timeout(word->timeout - now);
608 if (signal_pending_state(mode, current))
609 return -EINTR;
610 return 0; 610 return 0;
611} 611}
612EXPORT_SYMBOL_GPL(bit_wait_timeout); 612EXPORT_SYMBOL_GPL(bit_wait_timeout);
613 613
614__sched int bit_wait_io_timeout(struct wait_bit_key *word) 614__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
615{ 615{
616 unsigned long now = READ_ONCE(jiffies); 616 unsigned long now = READ_ONCE(jiffies);
617 if (signal_pending_state(current->state, current))
618 return 1;
619 if (time_after_eq(now, word->timeout)) 617 if (time_after_eq(now, word->timeout))
620 return -EAGAIN; 618 return -EAGAIN;
621 io_schedule_timeout(word->timeout - now); 619 io_schedule_timeout(word->timeout - now);
620 if (signal_pending_state(mode, current))
621 return -EINTR;
622 return 0; 622 return 0;
623} 623}
624EXPORT_SYMBOL_GPL(bit_wait_io_timeout); 624EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
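The bit-wait rework changes the action-callback contract: callbacks now receive the sleep mode, sleep first, and only then test signal_pending_state() against that mode, returning -EINTR (previously they returned 1 before sleeping, based on current->state). A hypothetical custom action under the new signature:

	static __sched int example_bit_wait(struct wait_bit_key *word, int mode)
	{
		schedule();
		/* signals are checked after the sleep, against the caller's mode */
		if (signal_pending_state(mode, current))
			return -EINTR;
		return 0;
	}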
diff --git a/kernel/signal.c b/kernel/signal.c
index c0b01fe24bbd..f3f1f7a972fd 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3503,7 +3503,7 @@ SYSCALL_DEFINE0(pause)
3503 3503
3504#endif 3504#endif
3505 3505
3506int sigsuspend(sigset_t *set) 3506static int sigsuspend(sigset_t *set)
3507{ 3507{
3508 current->saved_sigmask = current->blocked; 3508 current->saved_sigmask = current->blocked;
3509 set_current_blocked(set); 3509 set_current_blocked(set);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 867bc20e1ef1..a3bbaee77c58 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -531,7 +531,7 @@ static int __init cpu_stop_init(void)
531} 531}
532early_initcall(cpu_stop_init); 532early_initcall(cpu_stop_init);
533 533
534#ifdef CONFIG_STOP_MACHINE 534#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
535 535
536static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus) 536static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
537{ 537{
@@ -631,4 +631,4 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
631 return ret ?: done.ret; 631 return ret ?: done.ret;
632} 632}
633 633
634#endif /* CONFIG_STOP_MACHINE */ 634#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 75f1d05ea82d..9c6045a27ba3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1887,12 +1887,6 @@ rb_event_index(struct ring_buffer_event *event)
1887 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; 1887 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1888} 1888}
1889 1889
1890static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1891{
1892 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1893 cpu_buffer->reader_page->read = 0;
1894}
1895
1896static void rb_inc_iter(struct ring_buffer_iter *iter) 1890static void rb_inc_iter(struct ring_buffer_iter *iter)
1897{ 1891{
1898 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 1892 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
@@ -2803,8 +2797,11 @@ rb_reserve_next_event(struct ring_buffer *buffer,
2803 2797
2804 event = __rb_reserve_next(cpu_buffer, &info); 2798 event = __rb_reserve_next(cpu_buffer, &info);
2805 2799
2806 if (unlikely(PTR_ERR(event) == -EAGAIN)) 2800 if (unlikely(PTR_ERR(event) == -EAGAIN)) {
2801 if (info.add_timestamp)
2802 info.length -= RB_LEN_TIME_EXTEND;
2807 goto again; 2803 goto again;
2804 }
2808 2805
2809 if (!event) 2806 if (!event)
2810 goto out_fail; 2807 goto out_fail;
@@ -3626,7 +3623,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3626 3623
3627 /* Finally update the reader page to the new head */ 3624 /* Finally update the reader page to the new head */
3628 cpu_buffer->reader_page = reader; 3625 cpu_buffer->reader_page = reader;
3629 rb_reset_reader_page(cpu_buffer); 3626 cpu_buffer->reader_page->read = 0;
3630 3627
3631 if (overwrite != cpu_buffer->last_overrun) { 3628 if (overwrite != cpu_buffer->last_overrun) {
3632 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; 3629 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
@@ -3636,6 +3633,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3636 goto again; 3633 goto again;
3637 3634
3638 out: 3635 out:
3636 /* Update the read_stamp on the first event */
3637 if (reader && reader->read == 0)
3638 cpu_buffer->read_stamp = reader->page->time_stamp;
3639
3639 arch_spin_unlock(&cpu_buffer->lock); 3640 arch_spin_unlock(&cpu_buffer->lock);
3640 local_irq_restore(flags); 3641 local_irq_restore(flags);
3641 3642
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index abfc903e741e..cc9f7a9319be 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * trace event based perf event profiling/tracing 2 * trace event based perf event profiling/tracing
3 * 3 *
4 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com> 4 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
5 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com> 5 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
6 */ 6 */
7 7
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 6bbc5f652355..4f6ef6912e00 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -582,6 +582,12 @@ static void __ftrace_clear_event_pids(struct trace_array *tr)
582 unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr); 582 unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
583 unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr); 583 unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);
584 584
585 unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
586 unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);
587
588 unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
589 unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
590
585 list_for_each_entry(file, &tr->events, list) { 591 list_for_each_entry(file, &tr->events, list) {
586 clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags); 592 clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
587 } 593 }
@@ -1729,6 +1735,16 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
1729 tr, INT_MAX); 1735 tr, INT_MAX);
1730 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, 1736 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
1731 tr, 0); 1737 tr, 0);
1738
1739 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
1740 tr, INT_MAX);
1741 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
1742 tr, 0);
1743
1744 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
1745 tr, INT_MAX);
1746 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
1747 tr, 0);
1732 } 1748 }
1733 1749
1734 /* 1750 /*
diff --git a/lib/btree.c b/lib/btree.c
index 4264871ea1a0..f93a945274af 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org> 6 * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
7 * Bits and pieces stolen from Peter Zijlstra's code, which is 7 * Bits and pieces stolen from Peter Zijlstra's code, which is
8 * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright 2007, Red Hat Inc. Peter Zijlstra
9 * GPLv2 9 * GPLv2
10 * 10 *
11 * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch 11 * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 8855f019ebe8..d34bd24c2c84 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1464,7 +1464,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
1464 entry->type = dma_debug_coherent; 1464 entry->type = dma_debug_coherent;
1465 entry->dev = dev; 1465 entry->dev = dev;
1466 entry->pfn = page_to_pfn(virt_to_page(virt)); 1466 entry->pfn = page_to_pfn(virt_to_page(virt));
1467 entry->offset = (size_t) virt & PAGE_MASK; 1467 entry->offset = (size_t) virt & ~PAGE_MASK;
1468 entry->size = size; 1468 entry->size = size;
1469 entry->dev_addr = dma_addr; 1469 entry->dev_addr = dma_addr;
1470 entry->direction = DMA_BIDIRECTIONAL; 1470 entry->direction = DMA_BIDIRECTIONAL;
@@ -1480,7 +1480,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
1480 .type = dma_debug_coherent, 1480 .type = dma_debug_coherent,
1481 .dev = dev, 1481 .dev = dev,
1482 .pfn = page_to_pfn(virt_to_page(virt)), 1482 .pfn = page_to_pfn(virt_to_page(virt)),
1483 .offset = (size_t) virt & PAGE_MASK, 1483 .offset = (size_t) virt & ~PAGE_MASK,
1484 .dev_addr = addr, 1484 .dev_addr = addr,
1485 .size = size, 1485 .size = size,
1486 .direction = DMA_BIDIRECTIONAL, 1486 .direction = DMA_BIDIRECTIONAL,
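The dma-debug fix is a classic mask inversion: PAGE_MASK selects the page-aligned base of an address, while ~PAGE_MASK yields the byte offset within the page, which is what entry->offset is meant to hold. With 4 KiB pages (PAGE_MASK == ~0xfffUL), for example:

	/* virt = 0xffff880012345678
	 * virt &  PAGE_MASK == 0xffff880012345000   (page base: the old, wrong value)
	 * virt & ~PAGE_MASK == 0x678                (in-page offset: the fix)
	 */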
diff --git a/lib/proportions.c b/lib/proportions.c
index 6f724298f67a..efa54f259ea9 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Floating proportions 2 * Floating proportions
3 * 3 *
4 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 4 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
5 * 5 *
6 * Description: 6 * Description:
7 * 7 *
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a54ff8949f91..eb9240c458fa 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -389,33 +389,31 @@ static bool rhashtable_check_elasticity(struct rhashtable *ht,
389 return false; 389 return false;
390} 390}
391 391
392int rhashtable_insert_rehash(struct rhashtable *ht) 392int rhashtable_insert_rehash(struct rhashtable *ht,
393 struct bucket_table *tbl)
393{ 394{
394 struct bucket_table *old_tbl; 395 struct bucket_table *old_tbl;
395 struct bucket_table *new_tbl; 396 struct bucket_table *new_tbl;
396 struct bucket_table *tbl;
397 unsigned int size; 397 unsigned int size;
398 int err; 398 int err;
399 399
400 old_tbl = rht_dereference_rcu(ht->tbl, ht); 400 old_tbl = rht_dereference_rcu(ht->tbl, ht);
401 tbl = rhashtable_last_table(ht, old_tbl);
402 401
403 size = tbl->size; 402 size = tbl->size;
404 403
404 err = -EBUSY;
405
405 if (rht_grow_above_75(ht, tbl)) 406 if (rht_grow_above_75(ht, tbl))
406 size *= 2; 407 size *= 2;
407 /* Do not schedule more than one rehash */ 408 /* Do not schedule more than one rehash */
408 else if (old_tbl != tbl) 409 else if (old_tbl != tbl)
409 return -EBUSY; 410 goto fail;
411
412 err = -ENOMEM;
410 413
411 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); 414 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
412 if (new_tbl == NULL) { 415 if (new_tbl == NULL)
413 /* Schedule async resize/rehash to try allocation 416 goto fail;
414 * non-atomic context.
415 */
416 schedule_work(&ht->run_work);
417 return -ENOMEM;
418 }
419 417
420 err = rhashtable_rehash_attach(ht, tbl, new_tbl); 418 err = rhashtable_rehash_attach(ht, tbl, new_tbl);
421 if (err) { 419 if (err) {
@@ -426,12 +424,24 @@ int rhashtable_insert_rehash(struct rhashtable *ht)
426 schedule_work(&ht->run_work); 424 schedule_work(&ht->run_work);
427 425
428 return err; 426 return err;
427
428fail:
429 /* Do not fail the insert if someone else did a rehash. */
430 if (likely(rcu_dereference_raw(tbl->future_tbl)))
431 return 0;
432
433 /* Schedule async rehash to retry allocation in process context. */
434 if (err == -ENOMEM)
435 schedule_work(&ht->run_work);
436
437 return err;
429} 438}
430EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); 439EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
431 440
432int rhashtable_insert_slow(struct rhashtable *ht, const void *key, 441struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
433 struct rhash_head *obj, 442 const void *key,
434 struct bucket_table *tbl) 443 struct rhash_head *obj,
444 struct bucket_table *tbl)
435{ 445{
436 struct rhash_head *head; 446 struct rhash_head *head;
437 unsigned int hash; 447 unsigned int hash;
@@ -467,7 +477,12 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
467exit: 477exit:
468 spin_unlock(rht_bucket_lock(tbl, hash)); 478 spin_unlock(rht_bucket_lock(tbl, hash));
469 479
470 return err; 480 if (err == 0)
481 return NULL;
482 else if (err == -EAGAIN)
483 return tbl;
484 else
485 return ERR_PTR(err);
471} 486}
472EXPORT_SYMBOL_GPL(rhashtable_insert_slow); 487EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
473 488
@@ -503,10 +518,10 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
503 if (!iter->walker) 518 if (!iter->walker)
504 return -ENOMEM; 519 return -ENOMEM;
505 520
506 mutex_lock(&ht->mutex); 521 spin_lock(&ht->lock);
507 iter->walker->tbl = rht_dereference(ht->tbl, ht); 522 iter->walker->tbl = rht_dereference(ht->tbl, ht);
508 list_add(&iter->walker->list, &iter->walker->tbl->walkers); 523 list_add(&iter->walker->list, &iter->walker->tbl->walkers);
509 mutex_unlock(&ht->mutex); 524 spin_unlock(&ht->lock);
510 525
511 return 0; 526 return 0;
512} 527}
@@ -520,10 +535,10 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
520 */ 535 */
521void rhashtable_walk_exit(struct rhashtable_iter *iter) 536void rhashtable_walk_exit(struct rhashtable_iter *iter)
522{ 537{
523 mutex_lock(&iter->ht->mutex); 538 spin_lock(&iter->ht->lock);
524 if (iter->walker->tbl) 539 if (iter->walker->tbl)
525 list_del(&iter->walker->list); 540 list_del(&iter->walker->list);
526 mutex_unlock(&iter->ht->mutex); 541 spin_unlock(&iter->ht->lock);
527 kfree(iter->walker); 542 kfree(iter->walker);
528} 543}
529EXPORT_SYMBOL_GPL(rhashtable_walk_exit); 544EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
@@ -547,14 +562,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
547{ 562{
548 struct rhashtable *ht = iter->ht; 563 struct rhashtable *ht = iter->ht;
549 564
550 mutex_lock(&ht->mutex); 565 rcu_read_lock();
551 566
567 spin_lock(&ht->lock);
552 if (iter->walker->tbl) 568 if (iter->walker->tbl)
553 list_del(&iter->walker->list); 569 list_del(&iter->walker->list);
554 570 spin_unlock(&ht->lock);
555 rcu_read_lock();
556
557 mutex_unlock(&ht->mutex);
558 571
559 if (!iter->walker->tbl) { 572 if (!iter->walker->tbl) {
560 iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); 573 iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
@@ -723,9 +736,6 @@ int rhashtable_init(struct rhashtable *ht,
723 if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) 736 if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
724 return -EINVAL; 737 return -EINVAL;
725 738
726 if (params->nelem_hint)
727 size = rounded_hashtable_size(params);
728
729 memset(ht, 0, sizeof(*ht)); 739 memset(ht, 0, sizeof(*ht));
730 mutex_init(&ht->mutex); 740 mutex_init(&ht->mutex);
731 spin_lock_init(&ht->lock); 741 spin_lock_init(&ht->lock);
@@ -745,6 +755,9 @@ int rhashtable_init(struct rhashtable *ht,
745 755
746 ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); 756 ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
747 757
758 if (params->nelem_hint)
759 size = rounded_hashtable_size(&ht->p);
760
748 /* The maximum (not average) chain length grows with the 761 /* The maximum (not average) chain length grows with the
749 * size of the hash table, at a rate of (log N)/(log log N). 762 * size of the hash table, at a rate of (log N)/(log log N).
750 * The value of 16 is selected so that even if the hash 763 * The value of 16 is selected so that even if the hash
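The rhashtable hunks change two things. rhashtable_insert_slow() now encodes its outcome in the returned bucket table, and walker registration moves from ht->mutex to the ht->lock spinlock, presumably so it serialises on the same lock the rehash path uses to migrate walkers. A sketch of handling the new return convention, under the mapping visible above (NULL on success, the table itself for -EAGAIN, ERR_PTR otherwise):

	tbl = rhashtable_insert_slow(ht, key, obj, tbl);
	if (!tbl)
		return 0;		/* inserted */
	if (IS_ERR(tbl))
		return PTR_ERR(tbl);	/* hard failure */
	/* otherwise -EAGAIN: retry against the returned (newer) table */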
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8ed2ffd963c5..7340353f8aea 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -957,8 +957,9 @@ EXPORT_SYMBOL(congestion_wait);
957 * jiffies for either a BDI to exit congestion of the given @sync queue 957 * jiffies for either a BDI to exit congestion of the given @sync queue
958 * or a write to complete. 958 * or a write to complete.
959 * 959 *
960 * In the absence of zone congestion, cond_resched() is called to yield 960 * In the absence of zone congestion, a short sleep or a cond_resched is
961 * the processor if necessary but otherwise does not sleep. 961 * performed to yield the processor and to allow other subsystems to make
 962 * forward progress.
962 * 963 *
963 * The return value is 0 if the sleep is for the full timeout. Otherwise, 964 * The return value is 0 if the sleep is for the full timeout. Otherwise,
964 * it is the number of jiffies that were still remaining when the function 965 * it is the number of jiffies that were still remaining when the function
@@ -978,7 +979,19 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
978 */ 979 */
979 if (atomic_read(&nr_wb_congested[sync]) == 0 || 980 if (atomic_read(&nr_wb_congested[sync]) == 0 ||
980 !test_bit(ZONE_CONGESTED, &zone->flags)) { 981 !test_bit(ZONE_CONGESTED, &zone->flags)) {
981 cond_resched(); 982
983 /*
984 * Memory allocation/reclaim might be called from a WQ
985 * context and the current implementation of the WQ
986 * concurrency control doesn't recognize that a particular
987 * WQ is congested if the worker thread is looping without
988 * ever sleeping. Therefore we have to do a short sleep
989 * here rather than calling cond_resched().
990 */
991 if (current->flags & PF_WQ_WORKER)
992 schedule_timeout(1);
993 else
994 cond_resched();
982 995
983 /* In case we scheduled, work out time remaining */ 996 /* In case we scheduled, work out time remaining */
984 ret = timeout - (jiffies - start); 997 ret = timeout - (jiffies - start);
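The wait_iff_congested() change works around workqueue concurrency management: cond_resched() never marks the worker as sleeping, so a reclaim loop running from a WQ worker can keep the pool looking busy and starve other work items, whereas an actual short sleep does register. Condensed:

	if (current->flags & PF_WQ_WORKER)
		schedule_timeout(1);	/* a real sleep the WQ manager can observe */
	else
		cond_resched();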
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c29ddebc8705..62fe06bb7d04 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2009,7 +2009,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
2009 /* 2009 /*
2010 * Be somewhat over-protective like KSM for now! 2010 * Be somewhat over-protective like KSM for now!
2011 */ 2011 */
2012 if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) 2012 if (*vm_flags & VM_NO_THP)
2013 return -EINVAL; 2013 return -EINVAL;
2014 *vm_flags &= ~VM_NOHUGEPAGE; 2014 *vm_flags &= ~VM_NOHUGEPAGE;
2015 *vm_flags |= VM_HUGEPAGE; 2015 *vm_flags |= VM_HUGEPAGE;
@@ -2025,7 +2025,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
2025 /* 2025 /*
2026 * Be somewhat over-protective like KSM for now! 2026 * Be somewhat over-protective like KSM for now!
2027 */ 2027 */
2028 if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) 2028 if (*vm_flags & VM_NO_THP)
2029 return -EINVAL; 2029 return -EINVAL;
2030 *vm_flags &= ~VM_HUGEPAGE; 2030 *vm_flags &= ~VM_HUGEPAGE;
2031 *vm_flags |= VM_NOHUGEPAGE; 2031 *vm_flags |= VM_NOHUGEPAGE;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 827bb02a43a4..ef6963b577fd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -372,8 +372,10 @@ retry_locked:
372 spin_unlock(&resv->lock); 372 spin_unlock(&resv->lock);
373 373
374 trg = kmalloc(sizeof(*trg), GFP_KERNEL); 374 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
375 if (!trg) 375 if (!trg) {
376 kfree(nrg);
376 return -ENOMEM; 377 return -ENOMEM;
378 }
377 379
378 spin_lock(&resv->lock); 380 spin_lock(&resv->lock);
379 list_add(&trg->link, &resv->region_cache); 381 list_add(&trg->link, &resv->region_cache);
@@ -483,8 +485,16 @@ static long region_del(struct resv_map *resv, long f, long t)
483retry: 485retry:
484 spin_lock(&resv->lock); 486 spin_lock(&resv->lock);
485 list_for_each_entry_safe(rg, trg, head, link) { 487 list_for_each_entry_safe(rg, trg, head, link) {
486 if (rg->to <= f) 488 /*
489 * Skip regions before the range to be deleted. file_region
490 * ranges are normally of the form [from, to). However, there
491 * may be a "placeholder" entry in the map which is of the form
492 * (from, to) with from == to. Check for placeholder entries
493 * at the beginning of the range to be deleted.
494 */
495 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
487 continue; 496 continue;
497
488 if (rg->from >= t) 498 if (rg->from >= t)
489 break; 499 break;
490 500
@@ -1886,7 +1896,10 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
1886 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr); 1896 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
1887 if (!page) 1897 if (!page)
1888 goto out_uncharge_cgroup; 1898 goto out_uncharge_cgroup;
1889 1899 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
1900 SetPagePrivate(page);
1901 h->resv_huge_pages--;
1902 }
1890 spin_lock(&hugetlb_lock); 1903 spin_lock(&hugetlb_lock);
1891 list_move(&page->lru, &h->hugepage_activelist); 1904 list_move(&page->lru, &h->hugepage_activelist);
1892 /* Fall through */ 1905 /* Fall through */
@@ -3693,12 +3706,12 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3693 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 3706 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3694 return VM_FAULT_HWPOISON_LARGE | 3707 return VM_FAULT_HWPOISON_LARGE |
3695 VM_FAULT_SET_HINDEX(hstate_index(h)); 3708 VM_FAULT_SET_HINDEX(hstate_index(h));
3709 } else {
3710 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3711 if (!ptep)
3712 return VM_FAULT_OOM;
3696 } 3713 }
3697 3714
3698 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3699 if (!ptep)
3700 return VM_FAULT_OOM;
3701
3702 mapping = vma->vm_file->f_mapping; 3715 mapping = vma->vm_file->f_mapping;
3703 idx = vma_hugecache_offset(h, vma, address); 3716 idx = vma_hugecache_offset(h, vma, address);
3704 3717
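
The region_del() hunk above refines the skip test so that a zero-length "placeholder" entry sitting exactly at the start of the range is still processed rather than skipped. A self-contained userspace model of just that predicate, with a simplified file_region type:

#include <assert.h>
#include <stdbool.h>

/* file_region covers [from, to); a placeholder has from == to */
struct file_region { long from, to; };

static bool skip_region(const struct file_region *rg, long f)
{
	/* skip regions ending at or before f, except a placeholder at f */
	return rg->to <= f && (rg->to != rg->from || rg->to != f);
}

int main(void)
{
	struct file_region before      = { .from = 0, .to = 4 };
	struct file_region placeholder = { .from = 4, .to = 4 };

	assert(skip_region(&before, 4));	/* ends at f: skip */
	assert(!skip_region(&placeholder, 4));	/* placeholder at f: keep */
	assert(skip_region(&placeholder, 8));	/* placeholder before f: skip */
	return 0;
}
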
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index d41b21bce6a0..bc0a8d8b8f42 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -19,6 +19,7 @@
19#include <linux/export.h> 19#include <linux/export.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/kmemleak.h>
22#include <linux/memblock.h> 23#include <linux/memblock.h>
23#include <linux/memory.h> 24#include <linux/memory.h>
24#include <linux/mm.h> 25#include <linux/mm.h>
@@ -444,6 +445,7 @@ int kasan_module_alloc(void *addr, size_t size)
444 445
445 if (ret) { 446 if (ret) {
446 find_vm_area(addr)->flags |= VM_KASAN; 447 find_vm_area(addr)->flags |= VM_KASAN;
448 kmemleak_ignore(ret);
447 return 0; 449 return 0;
448 } 450 }
449 451
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9acfb165eb52..e234c21a5e6c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2128,7 +2128,7 @@ done_restock:
2128 */ 2128 */
2129 do { 2129 do {
2130 if (page_counter_read(&memcg->memory) > memcg->high) { 2130 if (page_counter_read(&memcg->memory) > memcg->high) {
2131 current->memcg_nr_pages_over_high += nr_pages; 2131 current->memcg_nr_pages_over_high += batch;
2132 set_notify_resume(current); 2132 set_notify_resume(current);
2133 break; 2133 break;
2134 } 2134 }
@@ -4779,23 +4779,18 @@ static void mem_cgroup_clear_mc(void)
4779 spin_unlock(&mc.lock); 4779 spin_unlock(&mc.lock);
4780} 4780}
4781 4781
4782static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, 4782static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4783 struct cgroup_taskset *tset)
4784{ 4783{
4785 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 4784 struct cgroup_subsys_state *css;
4785 struct mem_cgroup *memcg;
4786 struct mem_cgroup *from; 4786 struct mem_cgroup *from;
4787 struct task_struct *leader, *p; 4787 struct task_struct *leader, *p;
4788 struct mm_struct *mm; 4788 struct mm_struct *mm;
4789 unsigned long move_flags; 4789 unsigned long move_flags;
4790 int ret = 0; 4790 int ret = 0;
4791 4791
4792 /* 4792 /* charge immigration isn't supported on the default hierarchy */
 4793 * We are now committed to this value whatever it is. Changes in this 4793 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4794 * tunable will only affect upcoming migrations, not the current one.
4795 * So we need to save it, and keep it going.
4796 */
4797 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4798 if (!move_flags)
4799 return 0; 4794 return 0;
4800 4795
4801 /* 4796 /*
@@ -4805,13 +4800,23 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
4805 * multiple. 4800 * multiple.
4806 */ 4801 */
4807 p = NULL; 4802 p = NULL;
4808 cgroup_taskset_for_each_leader(leader, tset) { 4803 cgroup_taskset_for_each_leader(leader, css, tset) {
4809 WARN_ON_ONCE(p); 4804 WARN_ON_ONCE(p);
4810 p = leader; 4805 p = leader;
4806 memcg = mem_cgroup_from_css(css);
4811 } 4807 }
4812 if (!p) 4808 if (!p)
4813 return 0; 4809 return 0;
4814 4810
4811 /*
 4812 * We are now committed to this value whatever it is. Changes in this
4813 * tunable will only affect upcoming migrations, not the current one.
4814 * So we need to save it, and keep it going.
4815 */
4816 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4817 if (!move_flags)
4818 return 0;
4819
4815 from = mem_cgroup_from_task(p); 4820 from = mem_cgroup_from_task(p);
4816 4821
4817 VM_BUG_ON(from == memcg); 4822 VM_BUG_ON(from == memcg);
@@ -4842,8 +4847,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
4842 return ret; 4847 return ret;
4843} 4848}
4844 4849
4845static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, 4850static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4846 struct cgroup_taskset *tset)
4847{ 4851{
4848 if (mc.to) 4852 if (mc.to)
4849 mem_cgroup_clear_mc(); 4853 mem_cgroup_clear_mc();
@@ -4985,10 +4989,10 @@ retry:
4985 atomic_dec(&mc.from->moving_account); 4989 atomic_dec(&mc.from->moving_account);
4986} 4990}
4987 4991
4988static void mem_cgroup_move_task(struct cgroup_subsys_state *css, 4992static void mem_cgroup_move_task(struct cgroup_taskset *tset)
4989 struct cgroup_taskset *tset)
4990{ 4993{
4991 struct task_struct *p = cgroup_taskset_first(tset); 4994 struct cgroup_subsys_state *css;
4995 struct task_struct *p = cgroup_taskset_first(tset, &css);
4992 struct mm_struct *mm = get_task_mm(p); 4996 struct mm_struct *mm = get_task_mm(p);
4993 4997
4994 if (mm) { 4998 if (mm) {
@@ -5000,17 +5004,14 @@ static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
5000 mem_cgroup_clear_mc(); 5004 mem_cgroup_clear_mc();
5001} 5005}
5002#else /* !CONFIG_MMU */ 5006#else /* !CONFIG_MMU */
5003static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, 5007static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5004 struct cgroup_taskset *tset)
5005{ 5008{
5006 return 0; 5009 return 0;
5007} 5010}
5008static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, 5011static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5009 struct cgroup_taskset *tset)
5010{ 5012{
5011} 5013}
5012static void mem_cgroup_move_task(struct cgroup_subsys_state *css, 5014static void mem_cgroup_move_task(struct cgroup_taskset *tset)
5013 struct cgroup_taskset *tset)
5014{ 5015{
5015} 5016}
5016#endif 5017#endif
@@ -5511,11 +5512,11 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
5511 * mem_cgroup_replace_page - migrate a charge to another page 5512 * mem_cgroup_replace_page - migrate a charge to another page
5512 * @oldpage: currently charged page 5513 * @oldpage: currently charged page
5513 * @newpage: page to transfer the charge to 5514 * @newpage: page to transfer the charge to
5514 * @lrucare: either or both pages might be on the LRU already
5515 * 5515 *
5516 * Migrate the charge from @oldpage to @newpage. 5516 * Migrate the charge from @oldpage to @newpage.
5517 * 5517 *
5518 * Both pages must be locked, @newpage->mapping must be set up. 5518 * Both pages must be locked, @newpage->mapping must be set up.
5519 * Either or both pages might be on the LRU already.
5519 */ 5520 */
5520void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage) 5521void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
5521{ 5522{
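
The memcontrol hunks above track a cgroup core API change: can_attach, cancel_attach and move_task now receive only the taskset, and the destination css is produced by the taskset iterator itself. A rough userspace model of the new calling convention; the types and the iterator macro here are simplified stand-ins for the kernel's cgroup_taskset machinery, not its real API:

#include <stddef.h>
#include <stdio.h>

struct css  { int id; };
struct task { const char *comm; struct css *dst_css; };

struct taskset { struct task *tasks; size_t n; };

/* model of cgroup_taskset_for_each_leader(leader, css, tset) */
#define taskset_for_each(leader, css, tset, i)				      \
	for ((i) = 0;							      \
	     (i) < (tset)->n &&						      \
	     ((leader) = &(tset)->tasks[(i)], (css) = (leader)->dst_css, 1); \
	     (i)++)

/* was: can_attach(struct css *css, struct taskset *tset) */
static int can_attach(struct taskset *tset)
{
	struct task *leader;
	struct css *css;
	size_t i;

	taskset_for_each(leader, css, tset, i)
		printf("%s -> css %d\n", leader->comm, css->id);
	return 0;
}

int main(void)
{
	struct css memcg = { .id = 7 };
	struct task t = { .comm = "worker", .dst_css = &memcg };
	struct taskset tset = { .tasks = &t, .n = 1 };

	return can_attach(&tset);
}
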
diff --git a/mm/memory.c b/mm/memory.c
index deb679c31f2a..c387430f06c3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3015,9 +3015,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3015 } else { 3015 } else {
3016 /* 3016 /*
3017 * The fault handler has no page to lock, so it holds 3017 * The fault handler has no page to lock, so it holds
3018 * i_mmap_lock for write to protect against truncate. 3018 * i_mmap_lock for read to protect against truncate.
3019 */ 3019 */
3020 i_mmap_unlock_write(vma->vm_file->f_mapping); 3020 i_mmap_unlock_read(vma->vm_file->f_mapping);
3021 } 3021 }
3022 goto uncharge_out; 3022 goto uncharge_out;
3023 } 3023 }
@@ -3031,9 +3031,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3031 } else { 3031 } else {
3032 /* 3032 /*
3033 * The fault handler has no page to lock, so it holds 3033 * The fault handler has no page to lock, so it holds
3034 * i_mmap_lock for write to protect against truncate. 3034 * i_mmap_lock for read to protect against truncate.
3035 */ 3035 */
3036 i_mmap_unlock_write(vma->vm_file->f_mapping); 3036 i_mmap_unlock_read(vma->vm_file->f_mapping);
3037 } 3037 }
3038 return ret; 3038 return ret;
3039uncharge_out: 3039uncharge_out:
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index d13a33918fa2..c12680993ff3 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -608,6 +608,8 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
608 continue; 608 continue;
609 if (unlikely(p->flags & PF_KTHREAD)) 609 if (unlikely(p->flags & PF_KTHREAD))
610 continue; 610 continue;
611 if (is_global_init(p))
612 continue;
611 if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) 613 if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
612 continue; 614 continue;
613 615
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2c90357c34ea..d15d88c8efa1 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2,7 +2,7 @@
2 * mm/page-writeback.c 2 * mm/page-writeback.c
3 * 3 *
4 * Copyright (C) 2002, Linus Torvalds. 4 * Copyright (C) 2002, Linus Torvalds.
5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
6 * 6 *
7 * Contains functions related to writing back dirty pages at the 7 * Contains functions related to writing back dirty pages at the
8 * address_space level. 8 * address_space level.
@@ -1542,7 +1542,9 @@ static void balance_dirty_pages(struct address_space *mapping,
1542 for (;;) { 1542 for (;;) {
1543 unsigned long now = jiffies; 1543 unsigned long now = jiffies;
1544 unsigned long dirty, thresh, bg_thresh; 1544 unsigned long dirty, thresh, bg_thresh;
1545 unsigned long m_dirty, m_thresh, m_bg_thresh; 1545 unsigned long m_dirty = 0; /* stop bogus uninit warnings */
1546 unsigned long m_thresh = 0;
1547 unsigned long m_bg_thresh = 0;
1546 1548
1547 /* 1549 /*
1548 * Unstable writes are a feature of certain networked 1550 * Unstable writes are a feature of certain networked
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 17a3c66639a9..9d666df5ef95 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3647,8 +3647,9 @@ static void show_migration_types(unsigned char type)
3647{ 3647{
3648 static const char types[MIGRATE_TYPES] = { 3648 static const char types[MIGRATE_TYPES] = {
3649 [MIGRATE_UNMOVABLE] = 'U', 3649 [MIGRATE_UNMOVABLE] = 'U',
3650 [MIGRATE_RECLAIMABLE] = 'E',
3651 [MIGRATE_MOVABLE] = 'M', 3650 [MIGRATE_MOVABLE] = 'M',
3651 [MIGRATE_RECLAIMABLE] = 'E',
3652 [MIGRATE_HIGHATOMIC] = 'H',
3652#ifdef CONFIG_CMA 3653#ifdef CONFIG_CMA
3653 [MIGRATE_CMA] = 'C', 3654 [MIGRATE_CMA] = 'C',
3654#endif 3655#endif
diff --git a/mm/shmem.c b/mm/shmem.c
index 9187eee4128b..2afcdbbdb685 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -843,14 +843,14 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
843 list_add_tail(&info->swaplist, &shmem_swaplist); 843 list_add_tail(&info->swaplist, &shmem_swaplist);
844 844
845 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { 845 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
846 swap_shmem_alloc(swap);
847 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
848
849 spin_lock(&info->lock); 846 spin_lock(&info->lock);
850 info->swapped++;
851 shmem_recalc_inode(inode); 847 shmem_recalc_inode(inode);
848 info->swapped++;
852 spin_unlock(&info->lock); 849 spin_unlock(&info->lock);
853 850
851 swap_shmem_alloc(swap);
852 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
853
854 mutex_unlock(&shmem_swaplist_mutex); 854 mutex_unlock(&shmem_swaplist_mutex);
855 BUG_ON(page_mapped(page)); 855 BUG_ON(page_mapped(page));
856 swap_writepage(page, wbc); 856 swap_writepage(page, wbc);
@@ -1078,7 +1078,7 @@ repeat:
1078 if (sgp != SGP_WRITE && sgp != SGP_FALLOC && 1078 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1079 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1079 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
1080 error = -EINVAL; 1080 error = -EINVAL;
1081 goto failed; 1081 goto unlock;
1082 } 1082 }
1083 1083
1084 if (page && sgp == SGP_WRITE) 1084 if (page && sgp == SGP_WRITE)
@@ -1246,11 +1246,15 @@ clear:
1246 /* Perhaps the file has been truncated since we checked */ 1246 /* Perhaps the file has been truncated since we checked */
1247 if (sgp != SGP_WRITE && sgp != SGP_FALLOC && 1247 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1248 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1248 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
1249 if (alloced) {
1250 ClearPageDirty(page);
1251 delete_from_page_cache(page);
1252 spin_lock(&info->lock);
1253 shmem_recalc_inode(inode);
1254 spin_unlock(&info->lock);
1255 }
1249 error = -EINVAL; 1256 error = -EINVAL;
1250 if (alloced) 1257 goto unlock;
1251 goto trunc;
1252 else
1253 goto failed;
1254 } 1258 }
1255 *pagep = page; 1259 *pagep = page;
1256 return 0; 1260 return 0;
@@ -1258,23 +1262,13 @@ clear:
1258 /* 1262 /*
1259 * Error recovery. 1263 * Error recovery.
1260 */ 1264 */
1261trunc:
1262 info = SHMEM_I(inode);
1263 ClearPageDirty(page);
1264 delete_from_page_cache(page);
1265 spin_lock(&info->lock);
1266 info->alloced--;
1267 inode->i_blocks -= BLOCKS_PER_PAGE;
1268 spin_unlock(&info->lock);
1269decused: 1265decused:
1270 sbinfo = SHMEM_SB(inode->i_sb);
1271 if (sbinfo->max_blocks) 1266 if (sbinfo->max_blocks)
1272 percpu_counter_add(&sbinfo->used_blocks, -1); 1267 percpu_counter_add(&sbinfo->used_blocks, -1);
1273unacct: 1268unacct:
1274 shmem_unacct_blocks(info->flags, 1); 1269 shmem_unacct_blocks(info->flags, 1);
1275failed: 1270failed:
1276 if (swap.val && error != -EINVAL && 1271 if (swap.val && !shmem_confirm_swap(mapping, index, swap))
1277 !shmem_confirm_swap(mapping, index, swap))
1278 error = -EEXIST; 1272 error = -EEXIST;
1279unlock: 1273unlock:
1280 if (page) { 1274 if (page) {
diff --git a/mm/slab.c b/mm/slab.c
index e0819fa96559..4765c97ce690 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3419,7 +3419,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3419} 3419}
3420EXPORT_SYMBOL(kmem_cache_free_bulk); 3420EXPORT_SYMBOL(kmem_cache_free_bulk);
3421 3421
3422bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 3422int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3423 void **p) 3423 void **p)
3424{ 3424{
3425 return __kmem_cache_alloc_bulk(s, flags, size, p); 3425 return __kmem_cache_alloc_bulk(s, flags, size, p);
diff --git a/mm/slab.h b/mm/slab.h
index 27492eb678f7..7b6087197997 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -170,7 +170,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
170 * may be allocated or freed using these operations. 170 * may be allocated or freed using these operations.
171 */ 171 */
172void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); 172void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
173bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); 173int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
174 174
175#ifdef CONFIG_MEMCG_KMEM 175#ifdef CONFIG_MEMCG_KMEM
176/* 176/*
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d88e97c10a2e..3c6a86b4ec25 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -112,7 +112,7 @@ void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
112 kmem_cache_free(s, p[i]); 112 kmem_cache_free(s, p[i]);
113} 113}
114 114
115bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, 115int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
116 void **p) 116 void **p)
117{ 117{
118 size_t i; 118 size_t i;
@@ -121,10 +121,10 @@ bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
121 void *x = p[i] = kmem_cache_alloc(s, flags); 121 void *x = p[i] = kmem_cache_alloc(s, flags);
122 if (!x) { 122 if (!x) {
123 __kmem_cache_free_bulk(s, i, p); 123 __kmem_cache_free_bulk(s, i, p);
124 return false; 124 return 0;
125 } 125 }
126 } 126 }
127 return true; 127 return i;
128} 128}
129 129
130#ifdef CONFIG_MEMCG_KMEM 130#ifdef CONFIG_MEMCG_KMEM
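
Across the slab, slob and slub hunks the bulk allocator's return type changes from bool to int: the value is now the number of objects placed in p[] (all-or-nothing in these implementations, so size on success and 0 on failure). A small userspace model of how a caller checks the new contract; alloc_bulk() here is an illustrative stand-in, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

/* stand-in for kmem_cache_alloc_bulk(): returns the number of objects
 * produced, and 0 (after unwinding partial work) on failure */
static int alloc_bulk(int nr, void **p)
{
	for (int i = 0; i < nr; i++) {
		p[i] = malloc(32);
		if (!p[i]) {
			while (i)
				free(p[--i]);	/* free partial work */
			return 0;
		}
	}
	return nr;
}

int main(void)
{
	void *objs[8];

	/* callers now compare counts instead of truth-testing a bool */
	if (alloc_bulk(8, objs) != 8) {
		fprintf(stderr, "bulk allocation failed\n");
		return 1;
	}
	for (int i = 0; i < 8; i++)
		free(objs[i]);
	return 0;
}
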
diff --git a/mm/slob.c b/mm/slob.c
index 0d7e5df74d1f..17e8f8cc7c53 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -617,7 +617,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
617} 617}
618EXPORT_SYMBOL(kmem_cache_free_bulk); 618EXPORT_SYMBOL(kmem_cache_free_bulk);
619 619
620bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 620int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
621 void **p) 621 void **p)
622{ 622{
623 return __kmem_cache_alloc_bulk(s, flags, size, p); 623 return __kmem_cache_alloc_bulk(s, flags, size, p);
diff --git a/mm/slub.c b/mm/slub.c
index 7cb4bf9ae320..46997517406e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1065,11 +1065,15 @@ bad:
1065 return 0; 1065 return 0;
1066} 1066}
1067 1067
1068/* Supports checking bulk free of a constructed freelist */
1068static noinline struct kmem_cache_node *free_debug_processing( 1069static noinline struct kmem_cache_node *free_debug_processing(
1069 struct kmem_cache *s, struct page *page, void *object, 1070 struct kmem_cache *s, struct page *page,
1071 void *head, void *tail, int bulk_cnt,
1070 unsigned long addr, unsigned long *flags) 1072 unsigned long addr, unsigned long *flags)
1071{ 1073{
1072 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1074 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1075 void *object = head;
1076 int cnt = 0;
1073 1077
1074 spin_lock_irqsave(&n->list_lock, *flags); 1078 spin_lock_irqsave(&n->list_lock, *flags);
1075 slab_lock(page); 1079 slab_lock(page);
@@ -1077,6 +1081,9 @@ static noinline struct kmem_cache_node *free_debug_processing(
1077 if (!check_slab(s, page)) 1081 if (!check_slab(s, page))
1078 goto fail; 1082 goto fail;
1079 1083
1084next_object:
1085 cnt++;
1086
1080 if (!check_valid_pointer(s, page, object)) { 1087 if (!check_valid_pointer(s, page, object)) {
1081 slab_err(s, page, "Invalid object pointer 0x%p", object); 1088 slab_err(s, page, "Invalid object pointer 0x%p", object);
1082 goto fail; 1089 goto fail;
@@ -1107,8 +1114,19 @@ static noinline struct kmem_cache_node *free_debug_processing(
1107 if (s->flags & SLAB_STORE_USER) 1114 if (s->flags & SLAB_STORE_USER)
1108 set_track(s, object, TRACK_FREE, addr); 1115 set_track(s, object, TRACK_FREE, addr);
1109 trace(s, page, object, 0); 1116 trace(s, page, object, 0);
1117 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
1110 init_object(s, object, SLUB_RED_INACTIVE); 1118 init_object(s, object, SLUB_RED_INACTIVE);
1119
1120 /* Reached end of constructed freelist yet? */
1121 if (object != tail) {
1122 object = get_freepointer(s, object);
1123 goto next_object;
1124 }
1111out: 1125out:
1126 if (cnt != bulk_cnt)
1127 slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
1128 bulk_cnt, cnt);
1129
1112 slab_unlock(page); 1130 slab_unlock(page);
1113 /* 1131 /*
1114 * Keep node_lock to preserve integrity 1132 * Keep node_lock to preserve integrity
@@ -1204,7 +1222,7 @@ unsigned long kmem_cache_flags(unsigned long object_size,
1204 1222
1205 return flags; 1223 return flags;
1206} 1224}
1207#else 1225#else /* !CONFIG_SLUB_DEBUG */
1208static inline void setup_object_debug(struct kmem_cache *s, 1226static inline void setup_object_debug(struct kmem_cache *s,
1209 struct page *page, void *object) {} 1227 struct page *page, void *object) {}
1210 1228
@@ -1212,7 +1230,8 @@ static inline int alloc_debug_processing(struct kmem_cache *s,
1212 struct page *page, void *object, unsigned long addr) { return 0; } 1230 struct page *page, void *object, unsigned long addr) { return 0; }
1213 1231
1214static inline struct kmem_cache_node *free_debug_processing( 1232static inline struct kmem_cache_node *free_debug_processing(
1215 struct kmem_cache *s, struct page *page, void *object, 1233 struct kmem_cache *s, struct page *page,
1234 void *head, void *tail, int bulk_cnt,
1216 unsigned long addr, unsigned long *flags) { return NULL; } 1235 unsigned long addr, unsigned long *flags) { return NULL; }
1217 1236
1218static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1237static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
@@ -1273,14 +1292,21 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
1273 return memcg_kmem_get_cache(s, flags); 1292 return memcg_kmem_get_cache(s, flags);
1274} 1293}
1275 1294
1276static inline void slab_post_alloc_hook(struct kmem_cache *s, 1295static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
1277 gfp_t flags, void *object) 1296 size_t size, void **p)
1278{ 1297{
1298 size_t i;
1299
1279 flags &= gfp_allowed_mask; 1300 flags &= gfp_allowed_mask;
1280 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); 1301 for (i = 0; i < size; i++) {
1281 kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags); 1302 void *object = p[i];
1303
1304 kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
1305 kmemleak_alloc_recursive(object, s->object_size, 1,
1306 s->flags, flags);
1307 kasan_slab_alloc(s, object);
1308 }
1282 memcg_kmem_put_cache(s); 1309 memcg_kmem_put_cache(s);
1283 kasan_slab_alloc(s, object);
1284} 1310}
1285 1311
1286static inline void slab_free_hook(struct kmem_cache *s, void *x) 1312static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1308,6 +1334,29 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
1308 kasan_slab_free(s, x); 1334 kasan_slab_free(s, x);
1309} 1335}
1310 1336
1337static inline void slab_free_freelist_hook(struct kmem_cache *s,
1338 void *head, void *tail)
1339{
1340/*
1341 * Compiler cannot detect that this function can be removed if slab_free_hook()
1342 * evaluates to nothing. Thus, catch all relevant config debug options here.
1343 */
1344#if defined(CONFIG_KMEMCHECK) || \
1345 defined(CONFIG_LOCKDEP) || \
1346 defined(CONFIG_DEBUG_KMEMLEAK) || \
1347 defined(CONFIG_DEBUG_OBJECTS_FREE) || \
1348 defined(CONFIG_KASAN)
1349
1350 void *object = head;
1351 void *tail_obj = tail ? : head;
1352
1353 do {
1354 slab_free_hook(s, object);
1355 } while ((object != tail_obj) &&
1356 (object = get_freepointer(s, object)));
1357#endif
1358}
1359
1311static void setup_object(struct kmem_cache *s, struct page *page, 1360static void setup_object(struct kmem_cache *s, struct page *page,
1312 void *object) 1361 void *object)
1313{ 1362{
@@ -2295,23 +2344,15 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2295 * And if we were unable to get a new slab from the partial slab lists then 2344 * And if we were unable to get a new slab from the partial slab lists then
2296 * we need to allocate a new slab. This is the slowest path since it involves 2345 * we need to allocate a new slab. This is the slowest path since it involves
2297 * a call to the page allocator and the setup of a new slab. 2346 * a call to the page allocator and the setup of a new slab.
2347 *
2348 * Version of __slab_alloc to use when we know that interrupts are
2349 * already disabled (which is the case for bulk allocation).
2298 */ 2350 */
2299static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, 2351static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2300 unsigned long addr, struct kmem_cache_cpu *c) 2352 unsigned long addr, struct kmem_cache_cpu *c)
2301{ 2353{
2302 void *freelist; 2354 void *freelist;
2303 struct page *page; 2355 struct page *page;
2304 unsigned long flags;
2305
2306 local_irq_save(flags);
2307#ifdef CONFIG_PREEMPT
2308 /*
2309 * We may have been preempted and rescheduled on a different
2310 * cpu before disabling interrupts. Need to reload cpu area
2311 * pointer.
2312 */
2313 c = this_cpu_ptr(s->cpu_slab);
2314#endif
2315 2356
2316 page = c->page; 2357 page = c->page;
2317 if (!page) 2358 if (!page)
@@ -2369,7 +2410,6 @@ load_freelist:
2369 VM_BUG_ON(!c->page->frozen); 2410 VM_BUG_ON(!c->page->frozen);
2370 c->freelist = get_freepointer(s, freelist); 2411 c->freelist = get_freepointer(s, freelist);
2371 c->tid = next_tid(c->tid); 2412 c->tid = next_tid(c->tid);
2372 local_irq_restore(flags);
2373 return freelist; 2413 return freelist;
2374 2414
2375new_slab: 2415new_slab:
@@ -2386,7 +2426,6 @@ new_slab:
2386 2426
2387 if (unlikely(!freelist)) { 2427 if (unlikely(!freelist)) {
2388 slab_out_of_memory(s, gfpflags, node); 2428 slab_out_of_memory(s, gfpflags, node);
2389 local_irq_restore(flags);
2390 return NULL; 2429 return NULL;
2391 } 2430 }
2392 2431
@@ -2402,11 +2441,35 @@ new_slab:
2402 deactivate_slab(s, page, get_freepointer(s, freelist)); 2441 deactivate_slab(s, page, get_freepointer(s, freelist));
2403 c->page = NULL; 2442 c->page = NULL;
2404 c->freelist = NULL; 2443 c->freelist = NULL;
2405 local_irq_restore(flags);
2406 return freelist; 2444 return freelist;
2407} 2445}
2408 2446
2409/* 2447/*
 2448 * Another variant that disables interrupts and compensates for possible
 2449 * cpu changes by refetching the per-cpu area pointer.
2450 */
2451static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2452 unsigned long addr, struct kmem_cache_cpu *c)
2453{
2454 void *p;
2455 unsigned long flags;
2456
2457 local_irq_save(flags);
2458#ifdef CONFIG_PREEMPT
2459 /*
2460 * We may have been preempted and rescheduled on a different
2461 * cpu before disabling interrupts. Need to reload cpu area
2462 * pointer.
2463 */
2464 c = this_cpu_ptr(s->cpu_slab);
2465#endif
2466
2467 p = ___slab_alloc(s, gfpflags, node, addr, c);
2468 local_irq_restore(flags);
2469 return p;
2470}
2471
2472/*
2410 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) 2473 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2411 * have the fastpath folded into their functions. So no function call 2474 * have the fastpath folded into their functions. So no function call
2412 * overhead for requests that can be satisfied on the fastpath. 2475 * overhead for requests that can be satisfied on the fastpath.
@@ -2419,7 +2482,7 @@ new_slab:
2419static __always_inline void *slab_alloc_node(struct kmem_cache *s, 2482static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2420 gfp_t gfpflags, int node, unsigned long addr) 2483 gfp_t gfpflags, int node, unsigned long addr)
2421{ 2484{
2422 void **object; 2485 void *object;
2423 struct kmem_cache_cpu *c; 2486 struct kmem_cache_cpu *c;
2424 struct page *page; 2487 struct page *page;
2425 unsigned long tid; 2488 unsigned long tid;
@@ -2498,7 +2561,7 @@ redo:
2498 if (unlikely(gfpflags & __GFP_ZERO) && object) 2561 if (unlikely(gfpflags & __GFP_ZERO) && object)
2499 memset(object, 0, s->object_size); 2562 memset(object, 0, s->object_size);
2500 2563
2501 slab_post_alloc_hook(s, gfpflags, object); 2564 slab_post_alloc_hook(s, gfpflags, 1, &object);
2502 2565
2503 return object; 2566 return object;
2504} 2567}
@@ -2569,10 +2632,11 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2569 * handling required then we can return immediately. 2632 * handling required then we can return immediately.
2570 */ 2633 */
2571static void __slab_free(struct kmem_cache *s, struct page *page, 2634static void __slab_free(struct kmem_cache *s, struct page *page,
2572 void *x, unsigned long addr) 2635 void *head, void *tail, int cnt,
2636 unsigned long addr)
2637
2573{ 2638{
2574 void *prior; 2639 void *prior;
2575 void **object = (void *)x;
2576 int was_frozen; 2640 int was_frozen;
2577 struct page new; 2641 struct page new;
2578 unsigned long counters; 2642 unsigned long counters;
@@ -2582,7 +2646,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2582 stat(s, FREE_SLOWPATH); 2646 stat(s, FREE_SLOWPATH);
2583 2647
2584 if (kmem_cache_debug(s) && 2648 if (kmem_cache_debug(s) &&
2585 !(n = free_debug_processing(s, page, x, addr, &flags))) 2649 !(n = free_debug_processing(s, page, head, tail, cnt,
2650 addr, &flags)))
2586 return; 2651 return;
2587 2652
2588 do { 2653 do {
@@ -2592,10 +2657,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2592 } 2657 }
2593 prior = page->freelist; 2658 prior = page->freelist;
2594 counters = page->counters; 2659 counters = page->counters;
2595 set_freepointer(s, object, prior); 2660 set_freepointer(s, tail, prior);
2596 new.counters = counters; 2661 new.counters = counters;
2597 was_frozen = new.frozen; 2662 was_frozen = new.frozen;
2598 new.inuse--; 2663 new.inuse -= cnt;
2599 if ((!new.inuse || !prior) && !was_frozen) { 2664 if ((!new.inuse || !prior) && !was_frozen) {
2600 2665
2601 if (kmem_cache_has_cpu_partial(s) && !prior) { 2666 if (kmem_cache_has_cpu_partial(s) && !prior) {
@@ -2626,7 +2691,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2626 2691
2627 } while (!cmpxchg_double_slab(s, page, 2692 } while (!cmpxchg_double_slab(s, page,
2628 prior, counters, 2693 prior, counters,
2629 object, new.counters, 2694 head, new.counters,
2630 "__slab_free")); 2695 "__slab_free"));
2631 2696
2632 if (likely(!n)) { 2697 if (likely(!n)) {
@@ -2691,15 +2756,20 @@ slab_empty:
2691 * 2756 *
2692 * If fastpath is not possible then fall back to __slab_free where we deal 2757 * If fastpath is not possible then fall back to __slab_free where we deal
2693 * with all sorts of special processing. 2758 * with all sorts of special processing.
2759 *
2760 * Bulk free of a freelist with several objects (all pointing to the
 2761 * same page) is possible by specifying head and tail pointers, plus an
 2762 * object count (cnt). Bulk free is indicated by the tail pointer being set.
2694 */ 2763 */
2695static __always_inline void slab_free(struct kmem_cache *s, 2764static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2696 struct page *page, void *x, unsigned long addr) 2765 void *head, void *tail, int cnt,
2766 unsigned long addr)
2697{ 2767{
2698 void **object = (void *)x; 2768 void *tail_obj = tail ? : head;
2699 struct kmem_cache_cpu *c; 2769 struct kmem_cache_cpu *c;
2700 unsigned long tid; 2770 unsigned long tid;
2701 2771
2702 slab_free_hook(s, x); 2772 slab_free_freelist_hook(s, head, tail);
2703 2773
2704redo: 2774redo:
2705 /* 2775 /*
@@ -2718,19 +2788,19 @@ redo:
2718 barrier(); 2788 barrier();
2719 2789
2720 if (likely(page == c->page)) { 2790 if (likely(page == c->page)) {
2721 set_freepointer(s, object, c->freelist); 2791 set_freepointer(s, tail_obj, c->freelist);
2722 2792
2723 if (unlikely(!this_cpu_cmpxchg_double( 2793 if (unlikely(!this_cpu_cmpxchg_double(
2724 s->cpu_slab->freelist, s->cpu_slab->tid, 2794 s->cpu_slab->freelist, s->cpu_slab->tid,
2725 c->freelist, tid, 2795 c->freelist, tid,
2726 object, next_tid(tid)))) { 2796 head, next_tid(tid)))) {
2727 2797
2728 note_cmpxchg_failure("slab_free", s, tid); 2798 note_cmpxchg_failure("slab_free", s, tid);
2729 goto redo; 2799 goto redo;
2730 } 2800 }
2731 stat(s, FREE_FASTPATH); 2801 stat(s, FREE_FASTPATH);
2732 } else 2802 } else
2733 __slab_free(s, page, x, addr); 2803 __slab_free(s, page, head, tail_obj, cnt, addr);
2734 2804
2735} 2805}
2736 2806
@@ -2739,59 +2809,116 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
2739 s = cache_from_obj(s, x); 2809 s = cache_from_obj(s, x);
2740 if (!s) 2810 if (!s)
2741 return; 2811 return;
2742 slab_free(s, virt_to_head_page(x), x, _RET_IP_); 2812 slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
2743 trace_kmem_cache_free(_RET_IP_, x); 2813 trace_kmem_cache_free(_RET_IP_, x);
2744} 2814}
2745EXPORT_SYMBOL(kmem_cache_free); 2815EXPORT_SYMBOL(kmem_cache_free);
2746 2816
2747/* Note that interrupts must be enabled when calling this function. */ 2817struct detached_freelist {
2748void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
2749{
2750 struct kmem_cache_cpu *c;
2751 struct page *page; 2818 struct page *page;
2752 int i; 2819 void *tail;
2820 void *freelist;
2821 int cnt;
2822};
2753 2823
2754 local_irq_disable(); 2824/*
2755 c = this_cpu_ptr(s->cpu_slab); 2825 * This function progressively scans the array with free objects (with
2826 * a limited look ahead) and extract objects belonging to the same
2827 * page. It builds a detached freelist directly within the given
2828 * page/objects. This can happen without any need for
2829 * synchronization, because the objects are owned by running process.
2830 * The freelist is build up as a single linked list in the objects.
2831 * The idea is, that this detached freelist can then be bulk
2832 * transferred to the real freelist(s), but only requiring a single
2833 * synchronization primitive. Look ahead in the array is limited due
2834 * to performance reasons.
2835 */
2836static int build_detached_freelist(struct kmem_cache *s, size_t size,
2837 void **p, struct detached_freelist *df)
2838{
2839 size_t first_skipped_index = 0;
2840 int lookahead = 3;
2841 void *object;
2756 2842
2757 for (i = 0; i < size; i++) { 2843 /* Always re-init detached_freelist */
2758 void *object = p[i]; 2844 df->page = NULL;
2759 2845
2760 BUG_ON(!object); 2846 do {
2761 /* kmem cache debug support */ 2847 object = p[--size];
2762 s = cache_from_obj(s, object); 2848 } while (!object && size);
2763 if (unlikely(!s))
2764 goto exit;
2765 slab_free_hook(s, object);
2766 2849
2767 page = virt_to_head_page(object); 2850 if (!object)
2851 return 0;
2768 2852
2769 if (c->page == page) { 2853 /* Start new detached freelist */
2770 /* Fastpath: local CPU free */ 2854 set_freepointer(s, object, NULL);
2771 set_freepointer(s, object, c->freelist); 2855 df->page = virt_to_head_page(object);
2772 c->freelist = object; 2856 df->tail = object;
2773 } else { 2857 df->freelist = object;
2774 c->tid = next_tid(c->tid); 2858 p[size] = NULL; /* mark object processed */
2775 local_irq_enable(); 2859 df->cnt = 1;
2776 /* Slowpath: overhead locked cmpxchg_double_slab */ 2860
2777 __slab_free(s, page, object, _RET_IP_); 2861 while (size) {
2778 local_irq_disable(); 2862 object = p[--size];
2779 c = this_cpu_ptr(s->cpu_slab); 2863 if (!object)
2864 continue; /* Skip processed objects */
2865
2866 /* df->page is always set at this point */
2867 if (df->page == virt_to_head_page(object)) {
2868 /* Opportunity build freelist */
2869 set_freepointer(s, object, df->freelist);
2870 df->freelist = object;
2871 df->cnt++;
2872 p[size] = NULL; /* mark object processed */
2873
2874 continue;
2780 } 2875 }
2876
2877 /* Limit look ahead search */
2878 if (!--lookahead)
2879 break;
2880
2881 if (!first_skipped_index)
2882 first_skipped_index = size + 1;
2781 } 2883 }
2782exit: 2884
2783 c->tid = next_tid(c->tid); 2885 return first_skipped_index;
2784 local_irq_enable(); 2886}
2887
2888
2889/* Note that interrupts must be enabled when calling this function. */
2890void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
2891{
2892 if (WARN_ON(!size))
2893 return;
2894
2895 do {
2896 struct detached_freelist df;
2897 struct kmem_cache *s;
2898
2899 /* Support for memcg */
2900 s = cache_from_obj(orig_s, p[size - 1]);
2901
2902 size = build_detached_freelist(s, size, p, &df);
2903 if (unlikely(!df.page))
2904 continue;
2905
2906 slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
2907 } while (likely(size));
2785} 2908}
2786EXPORT_SYMBOL(kmem_cache_free_bulk); 2909EXPORT_SYMBOL(kmem_cache_free_bulk);
2787 2910
2788/* Note that interrupts must be enabled when calling this function. */ 2911/* Note that interrupts must be enabled when calling this function. */
2789bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, 2912int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2790 void **p) 2913 void **p)
2791{ 2914{
2792 struct kmem_cache_cpu *c; 2915 struct kmem_cache_cpu *c;
2793 int i; 2916 int i;
2794 2917
2918 /* memcg and kmem_cache debug support */
2919 s = slab_pre_alloc_hook(s, flags);
2920 if (unlikely(!s))
2921 return false;
2795 /* 2922 /*
2796 * Drain objects in the per cpu slab, while disabling local 2923 * Drain objects in the per cpu slab, while disabling local
2797 * IRQs, which protects against PREEMPT and interrupts 2924 * IRQs, which protects against PREEMPT and interrupts
@@ -2804,36 +2931,20 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2804 void *object = c->freelist; 2931 void *object = c->freelist;
2805 2932
2806 if (unlikely(!object)) { 2933 if (unlikely(!object)) {
2807 local_irq_enable();
2808 /* 2934 /*
 2809 * Invoking slow path likely has the side-effect 2935 * Invoking slow path likely has the side-effect
2810 * of re-populating per CPU c->freelist 2936 * of re-populating per CPU c->freelist
2811 */ 2937 */
2812 p[i] = __slab_alloc(s, flags, NUMA_NO_NODE, 2938 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
2813 _RET_IP_, c); 2939 _RET_IP_, c);
2814 if (unlikely(!p[i])) { 2940 if (unlikely(!p[i]))
2815 __kmem_cache_free_bulk(s, i, p); 2941 goto error;
2816 return false; 2942
2817 }
2818 local_irq_disable();
2819 c = this_cpu_ptr(s->cpu_slab); 2943 c = this_cpu_ptr(s->cpu_slab);
2820 continue; /* goto for-loop */ 2944 continue; /* goto for-loop */
2821 } 2945 }
2822
2823 /* kmem_cache debug support */
2824 s = slab_pre_alloc_hook(s, flags);
2825 if (unlikely(!s)) {
2826 __kmem_cache_free_bulk(s, i, p);
2827 c->tid = next_tid(c->tid);
2828 local_irq_enable();
2829 return false;
2830 }
2831
2832 c->freelist = get_freepointer(s, object); 2946 c->freelist = get_freepointer(s, object);
2833 p[i] = object; 2947 p[i] = object;
2834
2835 /* kmem_cache debug support */
2836 slab_post_alloc_hook(s, flags, object);
2837 } 2948 }
2838 c->tid = next_tid(c->tid); 2949 c->tid = next_tid(c->tid);
2839 local_irq_enable(); 2950 local_irq_enable();
@@ -2846,7 +2957,14 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
2846 memset(p[j], 0, s->object_size); 2957 memset(p[j], 0, s->object_size);
2847 } 2958 }
2848 2959
2849 return true; 2960 /* memcg and kmem_cache debug support */
2961 slab_post_alloc_hook(s, flags, size, p);
2962 return i;
2963error:
2964 local_irq_enable();
2965 slab_post_alloc_hook(s, flags, i, p);
2966 __kmem_cache_free_bulk(s, i, p);
2967 return 0;
2850} 2968}
2851EXPORT_SYMBOL(kmem_cache_alloc_bulk); 2969EXPORT_SYMBOL(kmem_cache_alloc_bulk);
2852 2970
@@ -3511,7 +3629,7 @@ void kfree(const void *x)
3511 __free_kmem_pages(page, compound_order(page)); 3629 __free_kmem_pages(page, compound_order(page));
3512 return; 3630 return;
3513 } 3631 }
3514 slab_free(page->slab_cache, page, object, _RET_IP_); 3632 slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
3515} 3633}
3516EXPORT_SYMBOL(kfree); 3634EXPORT_SYMBOL(kfree);
3517 3635
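
The kmem_cache_free_bulk() rewrite above hinges on build_detached_freelist(): scan the pointer array backwards, link objects that share a page into a singly linked freelist stored inside the objects themselves, and stop after a bounded number of misses. A self-contained userspace model of that scan, with "page" approximated by address bits and the freepointer kept in each object's first word; both are simplifications of the SLUB internals:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct detached_freelist {
	uintptr_t page;		/* kernel uses struct page * */
	void *tail;
	void *freelist;
	int cnt;
};

static uintptr_t page_of(void *obj) { return (uintptr_t)obj >> PAGE_SHIFT; }

/* the first word of each object holds the freelist pointer, as in SLUB */
static void set_freepointer(void *obj, void *fp) { *(void **)obj = fp; }

static size_t build_detached_freelist(size_t size, void **p,
				      struct detached_freelist *df)
{
	size_t first_skipped_index = 0;
	int lookahead = 3;
	void *object;

	df->page = 0;		/* always re-init, as the kernel version does */

	do {			/* find the last unprocessed object */
		object = p[--size];
	} while (!object && size);

	if (!object)
		return 0;

	/* start a new detached freelist on that object's page */
	set_freepointer(object, NULL);
	df->page = page_of(object);
	df->tail = object;
	df->freelist = object;
	df->cnt = 1;
	p[size] = NULL;		/* mark object processed */

	while (size) {
		object = p[--size];
		if (!object)
			continue;	/* skip processed objects */

		if (page_of(object) == df->page) {
			/* same page: link it onto the detached freelist */
			set_freepointer(object, df->freelist);
			df->freelist = object;
			df->cnt++;
			p[size] = NULL;
			continue;
		}

		if (!--lookahead)
			break;		/* bound the look-ahead search */

		if (!first_skipped_index)
			first_skipped_index = size + 1;
	}

	return first_skipped_index;	/* where the caller's next pass resumes */
}

int main(void)
{
	static _Alignas(4096) char pg1[4096], pg2[4096];
	void *p[] = { pg1, pg2, pg1 + 64, pg1 + 128 };
	struct detached_freelist df;
	size_t rest = build_detached_freelist(4, p, &df);

	/* groups the three pg1 objects; pg2 is left for the next pass */
	printf("linked %d objects, resume at index %zu\n", df.cnt, rest);
	return 0;
}
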
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d04563480c94..8e3c9c5a3042 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1443,7 +1443,6 @@ struct vm_struct *remove_vm_area(const void *addr)
1443 vmap_debug_free_range(va->va_start, va->va_end); 1443 vmap_debug_free_range(va->va_start, va->va_end);
1444 kasan_free_shadow(vm); 1444 kasan_free_shadow(vm);
1445 free_unmap_vmap_area(va); 1445 free_unmap_vmap_area(va);
1446 vm->size -= PAGE_SIZE;
1447 1446
1448 return vm; 1447 return vm;
1449 } 1448 }
@@ -1468,8 +1467,8 @@ static void __vunmap(const void *addr, int deallocate_pages)
1468 return; 1467 return;
1469 } 1468 }
1470 1469
1471 debug_check_no_locks_freed(addr, area->size); 1470 debug_check_no_locks_freed(addr, get_vm_area_size(area));
1472 debug_check_no_obj_freed(addr, area->size); 1471 debug_check_no_obj_freed(addr, get_vm_area_size(area));
1473 1472
1474 if (deallocate_pages) { 1473 if (deallocate_pages) {
1475 int i; 1474 int i;
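
The __vunmap() hunk above sizes its debug checks with get_vm_area_size() instead of area->size, since the latter includes the guard page, and the dropped remove_vm_area() line no longer fakes that adjustment by shrinking vm->size in place. A tiny userspace model, assuming the helper is "size minus one guard page" as in include/linux/vmalloc.h of this era:

#include <assert.h>

#define PAGE_SIZE 4096UL

struct vm_struct { unsigned long size; /* includes the guard page */ };

static unsigned long get_vm_area_size(const struct vm_struct *area)
{
	return area->size - PAGE_SIZE;	/* usable bytes only */
}

int main(void)
{
	struct vm_struct area = { .size = 3 * PAGE_SIZE };

	/* two usable pages plus one guard page */
	assert(get_vm_area_size(&area) == 2 * PAGE_SIZE);
	return 0;
}
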
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 879a2be23325..0d5712b0206c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -921,8 +921,8 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
921#ifdef CONFIG_PROC_FS 921#ifdef CONFIG_PROC_FS
922static char * const migratetype_names[MIGRATE_TYPES] = { 922static char * const migratetype_names[MIGRATE_TYPES] = {
923 "Unmovable", 923 "Unmovable",
924 "Reclaimable",
925 "Movable", 924 "Movable",
925 "Reclaimable",
926 "HighAtomic", 926 "HighAtomic",
927#ifdef CONFIG_CMA 927#ifdef CONFIG_CMA
928 "CMA", 928 "CMA",
@@ -1379,6 +1379,7 @@ static const struct file_operations proc_vmstat_file_operations = {
1379#endif /* CONFIG_PROC_FS */ 1379#endif /* CONFIG_PROC_FS */
1380 1380
1381#ifdef CONFIG_SMP 1381#ifdef CONFIG_SMP
1382static struct workqueue_struct *vmstat_wq;
1382static DEFINE_PER_CPU(struct delayed_work, vmstat_work); 1383static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1383int sysctl_stat_interval __read_mostly = HZ; 1384int sysctl_stat_interval __read_mostly = HZ;
1384static cpumask_var_t cpu_stat_off; 1385static cpumask_var_t cpu_stat_off;
@@ -1391,7 +1392,7 @@ static void vmstat_update(struct work_struct *w)
1391 * to occur in the future. Keep on running the 1392 * to occur in the future. Keep on running the
1392 * update worker thread. 1393 * update worker thread.
1393 */ 1394 */
1394 schedule_delayed_work_on(smp_processor_id(), 1395 queue_delayed_work_on(smp_processor_id(), vmstat_wq,
1395 this_cpu_ptr(&vmstat_work), 1396 this_cpu_ptr(&vmstat_work),
1396 round_jiffies_relative(sysctl_stat_interval)); 1397 round_jiffies_relative(sysctl_stat_interval));
1397 } else { 1398 } else {
@@ -1460,7 +1461,7 @@ static void vmstat_shepherd(struct work_struct *w)
1460 if (need_update(cpu) && 1461 if (need_update(cpu) &&
1461 cpumask_test_and_clear_cpu(cpu, cpu_stat_off)) 1462 cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
1462 1463
1463 schedule_delayed_work_on(cpu, 1464 queue_delayed_work_on(cpu, vmstat_wq,
1464 &per_cpu(vmstat_work, cpu), 0); 1465 &per_cpu(vmstat_work, cpu), 0);
1465 1466
1466 put_online_cpus(); 1467 put_online_cpus();
@@ -1549,6 +1550,7 @@ static int __init setup_vmstat(void)
1549 1550
1550 start_shepherd_timer(); 1551 start_shepherd_timer();
1551 cpu_notifier_register_done(); 1552 cpu_notifier_register_done();
1553 vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1552#endif 1554#endif
1553#ifdef CONFIG_PROC_FS 1555#ifdef CONFIG_PROC_FS
1554 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); 1556 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
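
The vmstat hunks above move the periodic update off the system workqueue onto a dedicated WQ_FREEZABLE|WQ_MEM_RECLAIM workqueue, so the work keeps a rescuer thread available and can still make progress while the system is reclaiming memory. A minimal out-of-tree module sketch of the same pattern, assuming a 4.4-era tree; the names are illustrative, not the vmstat code itself:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_work;

static void demo_fn(struct work_struct *w)
{
	pr_info("demo work ran\n");
	/* re-arm, as vmstat_update() does */
	queue_delayed_work(demo_wq, &demo_work, HZ);
}

static int __init demo_init(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer thread under memory pressure */
	demo_wq = alloc_workqueue("demo", WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&demo_work, demo_fn);
	queue_delayed_work(demo_wq, &demo_work, HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);	/* handles self-requeueing work */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
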
diff --git a/mm/zswap.c b/mm/zswap.c
index 025f8dc723de..bf14508afd64 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -541,6 +541,7 @@ static struct zswap_pool *zswap_pool_last_get(void)
541 return last; 541 return last;
542} 542}
543 543
544/* type and compressor must be null-terminated */
544static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) 545static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
545{ 546{
546 struct zswap_pool *pool; 547 struct zswap_pool *pool;
@@ -548,10 +549,9 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
548 assert_spin_locked(&zswap_pools_lock); 549 assert_spin_locked(&zswap_pools_lock);
549 550
550 list_for_each_entry_rcu(pool, &zswap_pools, list) { 551 list_for_each_entry_rcu(pool, &zswap_pools, list) {
551 if (strncmp(pool->tfm_name, compressor, sizeof(pool->tfm_name))) 552 if (strcmp(pool->tfm_name, compressor))
552 continue; 553 continue;
553 if (strncmp(zpool_get_type(pool->zpool), type, 554 if (strcmp(zpool_get_type(pool->zpool), type))
554 sizeof(zswap_zpool_type)))
555 continue; 555 continue;
556 /* if we can't get it, it's about to be destroyed */ 556 /* if we can't get it, it's about to be destroyed */
557 if (!zswap_pool_get(pool)) 557 if (!zswap_pool_get(pool))
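
The zswap hunk above fixes a bounded-compare bug: strncmp() was limited by the size of an unrelated buffer, so a longer name sharing a prefix could "match". Since both strings are now documented as null-terminated, plain strcmp() is correct. A userspace demonstration of the bug class; "zbudgie" is a made-up name chosen to collide on the first four bytes:

#include <assert.h>
#include <string.h>

int main(void)
{
	const char *want = "zbud";	/* requested type */
	const char *have = "zbudgie";	/* hypothetical name, prefix-collides */

	/* bogus bound (sizeof an unrelated 4-byte buffer): prefix "match" */
	assert(strncmp(have, want, 4) == 0);

	/* with both strings null-terminated, strcmp sees the difference */
	assert(strcmp(have, want) != 0);
	return 0;
}
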
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 496b27588493..e2ed69850489 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -30,7 +30,9 @@ bool vlan_do_receive(struct sk_buff **skbp)
30 skb->pkt_type = PACKET_HOST; 30 skb->pkt_type = PACKET_HOST;
31 } 31 }
32 32
33 if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) { 33 if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
34 !netif_is_macvlan_port(vlan_dev) &&
35 !netif_is_bridge_port(vlan_dev)) {
34 unsigned int offset = skb->data - skb_mac_header(skb); 36 unsigned int offset = skb->data - skb_mac_header(skb);
35 37
36 /* 38 /*
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index ae3a47f9d1d5..fbd0acf80b13 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -805,6 +805,9 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
805 struct sock *sk; 805 struct sock *sk;
806 ax25_cb *ax25; 806 ax25_cb *ax25;
807 807
808 if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
809 return -EINVAL;
810
808 if (!net_eq(net, &init_net)) 811 if (!net_eq(net, &init_net))
809 return -EAFNOSUPPORT; 812 return -EAFNOSUPPORT;
810 813
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 83bc1aaf5800..a49c705fb86b 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -566,6 +566,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
566 int select; 566 int select;
567 batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; 567 batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
568 struct batadv_dat_candidate *res; 568 struct batadv_dat_candidate *res;
569 struct batadv_dat_entry dat;
569 570
570 if (!bat_priv->orig_hash) 571 if (!bat_priv->orig_hash)
571 return NULL; 572 return NULL;
@@ -575,7 +576,9 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
575 if (!res) 576 if (!res)
576 return NULL; 577 return NULL;
577 578
578 ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst, 579 dat.ip = ip_dst;
580 dat.vid = 0;
581 ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
579 BATADV_DAT_ADDR_MAX); 582 BATADV_DAT_ADDR_MAX);
580 583
581 batadv_dbg(BATADV_DBG_DAT, bat_priv, 584 batadv_dbg(BATADV_DBG_DAT, bat_priv,
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 8d990b070a2e..3207667e69de 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -836,6 +836,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
836 u8 *orig_addr; 836 u8 *orig_addr;
837 struct batadv_orig_node *orig_node = NULL; 837 struct batadv_orig_node *orig_node = NULL;
838 int check, hdr_size = sizeof(*unicast_packet); 838 int check, hdr_size = sizeof(*unicast_packet);
839 enum batadv_subtype subtype;
839 bool is4addr; 840 bool is4addr;
840 841
841 unicast_packet = (struct batadv_unicast_packet *)skb->data; 842 unicast_packet = (struct batadv_unicast_packet *)skb->data;
@@ -863,10 +864,20 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
863 /* packet for me */ 864 /* packet for me */
864 if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { 865 if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
865 if (is4addr) { 866 if (is4addr) {
866 batadv_dat_inc_counter(bat_priv, 867 subtype = unicast_4addr_packet->subtype;
867 unicast_4addr_packet->subtype); 868 batadv_dat_inc_counter(bat_priv, subtype);
868 orig_addr = unicast_4addr_packet->src; 869
869 orig_node = batadv_orig_hash_find(bat_priv, orig_addr); 870 /* Only payload data should be considered for speedy
871 * join. For example, DAT also uses unicast 4addr
872 * types, but those packets should not be considered
873 * for speedy join, since the clients do not actually
874 * reside at the sending originator.
875 */
876 if (subtype == BATADV_P_DATA) {
877 orig_addr = unicast_4addr_packet->src;
878 orig_node = batadv_orig_hash_find(bat_priv,
879 orig_addr);
880 }
870 } 881 }
871 882
872 if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, 883 if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb,
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 4228b10c47ea..76f19ba62462 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -68,13 +68,15 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
68 unsigned short vid, const char *message, 68 unsigned short vid, const char *message,
69 bool roaming); 69 bool roaming);
70 70
71/* returns 1 if they are the same mac addr */ 71/* returns 1 if they are the same mac addr and vid */
72static int batadv_compare_tt(const struct hlist_node *node, const void *data2) 72static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
73{ 73{
74 const void *data1 = container_of(node, struct batadv_tt_common_entry, 74 const void *data1 = container_of(node, struct batadv_tt_common_entry,
75 hash_entry); 75 hash_entry);
76 const struct batadv_tt_common_entry *tt1 = data1;
77 const struct batadv_tt_common_entry *tt2 = data2;
76 78
77 return batadv_compare_eth(data1, data2); 79 return (tt1->vid == tt2->vid) && batadv_compare_eth(data1, data2);
78} 80}
79 81
80/** 82/**
@@ -1427,9 +1429,15 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
1427 } 1429 }
1428 1430
 1429 /* if the client was temporarily added before receiving the first 1431 /* if the client was temporarily added before receiving the first
1430 * OGM announcing it, we have to clear the TEMP flag 1432 * OGM announcing it, we have to clear the TEMP flag. Also,
1433 * remove the previous temporary orig node and re-add it
1434 * if required. If the orig entry changed, the new one which
1435 * is a non-temporary entry is preferred.
1431 */ 1436 */
1432 common->flags &= ~BATADV_TT_CLIENT_TEMP; 1437 if (common->flags & BATADV_TT_CLIENT_TEMP) {
1438 batadv_tt_global_del_orig_list(tt_global_entry);
1439 common->flags &= ~BATADV_TT_CLIENT_TEMP;
1440 }
1433 1441
1434 /* the change can carry possible "attribute" flags like the 1442 /* the change can carry possible "attribute" flags like the
1435 * TT_CLIENT_WIFI, therefore they have to be copied in the 1443 * TT_CLIENT_WIFI, therefore they have to be copied in the
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index a3bffd1ec2b4..70306cc9d814 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -271,11 +271,11 @@ static long bt_sock_data_wait(struct sock *sk, long timeo)
271 if (signal_pending(current) || !timeo) 271 if (signal_pending(current) || !timeo)
272 break; 272 break;
273 273
274 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 274 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
275 release_sock(sk); 275 release_sock(sk);
276 timeo = schedule_timeout(timeo); 276 timeo = schedule_timeout(timeo);
277 lock_sock(sk); 277 lock_sock(sk);
278 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 278 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
279 } 279 }
280 280
281 __set_current_state(TASK_RUNNING); 281 __set_current_state(TASK_RUNNING);
@@ -441,7 +441,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock,
441 if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) 441 if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
442 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 442 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
443 else 443 else
444 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 444 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
445 445
446 return mask; 446 return mask;
447} 447}
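
This and the later socket hunks convert open-coded set_bit/clear_bit on sk->sk_socket->flags to the sk_set_bit()/sk_clear_bit() helpers, which reach the flag word through the sock's own wait-queue pointer instead of dereferencing sk_socket. A simplified userspace model of that indirection; the structures and bit operations are stand-ins, and in the kernel the flags live in struct socket_wq and are set atomically:

#include <assert.h>

enum { SOCKWQ_ASYNC_NOSPACE, SOCKWQ_ASYNC_WAITDATA };

struct socket_wq { unsigned long flags; };
struct sock      { struct socket_wq *sk_wq; };

static void sk_set_bit(int nr, struct sock *sk)
{
	sk->sk_wq->flags |= 1UL << nr;
}

static void sk_clear_bit(int nr, struct sock *sk)
{
	sk->sk_wq->flags &= ~(1UL << nr);
}

int main(void)
{
	struct socket_wq wq = { 0 };
	struct sock sk = { .sk_wq = &wq };

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, &sk);
	assert(wq.flags & (1UL << SOCKWQ_ASYNC_WAITDATA));

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, &sk);
	assert(wq.flags == 0);
	return 0;
}
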
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index fe129663bd3f..f52bcbf2e58c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -526,6 +526,9 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
526 if (!addr || addr->sa_family != AF_BLUETOOTH) 526 if (!addr || addr->sa_family != AF_BLUETOOTH)
527 return -EINVAL; 527 return -EINVAL;
528 528
529 if (addr_len < sizeof(struct sockaddr_sco))
530 return -EINVAL;
531
529 lock_sock(sk); 532 lock_sock(sk);
530 533
531 if (sk->sk_state != BT_OPEN) { 534 if (sk->sk_state != BT_OPEN) {
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index c91353841e40..ffed8a1d4f27 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -3027,8 +3027,13 @@ static void smp_ready_cb(struct l2cap_chan *chan)
3027 3027
3028 BT_DBG("chan %p", chan); 3028 BT_DBG("chan %p", chan);
3029 3029
3030 /* No need to call l2cap_chan_hold() here since we already own
3031 * the reference taken in smp_new_conn_cb(). This is just the
3032 * first time that we tie it to a specific pointer. The code in
 3033 * l2cap_core.c ensures that there's no risk this function won't
3034 * get called if smp_new_conn_cb was previously called.
3035 */
3030 conn->smp = chan; 3036 conn->smp = chan;
3031 l2cap_chan_hold(chan);
3032 3037
3033 if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) 3038 if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags))
3034 bredr_pairing(chan); 3039 bredr_pairing(chan);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index f7e8dee64fc8..5f3f64553179 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -48,7 +48,7 @@ void br_set_state(struct net_bridge_port *p, unsigned int state)
48 48
49 p->state = state; 49 p->state = state;
50 err = switchdev_port_attr_set(p->dev, &attr); 50 err = switchdev_port_attr_set(p->dev, &attr);
51 if (err) 51 if (err && err != -EOPNOTSUPP)
52 br_warn(p->br, "error setting offload STP state on port %u(%s)\n", 52 br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
53 (unsigned int) p->port_no, p->dev->name); 53 (unsigned int) p->port_no, p->dev->name);
54} 54}
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index fa53d7a89f48..5396ff08af32 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -50,7 +50,7 @@ void br_init_port(struct net_bridge_port *p)
50 p->config_pending = 0; 50 p->config_pending = 0;
51 51
52 err = switchdev_port_attr_set(p->dev, &attr); 52 err = switchdev_port_attr_set(p->dev, &attr);
53 if (err) 53 if (err && err != -EOPNOTSUPP)
54 netdev_err(p->dev, "failed to set HW ageing time\n"); 54 netdev_err(p->dev, "failed to set HW ageing time\n");
55} 55}
56 56
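
Both bridge hunks above stop warning when switchdev_port_attr_set() returns -EOPNOTSUPP, since a port with no offload capability is a normal software-only case, not an error. A small userspace model of the tolerated-error pattern; port_attr_set() is an illustrative stand-in:

#include <errno.h>
#include <stdio.h>

/* stand-in for switchdev_port_attr_set(): no offload -> -EOPNOTSUPP */
static int port_attr_set(int has_offload)
{
	return has_offload ? 0 : -EOPNOTSUPP;
}

static void set_port_state(int has_offload)
{
	int err = port_attr_set(has_offload);

	/* -EOPNOTSUPP means "no hardware to program", not a failure */
	if (err && err != -EOPNOTSUPP)
		fprintf(stderr, "error setting offload STP state\n");
}

int main(void)
{
	set_port_state(1);	/* offloaded port: programmed, silent */
	set_port_state(0);	/* software-only port: silent fallback */
	return 0;
}
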
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index cc858919108e..aa209b1066c9 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -323,7 +323,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
323 !timeo) 323 !timeo)
324 break; 324 break;
325 325
326 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 326 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
327 release_sock(sk); 327 release_sock(sk);
328 timeo = schedule_timeout(timeo); 328 timeo = schedule_timeout(timeo);
329 lock_sock(sk); 329 lock_sock(sk);
@@ -331,7 +331,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
331 if (sock_flag(sk, SOCK_DEAD)) 331 if (sock_flag(sk, SOCK_DEAD))
332 break; 332 break;
333 333
334 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 334 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
335 } 335 }
336 336
337 finish_wait(sk_sleep(sk), &wait); 337 finish_wait(sk_sleep(sk), &wait);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 617088aee21d..d62af69ad844 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -785,7 +785,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
785 if (sock_writeable(sk)) 785 if (sock_writeable(sk))
786 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 786 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
787 else 787 else
788 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 788 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
789 789
790 return mask; 790 return mask;
791} 791}
diff --git a/net/core/dev.c b/net/core/dev.c
index ab9b8d0d115e..ae00b894e675 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2403,17 +2403,20 @@ static void skb_warn_bad_offload(const struct sk_buff *skb)
2403{ 2403{
2404 static const netdev_features_t null_features = 0; 2404 static const netdev_features_t null_features = 0;
2405 struct net_device *dev = skb->dev; 2405 struct net_device *dev = skb->dev;
2406 const char *driver = ""; 2406 const char *name = "";
2407 2407
2408 if (!net_ratelimit()) 2408 if (!net_ratelimit())
2409 return; 2409 return;
2410 2410
2411 if (dev && dev->dev.parent) 2411 if (dev) {
2412 driver = dev_driver_string(dev->dev.parent); 2412 if (dev->dev.parent)
2413 2413 name = dev_driver_string(dev->dev.parent);
2414 else
2415 name = netdev_name(dev);
2416 }
2414 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d " 2417 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2415 "gso_type=%d ip_summed=%d\n", 2418 "gso_type=%d ip_summed=%d\n",
2416 driver, dev ? &dev->features : &null_features, 2419 name, dev ? &dev->features : &null_features,
2417 skb->sk ? &skb->sk->sk_route_caps : &null_features, 2420 skb->sk ? &skb->sk->sk_route_caps : &null_features,
2418 skb->len, skb->data_len, skb_shinfo(skb)->gso_size, 2421 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2419 skb_shinfo(skb)->gso_type, skb->ip_summed); 2422 skb_shinfo(skb)->gso_type, skb->ip_summed);
@@ -6426,11 +6429,16 @@ int __netdev_update_features(struct net_device *dev)
6426 6429
6427 if (dev->netdev_ops->ndo_set_features) 6430 if (dev->netdev_ops->ndo_set_features)
6428 err = dev->netdev_ops->ndo_set_features(dev, features); 6431 err = dev->netdev_ops->ndo_set_features(dev, features);
6432 else
6433 err = 0;
6429 6434
6430 if (unlikely(err < 0)) { 6435 if (unlikely(err < 0)) {
6431 netdev_err(dev, 6436 netdev_err(dev,
6432 "set_features() failed (%d); wanted %pNF, left %pNF\n", 6437 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6433 err, &features, &dev->features); 6438 err, &features, &dev->features);
6439 /* return non-0 since some features might have changed and
6440 * it's better to fire a spurious notification than miss it
6441 */
6434 return -1; 6442 return -1;
6435 } 6443 }
6436 6444
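
Note: the added else branch gives err a defined value when a driver has no ndo_set_features hook, and the new comment documents why the failure path still returns non-zero. The caller makes the contract visible; this is roughly the v4.4 shape of netdev_update_features():

        void netdev_update_features(struct net_device *dev)
        {
                /* any non-zero return fires a NETDEV_FEAT_CHANGE event,
                 * so a spurious notification beats a missed one */
                if (__netdev_update_features(dev))
                        netdev_features_change(dev);
        }
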
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 1aa8437ed6c4..f18ae91b652e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -857,7 +857,7 @@ static void neigh_probe(struct neighbour *neigh)
857 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue); 857 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
858 /* keep skb alive even if arp_queue overflows */ 858 /* keep skb alive even if arp_queue overflows */
859 if (skb) 859 if (skb)
860 skb = skb_copy(skb, GFP_ATOMIC); 860 skb = skb_clone(skb, GFP_ATOMIC);
861 write_unlock(&neigh->lock); 861 write_unlock(&neigh->lock);
862 neigh->ops->solicit(neigh, skb); 862 neigh->ops->solicit(neigh, skb);
863 atomic_inc(&neigh->probes); 863 atomic_inc(&neigh->probes);
@@ -2215,7 +2215,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2215 ndm->ndm_pad2 = 0; 2215 ndm->ndm_pad2 = 0;
2216 ndm->ndm_flags = pn->flags | NTF_PROXY; 2216 ndm->ndm_flags = pn->flags | NTF_PROXY;
2217 ndm->ndm_type = RTN_UNICAST; 2217 ndm->ndm_type = RTN_UNICAST;
2218 ndm->ndm_ifindex = pn->dev->ifindex; 2218 ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
2219 ndm->ndm_state = NUD_NONE; 2219 ndm->ndm_state = NUD_NONE;
2220 2220
2221 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) 2221 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
@@ -2333,7 +2333,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2333 if (h > s_h) 2333 if (h > s_h)
2334 s_idx = 0; 2334 s_idx = 0;
2335 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { 2335 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2336 if (dev_net(n->dev) != net) 2336 if (pneigh_net(n) != net)
2337 continue; 2337 continue;
2338 if (idx < s_idx) 2338 if (idx < s_idx)
2339 goto next; 2339 goto next;
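
Note: three small hardenings in this file. skb_copy() duplicated the whole payload just to keep the probe packet alive across the unlock, but the solicit path only reads it, so a clone with a shared data buffer is enough; and proxy neighbour entries can have a NULL dev, hence the ?: fallback and the pneigh_net() accessor. The clone/copy trade-off, schematically:

        /* skb_clone(): new sk_buff header, shared refcounted data - O(1)
         * skb_copy():  new header plus a full data copy - O(skb->len)  */
        struct sk_buff *keep = skb_clone(skb, GFP_ATOMIC);  /* may be NULL */
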
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 6441f47b1a8f..d9ee8d08a3a6 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -56,7 +56,7 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
56 kfree(css_cls_state(css)); 56 kfree(css_cls_state(css));
57} 57}
58 58
59static int update_classid(const void *v, struct file *file, unsigned n) 59static int update_classid_sock(const void *v, struct file *file, unsigned n)
60{ 60{
61 int err; 61 int err;
62 struct socket *sock = sock_from_file(file, &err); 62 struct socket *sock = sock_from_file(file, &err);
@@ -67,18 +67,27 @@ static int update_classid(const void *v, struct file *file, unsigned n)
67 return 0; 67 return 0;
68} 68}
69 69
70static void cgrp_attach(struct cgroup_subsys_state *css, 70static void update_classid(struct cgroup_subsys_state *css, void *v)
71 struct cgroup_taskset *tset)
72{ 71{
73 struct cgroup_cls_state *cs = css_cls_state(css); 72 struct css_task_iter it;
74 void *v = (void *)(unsigned long)cs->classid;
75 struct task_struct *p; 73 struct task_struct *p;
76 74
77 cgroup_taskset_for_each(p, tset) { 75 css_task_iter_start(css, &it);
76 while ((p = css_task_iter_next(&it))) {
78 task_lock(p); 77 task_lock(p);
79 iterate_fd(p->files, 0, update_classid, v); 78 iterate_fd(p->files, 0, update_classid_sock, v);
80 task_unlock(p); 79 task_unlock(p);
81 } 80 }
81 css_task_iter_end(&it);
82}
83
84static void cgrp_attach(struct cgroup_taskset *tset)
85{
86 struct cgroup_subsys_state *css;
87
88 cgroup_taskset_first(tset, &css);
89 update_classid(css,
90 (void *)(unsigned long)css_cls_state(css)->classid);
82} 91}
83 92
84static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) 93static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -89,8 +98,11 @@ static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
89static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft, 98static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
90 u64 value) 99 u64 value)
91{ 100{
92 css_cls_state(css)->classid = (u32) value; 101 struct cgroup_cls_state *cs = css_cls_state(css);
102
103 cs->classid = (u32)value;
93 104
105 update_classid(css, (void *)(unsigned long)cs->classid);
94 return 0; 106 return 0;
95} 107}
96 108
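
Note: this refactor turns write_classid() from a bare store into an immediate retag of every socket already open in the cgroup; before it, only tasks attached after the write saw the new classid. The resulting flow, schematically:

        /* echo N > net_cls.classid
         *   write_classid(css, N)
         *     cs->classid = N;
         *     update_classid(css, v)          walk all tasks with the
         *       css_task_iter_start/next()    css iterator, retagging
         *         iterate_fd(p->files, 0,     each open socket fd
         *                    update_classid_sock, v);
         */
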
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index cbd0a199bf52..40fd09fe06ae 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -218,13 +218,14 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
218 return 0; 218 return 0;
219} 219}
220 220
221static void net_prio_attach(struct cgroup_subsys_state *css, 221static void net_prio_attach(struct cgroup_taskset *tset)
222 struct cgroup_taskset *tset)
223{ 222{
224 struct task_struct *p; 223 struct task_struct *p;
225 void *v = (void *)(unsigned long)css->cgroup->id; 224 struct cgroup_subsys_state *css;
225
226 cgroup_taskset_for_each(p, css, tset) {
227 void *v = (void *)(unsigned long)css->cgroup->id;
226 228
227 cgroup_taskset_for_each(p, tset) {
228 task_lock(p); 229 task_lock(p);
229 iterate_fd(p->files, 0, update_netprio, v); 230 iterate_fd(p->files, 0, update_netprio, v);
230 task_unlock(p); 231 task_unlock(p);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 504bd17b7456..34ba7a08876d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1045,15 +1045,156 @@ static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
1045 return 0; 1045 return 0;
1046} 1046}
1047 1047
1048static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
1049 struct net_device *dev)
1050{
1051 const struct rtnl_link_stats64 *stats;
1052 struct rtnl_link_stats64 temp;
1053 struct nlattr *attr;
1054
1055 stats = dev_get_stats(dev, &temp);
1056
1057 attr = nla_reserve(skb, IFLA_STATS,
1058 sizeof(struct rtnl_link_stats));
1059 if (!attr)
1060 return -EMSGSIZE;
1061
1062 copy_rtnl_link_stats(nla_data(attr), stats);
1063
1064 attr = nla_reserve(skb, IFLA_STATS64,
1065 sizeof(struct rtnl_link_stats64));
1066 if (!attr)
1067 return -EMSGSIZE;
1068
1069 copy_rtnl_link_stats64(nla_data(attr), stats);
1070
1071 return 0;
1072}
1073
1074static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
1075 struct net_device *dev,
1076 int vfs_num,
1077 struct nlattr *vfinfo)
1078{
1079 struct ifla_vf_rss_query_en vf_rss_query_en;
1080 struct ifla_vf_link_state vf_linkstate;
1081 struct ifla_vf_spoofchk vf_spoofchk;
1082 struct ifla_vf_tx_rate vf_tx_rate;
1083 struct ifla_vf_stats vf_stats;
1084 struct ifla_vf_trust vf_trust;
1085 struct ifla_vf_vlan vf_vlan;
1086 struct ifla_vf_rate vf_rate;
1087 struct nlattr *vf, *vfstats;
1088 struct ifla_vf_mac vf_mac;
1089 struct ifla_vf_info ivi;
1090
1091 /* Not all SR-IOV capable drivers support the
1092 * spoofcheck and "RSS query enable" query. Preset to
1093 * -1 so the user space tool can detect that the driver
1094 * didn't report anything.
1095 */
1096 ivi.spoofchk = -1;
1097 ivi.rss_query_en = -1;
1098 ivi.trusted = -1;
1099 memset(ivi.mac, 0, sizeof(ivi.mac));
1100 /* The default value for VF link state is "auto"
1101 * IFLA_VF_LINK_STATE_AUTO which equals zero
1102 */
1103 ivi.linkstate = 0;
1104 if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
1105 return 0;
1106
1107 vf_mac.vf =
1108 vf_vlan.vf =
1109 vf_rate.vf =
1110 vf_tx_rate.vf =
1111 vf_spoofchk.vf =
1112 vf_linkstate.vf =
1113 vf_rss_query_en.vf =
1114 vf_trust.vf = ivi.vf;
1115
1116 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1117 vf_vlan.vlan = ivi.vlan;
1118 vf_vlan.qos = ivi.qos;
1119 vf_tx_rate.rate = ivi.max_tx_rate;
1120 vf_rate.min_tx_rate = ivi.min_tx_rate;
1121 vf_rate.max_tx_rate = ivi.max_tx_rate;
1122 vf_spoofchk.setting = ivi.spoofchk;
1123 vf_linkstate.link_state = ivi.linkstate;
1124 vf_rss_query_en.setting = ivi.rss_query_en;
1125 vf_trust.setting = ivi.trusted;
1126 vf = nla_nest_start(skb, IFLA_VF_INFO);
1127 if (!vf) {
1128 nla_nest_cancel(skb, vfinfo);
1129 return -EMSGSIZE;
1130 }
1131 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1132 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1133 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1134 &vf_rate) ||
1135 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1136 &vf_tx_rate) ||
1137 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1138 &vf_spoofchk) ||
1139 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1140 &vf_linkstate) ||
1141 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1142 sizeof(vf_rss_query_en),
1143 &vf_rss_query_en) ||
1144 nla_put(skb, IFLA_VF_TRUST,
1145 sizeof(vf_trust), &vf_trust))
1146 return -EMSGSIZE;
1147 memset(&vf_stats, 0, sizeof(vf_stats));
1148 if (dev->netdev_ops->ndo_get_vf_stats)
1149 dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
1150 &vf_stats);
1151 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1152 if (!vfstats) {
1153 nla_nest_cancel(skb, vf);
1154 nla_nest_cancel(skb, vfinfo);
1155 return -EMSGSIZE;
1156 }
1157 if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
1158 vf_stats.rx_packets) ||
1159 nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
1160 vf_stats.tx_packets) ||
1161 nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
1162 vf_stats.rx_bytes) ||
1163 nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
1164 vf_stats.tx_bytes) ||
1165 nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
1166 vf_stats.broadcast) ||
1167 nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
1168 vf_stats.multicast))
1169 return -EMSGSIZE;
1170 nla_nest_end(skb, vfstats);
1171 nla_nest_end(skb, vf);
1172 return 0;
1173}
1174
1175static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1176{
1177 struct rtnl_link_ifmap map = {
1178 .mem_start = dev->mem_start,
1179 .mem_end = dev->mem_end,
1180 .base_addr = dev->base_addr,
1181 .irq = dev->irq,
1182 .dma = dev->dma,
1183 .port = dev->if_port,
1184 };
1185 if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
1186 return -EMSGSIZE;
1187
1188 return 0;
1189}
1190
1048static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, 1191static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1049 int type, u32 pid, u32 seq, u32 change, 1192 int type, u32 pid, u32 seq, u32 change,
1050 unsigned int flags, u32 ext_filter_mask) 1193 unsigned int flags, u32 ext_filter_mask)
1051{ 1194{
1052 struct ifinfomsg *ifm; 1195 struct ifinfomsg *ifm;
1053 struct nlmsghdr *nlh; 1196 struct nlmsghdr *nlh;
1054 struct rtnl_link_stats64 temp; 1197 struct nlattr *af_spec;
1055 const struct rtnl_link_stats64 *stats;
1056 struct nlattr *attr, *af_spec;
1057 struct rtnl_af_ops *af_ops; 1198 struct rtnl_af_ops *af_ops;
1058 struct net_device *upper_dev = netdev_master_upper_dev_get(dev); 1199 struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
1059 1200
@@ -1096,18 +1237,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1096 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down)) 1237 nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
1097 goto nla_put_failure; 1238 goto nla_put_failure;
1098 1239
1099 if (1) { 1240 if (rtnl_fill_link_ifmap(skb, dev))
1100 struct rtnl_link_ifmap map = { 1241 goto nla_put_failure;
1101 .mem_start = dev->mem_start,
1102 .mem_end = dev->mem_end,
1103 .base_addr = dev->base_addr,
1104 .irq = dev->irq,
1105 .dma = dev->dma,
1106 .port = dev->if_port,
1107 };
1108 if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
1109 goto nla_put_failure;
1110 }
1111 1242
1112 if (dev->addr_len) { 1243 if (dev->addr_len) {
1113 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) || 1244 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
@@ -1124,128 +1255,27 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
1124 if (rtnl_phys_switch_id_fill(skb, dev)) 1255 if (rtnl_phys_switch_id_fill(skb, dev))
1125 goto nla_put_failure; 1256 goto nla_put_failure;
1126 1257
1127 attr = nla_reserve(skb, IFLA_STATS, 1258 if (rtnl_fill_stats(skb, dev))
1128 sizeof(struct rtnl_link_stats));
1129 if (attr == NULL)
1130 goto nla_put_failure;
1131
1132 stats = dev_get_stats(dev, &temp);
1133 copy_rtnl_link_stats(nla_data(attr), stats);
1134
1135 attr = nla_reserve(skb, IFLA_STATS64,
1136 sizeof(struct rtnl_link_stats64));
1137 if (attr == NULL)
1138 goto nla_put_failure; 1259 goto nla_put_failure;
1139 copy_rtnl_link_stats64(nla_data(attr), stats);
1140 1260
1141 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) && 1261 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
1142 nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent))) 1262 nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
1143 goto nla_put_failure; 1263 goto nla_put_failure;
1144 1264
1145 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent 1265 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
1146 && (ext_filter_mask & RTEXT_FILTER_VF)) { 1266 ext_filter_mask & RTEXT_FILTER_VF) {
1147 int i; 1267 int i;
1148 1268 struct nlattr *vfinfo;
1149 struct nlattr *vfinfo, *vf, *vfstats;
1150 int num_vfs = dev_num_vf(dev->dev.parent); 1269 int num_vfs = dev_num_vf(dev->dev.parent);
1151 1270
1152 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST); 1271 vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
1153 if (!vfinfo) 1272 if (!vfinfo)
1154 goto nla_put_failure; 1273 goto nla_put_failure;
1155 for (i = 0; i < num_vfs; i++) { 1274 for (i = 0; i < num_vfs; i++) {
1156 struct ifla_vf_info ivi; 1275 if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
1157 struct ifla_vf_mac vf_mac;
1158 struct ifla_vf_vlan vf_vlan;
1159 struct ifla_vf_rate vf_rate;
1160 struct ifla_vf_tx_rate vf_tx_rate;
1161 struct ifla_vf_spoofchk vf_spoofchk;
1162 struct ifla_vf_link_state vf_linkstate;
1163 struct ifla_vf_rss_query_en vf_rss_query_en;
1164 struct ifla_vf_stats vf_stats;
1165 struct ifla_vf_trust vf_trust;
1166
1167 /*
1168 * Not all SR-IOV capable drivers support the
1169 * spoofcheck and "RSS query enable" query. Preset to
1170 * -1 so the user space tool can detect that the driver
1171 * didn't report anything.
1172 */
1173 ivi.spoofchk = -1;
1174 ivi.rss_query_en = -1;
1175 ivi.trusted = -1;
1176 memset(ivi.mac, 0, sizeof(ivi.mac));
1177 /* The default value for VF link state is "auto"
1178 * IFLA_VF_LINK_STATE_AUTO which equals zero
1179 */
1180 ivi.linkstate = 0;
1181 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
1182 break;
1183 vf_mac.vf =
1184 vf_vlan.vf =
1185 vf_rate.vf =
1186 vf_tx_rate.vf =
1187 vf_spoofchk.vf =
1188 vf_linkstate.vf =
1189 vf_rss_query_en.vf =
1190 vf_trust.vf = ivi.vf;
1191
1192 memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
1193 vf_vlan.vlan = ivi.vlan;
1194 vf_vlan.qos = ivi.qos;
1195 vf_tx_rate.rate = ivi.max_tx_rate;
1196 vf_rate.min_tx_rate = ivi.min_tx_rate;
1197 vf_rate.max_tx_rate = ivi.max_tx_rate;
1198 vf_spoofchk.setting = ivi.spoofchk;
1199 vf_linkstate.link_state = ivi.linkstate;
1200 vf_rss_query_en.setting = ivi.rss_query_en;
1201 vf_trust.setting = ivi.trusted;
1202 vf = nla_nest_start(skb, IFLA_VF_INFO);
1203 if (!vf) {
1204 nla_nest_cancel(skb, vfinfo);
1205 goto nla_put_failure;
1206 }
1207 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
1208 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
1209 nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
1210 &vf_rate) ||
1211 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
1212 &vf_tx_rate) ||
1213 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
1214 &vf_spoofchk) ||
1215 nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
1216 &vf_linkstate) ||
1217 nla_put(skb, IFLA_VF_RSS_QUERY_EN,
1218 sizeof(vf_rss_query_en),
1219 &vf_rss_query_en) ||
1220 nla_put(skb, IFLA_VF_TRUST,
1221 sizeof(vf_trust), &vf_trust))
1222 goto nla_put_failure; 1276 goto nla_put_failure;
1223 memset(&vf_stats, 0, sizeof(vf_stats));
1224 if (dev->netdev_ops->ndo_get_vf_stats)
1225 dev->netdev_ops->ndo_get_vf_stats(dev, i,
1226 &vf_stats);
1227 vfstats = nla_nest_start(skb, IFLA_VF_STATS);
1228 if (!vfstats) {
1229 nla_nest_cancel(skb, vf);
1230 nla_nest_cancel(skb, vfinfo);
1231 goto nla_put_failure;
1232 }
1233 if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
1234 vf_stats.rx_packets) ||
1235 nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
1236 vf_stats.tx_packets) ||
1237 nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
1238 vf_stats.rx_bytes) ||
1239 nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
1240 vf_stats.tx_bytes) ||
1241 nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
1242 vf_stats.broadcast) ||
1243 nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
1244 vf_stats.multicast))
1245 goto nla_put_failure;
1246 nla_nest_end(skb, vfstats);
1247 nla_nest_end(skb, vf);
1248 } 1277 }
1278
1249 nla_nest_end(skb, vfinfo); 1279 nla_nest_end(skb, vfinfo);
1250 } 1280 }
1251 1281
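
Note: the rtnl_fill_ifinfo() split looks mechanical but is largely about stack use: the pile of per-VF ifla_vf_* locals moves into noinline_for_stack helpers instead of sitting in the common frame, and the old `if (1) { ... }` relic becomes rtnl_fill_link_ifmap(). The nesting discipline the VF helper follows, sketched:

        /* every nla_nest_start() is matched by nla_nest_end() on success
         * or nla_nest_cancel() before returning -EMSGSIZE, so no
         * half-built attribute ever escapes into the message */
        vf = nla_nest_start(skb, IFLA_VF_INFO);
        if (!vf) {
                nla_nest_cancel(skb, vfinfo);   /* unwind the outer nest */
                return -EMSGSIZE;
        }
        if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac))
                return -EMSGSIZE;       /* caller discards the whole msg */
        nla_nest_end(skb, vf);
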
diff --git a/net/core/scm.c b/net/core/scm.c
index 3b6899b7d810..8a1741b14302 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -305,6 +305,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
305 err = put_user(cmlen, &cm->cmsg_len); 305 err = put_user(cmlen, &cm->cmsg_len);
306 if (!err) { 306 if (!err) {
307 cmlen = CMSG_SPACE(i*sizeof(int)); 307 cmlen = CMSG_SPACE(i*sizeof(int));
308 if (msg->msg_controllen < cmlen)
309 cmlen = msg->msg_controllen;
308 msg->msg_control += cmlen; 310 msg->msg_control += cmlen;
309 msg->msg_controllen -= cmlen; 311 msg->msg_controllen -= cmlen;
310 } 312 }
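
Note: msg_controllen is unsigned, so advancing msg_control by more bytes than actually remain would wrap msg_controllen to a huge value rather than going negative. A worked example of the clamp (sizes assume a 64-bit ABI, where CMSG_SPACE(16) is 32):

        size_t controllen = 16;                      /* space really left */
        size_t cmlen = CMSG_SPACE(4 * sizeof(int));  /* 4 fds -> 32       */

        if (controllen < cmlen)
                cmlen = controllen;                  /* the added clamp   */
        controllen -= cmlen;                         /* 0, not a wrapped ~0 */
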
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index aa41e6dd6429..b2df375ec9c2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3643,7 +3643,8 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
3643 serr->ee.ee_info = tstype; 3643 serr->ee.ee_info = tstype;
3644 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 3644 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
3645 serr->ee.ee_data = skb_shinfo(skb)->tskey; 3645 serr->ee.ee_data = skb_shinfo(skb)->tskey;
3646 if (sk->sk_protocol == IPPROTO_TCP) 3646 if (sk->sk_protocol == IPPROTO_TCP &&
3647 sk->sk_type == SOCK_STREAM)
3647 serr->ee.ee_data -= sk->sk_tskey; 3648 serr->ee.ee_data -= sk->sk_tskey;
3648 } 3649 }
3649 3650
@@ -4268,7 +4269,8 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
4268 return NULL; 4269 return NULL;
4269 } 4270 }
4270 4271
4271 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); 4272 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
4273 2 * ETH_ALEN);
4272 skb->mac_header += VLAN_HLEN; 4274 skb->mac_header += VLAN_HLEN;
4273 return skb; 4275 return skb;
4274} 4276}
diff --git a/net/core/sock.c b/net/core/sock.c
index 1e4dd54bfb5a..0d91f7dca751 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -433,8 +433,6 @@ static bool sock_needs_netstamp(const struct sock *sk)
433 } 433 }
434} 434}
435 435
436#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
437
438static void sock_disable_timestamp(struct sock *sk, unsigned long flags) 436static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
439{ 437{
440 if (sk->sk_flags & flags) { 438 if (sk->sk_flags & flags) {
@@ -874,7 +872,8 @@ set_rcvbuf:
874 872
875 if (val & SOF_TIMESTAMPING_OPT_ID && 873 if (val & SOF_TIMESTAMPING_OPT_ID &&
876 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { 874 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
877 if (sk->sk_protocol == IPPROTO_TCP) { 875 if (sk->sk_protocol == IPPROTO_TCP &&
876 sk->sk_type == SOCK_STREAM) {
878 if (sk->sk_state != TCP_ESTABLISHED) { 877 if (sk->sk_state != TCP_ESTABLISHED) {
879 ret = -EINVAL; 878 ret = -EINVAL;
880 break; 879 break;
@@ -1530,7 +1529,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1530 skb_queue_head_init(&newsk->sk_receive_queue); 1529 skb_queue_head_init(&newsk->sk_receive_queue);
1531 skb_queue_head_init(&newsk->sk_write_queue); 1530 skb_queue_head_init(&newsk->sk_write_queue);
1532 1531
1533 spin_lock_init(&newsk->sk_dst_lock);
1534 rwlock_init(&newsk->sk_callback_lock); 1532 rwlock_init(&newsk->sk_callback_lock);
1535 lockdep_set_class_and_name(&newsk->sk_callback_lock, 1533 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1536 af_callback_keys + newsk->sk_family, 1534 af_callback_keys + newsk->sk_family,
@@ -1553,7 +1551,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1553 */ 1551 */
1554 is_charged = sk_filter_charge(newsk, filter); 1552 is_charged = sk_filter_charge(newsk, filter);
1555 1553
1556 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) { 1554 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1557 /* It is still raw copy of parent, so invalidate 1555 /* It is still raw copy of parent, so invalidate
1558 * destructor and make plain sk_free() */ 1556 * destructor and make plain sk_free() */
1559 newsk->sk_destruct = NULL; 1557 newsk->sk_destruct = NULL;
@@ -1607,7 +1605,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1607{ 1605{
1608 u32 max_segs = 1; 1606 u32 max_segs = 1;
1609 1607
1610 __sk_dst_set(sk, dst); 1608 sk_dst_set(sk, dst);
1611 sk->sk_route_caps = dst->dev->features; 1609 sk->sk_route_caps = dst->dev->features;
1612 if (sk->sk_route_caps & NETIF_F_GSO) 1610 if (sk->sk_route_caps & NETIF_F_GSO)
1613 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; 1611 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
@@ -1815,7 +1813,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
1815{ 1813{
1816 DEFINE_WAIT(wait); 1814 DEFINE_WAIT(wait);
1817 1815
1818 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1816 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1819 for (;;) { 1817 for (;;) {
1820 if (!timeo) 1818 if (!timeo)
1821 break; 1819 break;
@@ -1861,7 +1859,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1861 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) 1859 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1862 break; 1860 break;
1863 1861
1864 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1862 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1865 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1863 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1866 err = -EAGAIN; 1864 err = -EAGAIN;
1867 if (!timeo) 1865 if (!timeo)
@@ -2048,9 +2046,9 @@ int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2048 DEFINE_WAIT(wait); 2046 DEFINE_WAIT(wait);
2049 2047
2050 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 2048 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2051 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2049 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2052 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb); 2050 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
2053 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2051 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2054 finish_wait(sk_sleep(sk), &wait); 2052 finish_wait(sk_sleep(sk), &wait);
2055 return rc; 2053 return rc;
2056} 2054}
@@ -2388,7 +2386,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2388 } else 2386 } else
2389 sk->sk_wq = NULL; 2387 sk->sk_wq = NULL;
2390 2388
2391 spin_lock_init(&sk->sk_dst_lock);
2392 rwlock_init(&sk->sk_callback_lock); 2389 rwlock_init(&sk->sk_callback_lock);
2393 lockdep_set_class_and_name(&sk->sk_callback_lock, 2390 lockdep_set_class_and_name(&sk->sk_callback_lock,
2394 af_callback_keys + sk->sk_family, 2391 af_callback_keys + sk->sk_family,
diff --git a/net/core/stream.c b/net/core/stream.c
index d70f77a0c889..b96f7a79e544 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -39,7 +39,7 @@ void sk_stream_write_space(struct sock *sk)
39 wake_up_interruptible_poll(&wq->wait, POLLOUT | 39 wake_up_interruptible_poll(&wq->wait, POLLOUT |
40 POLLWRNORM | POLLWRBAND); 40 POLLWRNORM | POLLWRBAND);
41 if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) 41 if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
42 sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); 42 sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
43 rcu_read_unlock(); 43 rcu_read_unlock();
44 } 44 }
45} 45}
@@ -126,7 +126,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
126 current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2; 126 current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;
127 127
128 while (1) { 128 while (1) {
129 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 129 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
130 130
131 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 131 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
132 132
@@ -139,7 +139,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
139 } 139 }
140 if (signal_pending(current)) 140 if (signal_pending(current))
141 goto do_interrupted; 141 goto do_interrupted;
142 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 142 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
143 if (sk_stream_memory_free(sk) && !vm_wait) 143 if (sk_stream_memory_free(sk) && !vm_wait)
144 break; 144 break;
145 145
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index db5fc2440a23..9c6d0508e63a 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -202,7 +202,9 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
202 security_req_classify_flow(req, flowi6_to_flowi(&fl6)); 202 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
203 203
204 204
205 final_p = fl6_update_dst(&fl6, np->opt, &final); 205 rcu_read_lock();
206 final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
207 rcu_read_unlock();
206 208
207 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 209 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
208 if (IS_ERR(dst)) { 210 if (IS_ERR(dst)) {
@@ -219,7 +221,10 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
219 &ireq->ir_v6_loc_addr, 221 &ireq->ir_v6_loc_addr,
220 &ireq->ir_v6_rmt_addr); 222 &ireq->ir_v6_rmt_addr);
221 fl6.daddr = ireq->ir_v6_rmt_addr; 223 fl6.daddr = ireq->ir_v6_rmt_addr;
222 err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); 224 rcu_read_lock();
225 err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
226 np->tclass);
227 rcu_read_unlock();
223 err = net_xmit_eval(err); 228 err = net_xmit_eval(err);
224 } 229 }
225 230
@@ -387,6 +392,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
387 struct inet_request_sock *ireq = inet_rsk(req); 392 struct inet_request_sock *ireq = inet_rsk(req);
388 struct ipv6_pinfo *newnp; 393 struct ipv6_pinfo *newnp;
389 const struct ipv6_pinfo *np = inet6_sk(sk); 394 const struct ipv6_pinfo *np = inet6_sk(sk);
395 struct ipv6_txoptions *opt;
390 struct inet_sock *newinet; 396 struct inet_sock *newinet;
391 struct dccp6_sock *newdp6; 397 struct dccp6_sock *newdp6;
392 struct sock *newsk; 398 struct sock *newsk;
@@ -453,7 +459,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
453 * comment in that function for the gory details. -acme 459 * comment in that function for the gory details. -acme
454 */ 460 */
455 461
456 __ip6_dst_store(newsk, dst, NULL, NULL); 462 ip6_dst_store(newsk, dst, NULL, NULL);
457 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | 463 newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
458 NETIF_F_TSO); 464 NETIF_F_TSO);
459 newdp6 = (struct dccp6_sock *)newsk; 465 newdp6 = (struct dccp6_sock *)newsk;
@@ -488,13 +494,15 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
488 * Yes, keeping reference count would be much more clever, but we do 494 * Yes, keeping reference count would be much more clever, but we do
489 * one more thing there: reattach optmem to newsk. 495 * one more thing there: reattach optmem to newsk.
490 */ 496 */
491 if (np->opt != NULL) 497 opt = rcu_dereference(np->opt);
492 newnp->opt = ipv6_dup_options(newsk, np->opt); 498 if (opt) {
493 499 opt = ipv6_dup_options(newsk, opt);
500 RCU_INIT_POINTER(newnp->opt, opt);
501 }
494 inet_csk(newsk)->icsk_ext_hdr_len = 0; 502 inet_csk(newsk)->icsk_ext_hdr_len = 0;
495 if (newnp->opt != NULL) 503 if (opt)
496 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + 504 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
497 newnp->opt->opt_flen); 505 opt->opt_flen;
498 506
499 dccp_sync_mss(newsk, dst_mtu(dst)); 507 dccp_sync_mss(newsk, dst_mtu(dst));
500 508
@@ -757,6 +765,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
757 struct ipv6_pinfo *np = inet6_sk(sk); 765 struct ipv6_pinfo *np = inet6_sk(sk);
758 struct dccp_sock *dp = dccp_sk(sk); 766 struct dccp_sock *dp = dccp_sk(sk);
759 struct in6_addr *saddr = NULL, *final_p, final; 767 struct in6_addr *saddr = NULL, *final_p, final;
768 struct ipv6_txoptions *opt;
760 struct flowi6 fl6; 769 struct flowi6 fl6;
761 struct dst_entry *dst; 770 struct dst_entry *dst;
762 int addr_type; 771 int addr_type;
@@ -856,7 +865,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
856 fl6.fl6_sport = inet->inet_sport; 865 fl6.fl6_sport = inet->inet_sport;
857 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 866 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
858 867
859 final_p = fl6_update_dst(&fl6, np->opt, &final); 868 opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
869 final_p = fl6_update_dst(&fl6, opt, &final);
860 870
861 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); 871 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
862 if (IS_ERR(dst)) { 872 if (IS_ERR(dst)) {
@@ -873,12 +883,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
873 np->saddr = *saddr; 883 np->saddr = *saddr;
874 inet->inet_rcv_saddr = LOOPBACK4_IPV6; 884 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
875 885
876 __ip6_dst_store(sk, dst, NULL, NULL); 886 ip6_dst_store(sk, dst, NULL, NULL);
877 887
878 icsk->icsk_ext_hdr_len = 0; 888 icsk->icsk_ext_hdr_len = 0;
879 if (np->opt != NULL) 889 if (opt)
880 icsk->icsk_ext_hdr_len = (np->opt->opt_flen + 890 icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
881 np->opt->opt_nflen);
882 891
883 inet->inet_dport = usin->sin6_port; 892 inet->inet_dport = usin->sin6_port;
884 893
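
Note: this series turns np->opt into an RCU-managed pointer, and each call site above picks the accessor that matches its locking context. The three flavors in use, summarized as a sketch:

        /* lockless reader, e.g. the response path: */
        rcu_read_lock();
        opt = rcu_dereference(np->opt);
        /* use opt without sleeping */
        rcu_read_unlock();

        /* owner holding the socket lock, e.g. connect(): */
        opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));

        /* publisher of a socket not yet visible to others, accept path: */
        RCU_INIT_POINTER(newnp->opt, opt);
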
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b5cf13a28009..41e65804ddf5 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -339,8 +339,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
339 if (sk_stream_is_writeable(sk)) { 339 if (sk_stream_is_writeable(sk)) {
340 mask |= POLLOUT | POLLWRNORM; 340 mask |= POLLOUT | POLLWRNORM;
341 } else { /* send SIGIO later */ 341 } else { /* send SIGIO later */
342 set_bit(SOCK_ASYNC_NOSPACE, 342 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
343 &sk->sk_socket->flags);
344 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 343 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
345 344
346 /* Race breaker. If space is freed after 345 /* Race breaker. If space is freed after
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 675cf94e04f8..13d6b1a6e0fc 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -678,6 +678,9 @@ static int dn_create(struct net *net, struct socket *sock, int protocol,
678{ 678{
679 struct sock *sk; 679 struct sock *sk;
680 680
681 if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
682 return -EINVAL;
683
681 if (!net_eq(net, &init_net)) 684 if (!net_eq(net, &init_net))
682 return -EAFNOSUPPORT; 685 return -EAFNOSUPPORT;
683 686
@@ -1747,9 +1750,9 @@ static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1747 } 1750 }
1748 1751
1749 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1752 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1750 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1753 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1751 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); 1754 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
1752 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1755 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1753 finish_wait(sk_sleep(sk), &wait); 1756 finish_wait(sk_sleep(sk), &wait);
1754 } 1757 }
1755 1758
@@ -2004,10 +2007,10 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
2004 } 2007 }
2005 2008
2006 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 2009 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2007 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2010 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2008 sk_wait_event(sk, &timeo, 2011 sk_wait_event(sk, &timeo,
2009 !dn_queue_too_long(scp, queue, flags)); 2012 !dn_queue_too_long(scp, queue, flags));
2010 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2013 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2011 finish_wait(sk_sleep(sk), &wait); 2014 finish_wait(sk_sleep(sk), &wait);
2012 continue; 2015 continue;
2013 } 2016 }
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 4677b6fa6dda..ecc28cff08ab 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -67,7 +67,7 @@
67 * Returns the size of the result on success, -ve error code otherwise. 67 * Returns the size of the result on success, -ve error code otherwise.
68 */ 68 */
69int dns_query(const char *type, const char *name, size_t namelen, 69int dns_query(const char *type, const char *name, size_t namelen,
70 const char *options, char **_result, time_t *_expiry) 70 const char *options, char **_result, time64_t *_expiry)
71{ 71{
72 struct key *rkey; 72 struct key *rkey;
73 const struct user_key_payload *upayload; 73 const struct user_key_payload *upayload;
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 35a9788bb3ae..c7d1adca30d8 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -312,7 +312,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master, u8 type)
312 return; 312 return;
313 313
314out: 314out:
315 WARN_ON_ONCE("HSR: Could not send supervision frame\n"); 315 WARN_ONCE(1, "HSR: Could not send supervision frame\n");
316 kfree_skb(skb); 316 kfree_skb(skb);
317} 317}
318 318
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 11c4ca13ec3b..5c5db6636704 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -257,6 +257,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
257 int try_loading_module = 0; 257 int try_loading_module = 0;
258 int err; 258 int err;
259 259
260 if (protocol < 0 || protocol >= IPPROTO_MAX)
261 return -EINVAL;
262
260 sock->state = SS_UNCONNECTED; 263 sock->state = SS_UNCONNECTED;
261 264
262 /* Look for the requested type/protocol pair. */ 265 /* Look for the requested type/protocol pair. */
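
Note: inet_create() now validates the protocol number up front, mirroring the SK_PROTOCOL_MAX check added to decnet above. From userspace, roughly:

        /* sketch: an out-of-range protocol now fails fast */
        int fd = socket(AF_INET, SOCK_STREAM, -1);
        /* fd == -1 with errno == EINVAL, instead of a negative value
         * flowing into the protocol-table lookup as a signed int */
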
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index cc8f3e506cde..473447593060 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1155,6 +1155,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
1155static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1155static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
1156{ 1156{
1157 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1157 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1158 struct netdev_notifier_changeupper_info *info;
1158 struct in_device *in_dev; 1159 struct in_device *in_dev;
1159 struct net *net = dev_net(dev); 1160 struct net *net = dev_net(dev);
1160 unsigned int flags; 1161 unsigned int flags;
@@ -1193,6 +1194,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
1193 case NETDEV_CHANGEMTU: 1194 case NETDEV_CHANGEMTU:
1194 rt_cache_flush(net); 1195 rt_cache_flush(net);
1195 break; 1196 break;
1197 case NETDEV_CHANGEUPPER:
1198 info = ptr;
1199 /* flush all routes if dev is linked to or unlinked from
1200 * an L3 master device (e.g., VRF)
1201 */
1202 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
1203 fib_disable_ip(dev, NETDEV_DOWN, true);
1204 break;
1196 } 1205 }
1197 return NOTIFY_DONE; 1206 return NOTIFY_DONE;
1198} 1207}
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index e0fcbbbcfe54..bd903fe0f750 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -24,6 +24,7 @@ struct fou {
24 u16 type; 24 u16 type;
25 struct udp_offload udp_offloads; 25 struct udp_offload udp_offloads;
26 struct list_head list; 26 struct list_head list;
27 struct rcu_head rcu;
27}; 28};
28 29
29#define FOU_F_REMCSUM_NOPARTIAL BIT(0) 30#define FOU_F_REMCSUM_NOPARTIAL BIT(0)
@@ -417,7 +418,7 @@ static void fou_release(struct fou *fou)
417 list_del(&fou->list); 418 list_del(&fou->list);
418 udp_tunnel_sock_release(sock); 419 udp_tunnel_sock_release(sock);
419 420
420 kfree(fou); 421 kfree_rcu(fou, rcu);
421} 422}
422 423
423static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg) 424static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
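
Note: a fou instance can still be referenced by the UDP receive path under rcu_read_lock() at the moment it is torn down, so freeing right after list_del() could free memory a reader is traversing; kfree_rcu() defers the free past a grace period, which is why struct fou grows an rcu_head. The pattern:

        list_del(&fou->list);           /* unpublish: no new readers */
        udp_tunnel_sock_release(sock);
        kfree_rcu(fou, rcu);            /* freed only after current RCU
                                         * readers have finished */
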
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6baf36e11808..05e4cba14162 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2126,7 +2126,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
2126 ASSERT_RTNL(); 2126 ASSERT_RTNL();
2127 2127
2128 in_dev = ip_mc_find_dev(net, imr); 2128 in_dev = ip_mc_find_dev(net, imr);
2129 if (!in_dev) { 2129 if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) {
2130 ret = -ENODEV; 2130 ret = -ENODEV;
2131 goto out; 2131 goto out;
2132 } 2132 }
@@ -2147,7 +2147,8 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
2147 2147
2148 *imlp = iml->next_rcu; 2148 *imlp = iml->next_rcu;
2149 2149
2150 ip_mc_dec_group(in_dev, group); 2150 if (in_dev)
2151 ip_mc_dec_group(in_dev, group);
2151 2152
2152 /* decrease mem now to avoid the memleak warning */ 2153 /* decrease mem now to avoid the memleak warning */
2153 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); 2154 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 1feb15f23de8..46b9c887bede 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -563,7 +563,7 @@ static void reqsk_timer_handler(unsigned long data)
563 int max_retries, thresh; 563 int max_retries, thresh;
564 u8 defer_accept; 564 u8 defer_accept;
565 565
566 if (sk_listener->sk_state != TCP_LISTEN) 566 if (sk_state_load(sk_listener) != TCP_LISTEN)
567 goto drop; 567 goto drop;
568 568
569 max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; 569 max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
@@ -749,7 +749,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
749 * It is OK, because this socket enters to hash table only 749 * It is OK, because this socket enters to hash table only
750 * after validation is complete. 750 * after validation is complete.
751 */ 751 */
752 sk->sk_state = TCP_LISTEN; 752 sk_state_store(sk, TCP_LISTEN);
753 if (!sk->sk_prot->get_port(sk, inet->inet_num)) { 753 if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
754 inet->inet_sport = htons(inet->inet_num); 754 inet->inet_sport = htons(inet->inet_num);
755 755
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 92dd4b74d513..c3a38353f5dc 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -134,7 +134,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
134 struct mfc_cache *c, struct rtmsg *rtm); 134 struct mfc_cache *c, struct rtmsg *rtm);
135static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, 135static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
136 int cmd); 136 int cmd);
137static void mroute_clean_tables(struct mr_table *mrt); 137static void mroute_clean_tables(struct mr_table *mrt, bool all);
138static void ipmr_expire_process(unsigned long arg); 138static void ipmr_expire_process(unsigned long arg);
139 139
140#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES 140#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -350,7 +350,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
350static void ipmr_free_table(struct mr_table *mrt) 350static void ipmr_free_table(struct mr_table *mrt)
351{ 351{
352 del_timer_sync(&mrt->ipmr_expire_timer); 352 del_timer_sync(&mrt->ipmr_expire_timer);
353 mroute_clean_tables(mrt); 353 mroute_clean_tables(mrt, true);
354 kfree(mrt); 354 kfree(mrt);
355} 355}
356 356
@@ -441,10 +441,6 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
441 return dev; 441 return dev;
442 442
443failure: 443failure:
444 /* allow the register to be completed before unregistering. */
445 rtnl_unlock();
446 rtnl_lock();
447
448 unregister_netdevice(dev); 444 unregister_netdevice(dev);
449 return NULL; 445 return NULL;
450} 446}
@@ -540,10 +536,6 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
540 return dev; 536 return dev;
541 537
542failure: 538failure:
543 /* allow the register to be completed before unregistering. */
544 rtnl_unlock();
545 rtnl_lock();
546
547 unregister_netdevice(dev); 539 unregister_netdevice(dev);
548 return NULL; 540 return NULL;
549} 541}
@@ -1208,7 +1200,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1208 * Close the multicast socket, and clear the vif tables etc 1200 * Close the multicast socket, and clear the vif tables etc
1209 */ 1201 */
1210 1202
1211static void mroute_clean_tables(struct mr_table *mrt) 1203static void mroute_clean_tables(struct mr_table *mrt, bool all)
1212{ 1204{
1213 int i; 1205 int i;
1214 LIST_HEAD(list); 1206 LIST_HEAD(list);
@@ -1217,8 +1209,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
1217 /* Shut down all active vif entries */ 1209 /* Shut down all active vif entries */
1218 1210
1219 for (i = 0; i < mrt->maxvif; i++) { 1211 for (i = 0; i < mrt->maxvif; i++) {
1220 if (!(mrt->vif_table[i].flags & VIFF_STATIC)) 1212 if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
1221 vif_delete(mrt, i, 0, &list); 1213 continue;
1214 vif_delete(mrt, i, 0, &list);
1222 } 1215 }
1223 unregister_netdevice_many(&list); 1216 unregister_netdevice_many(&list);
1224 1217
@@ -1226,7 +1219,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
1226 1219
1227 for (i = 0; i < MFC_LINES; i++) { 1220 for (i = 0; i < MFC_LINES; i++) {
1228 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { 1221 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
1229 if (c->mfc_flags & MFC_STATIC) 1222 if (!all && (c->mfc_flags & MFC_STATIC))
1230 continue; 1223 continue;
1231 list_del_rcu(&c->list); 1224 list_del_rcu(&c->list);
1232 mroute_netlink_event(mrt, c, RTM_DELROUTE); 1225 mroute_netlink_event(mrt, c, RTM_DELROUTE);
@@ -1261,7 +1254,7 @@ static void mrtsock_destruct(struct sock *sk)
1261 NETCONFA_IFINDEX_ALL, 1254 NETCONFA_IFINDEX_ALL,
1262 net->ipv4.devconf_all); 1255 net->ipv4.devconf_all);
1263 RCU_INIT_POINTER(mrt->mroute_sk, NULL); 1256 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1264 mroute_clean_tables(mrt); 1257 mroute_clean_tables(mrt, false);
1265 } 1258 }
1266 } 1259 }
1267 rtnl_unlock(); 1260 rtnl_unlock();
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index a35584176535..c187c60e3e0c 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -60,6 +60,7 @@ config NFT_REJECT_IPV4
60 60
61config NFT_DUP_IPV4 61config NFT_DUP_IPV4
62 tristate "IPv4 nf_tables packet duplication support" 62 tristate "IPv4 nf_tables packet duplication support"
63 depends on !NF_CONNTRACK || NF_CONNTRACK
63 select NF_DUP_IPV4 64 select NF_DUP_IPV4
64 help 65 help
65 This module enables IPv4 packet duplication support for nf_tables. 66 This module enables IPv4 packet duplication support for nf_tables.
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 657d2307f031..b3ca21b2ba9b 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -45,7 +45,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
45 struct net *net = nf_ct_net(ct); 45 struct net *net = nf_ct_net(ct);
46 const struct nf_conn *master = ct->master; 46 const struct nf_conn *master = ct->master;
47 struct nf_conntrack_expect *other_exp; 47 struct nf_conntrack_expect *other_exp;
48 struct nf_conntrack_tuple t; 48 struct nf_conntrack_tuple t = {};
49 const struct nf_ct_pptp_master *ct_pptp_info; 49 const struct nf_ct_pptp_master *ct_pptp_info;
50 const struct nf_nat_pptp *nat_pptp_info; 50 const struct nf_nat_pptp *nat_pptp_info;
51 struct nf_nat_range range; 51 struct nf_nat_range range;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 8c0d0bdc2a7c..63e5be0abd86 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -406,10 +406,12 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
406 ip_select_ident(net, skb, NULL); 406 ip_select_ident(net, skb, NULL);
407 407
408 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 408 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
409 skb->transport_header += iphlen;
410 if (iph->protocol == IPPROTO_ICMP &&
411 length >= iphlen + sizeof(struct icmphdr))
412 icmp_out_count(net, ((struct icmphdr *)
413 skb_transport_header(skb))->type);
409 } 414 }
410 if (iph->protocol == IPPROTO_ICMP)
411 icmp_out_count(net, ((struct icmphdr *)
412 skb_transport_header(skb))->type);
413 415
414 err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, 416 err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
415 net, sk, skb, NULL, rt->dst.dev, 417 net, sk, skb, NULL, rt->dst.dev,
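
Note: with IP_HDRINCL the user supplies the entire packet, so nothing guarantees that a full ICMP header follows the IP header; the old code also read through skb_transport_header() before that offset had been advanced past the IP header. The guarded layout, schematically:

        /* |<-- iphlen -->|<- needs >= sizeof(struct icmphdr) ->|
         * +--------------+--------------------------------------+
         * | struct iphdr | type | code | checksum | ...         |
         * +--------------+--------------------------------------+
         * only count icmp_out when
         *   length >= iphlen + sizeof(struct icmphdr)
         */
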
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0cfa7c0c1e80..c82cca18c90f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -451,11 +451,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
451 unsigned int mask; 451 unsigned int mask;
452 struct sock *sk = sock->sk; 452 struct sock *sk = sock->sk;
453 const struct tcp_sock *tp = tcp_sk(sk); 453 const struct tcp_sock *tp = tcp_sk(sk);
454 int state;
454 455
455 sock_rps_record_flow(sk); 456 sock_rps_record_flow(sk);
456 457
457 sock_poll_wait(file, sk_sleep(sk), wait); 458 sock_poll_wait(file, sk_sleep(sk), wait);
458 if (sk->sk_state == TCP_LISTEN) 459
460 state = sk_state_load(sk);
461 if (state == TCP_LISTEN)
459 return inet_csk_listen_poll(sk); 462 return inet_csk_listen_poll(sk);
460 463
461 /* Socket is not locked. We are protected from async events 464 /* Socket is not locked. We are protected from async events
@@ -492,14 +495,14 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
492 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent 495 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
493 * blocking on fresh not-connected or disconnected socket. --ANK 496 * blocking on fresh not-connected or disconnected socket. --ANK
494 */ 497 */
495 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) 498 if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
496 mask |= POLLHUP; 499 mask |= POLLHUP;
497 if (sk->sk_shutdown & RCV_SHUTDOWN) 500 if (sk->sk_shutdown & RCV_SHUTDOWN)
498 mask |= POLLIN | POLLRDNORM | POLLRDHUP; 501 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
499 502
500 /* Connected or passive Fast Open socket? */ 503 /* Connected or passive Fast Open socket? */
501 if (sk->sk_state != TCP_SYN_SENT && 504 if (state != TCP_SYN_SENT &&
502 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) { 505 (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
503 int target = sock_rcvlowat(sk, 0, INT_MAX); 506 int target = sock_rcvlowat(sk, 0, INT_MAX);
504 507
505 if (tp->urg_seq == tp->copied_seq && 508 if (tp->urg_seq == tp->copied_seq &&
@@ -507,9 +510,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
507 tp->urg_data) 510 tp->urg_data)
508 target++; 511 target++;
509 512
510 /* Potential race condition. If read of tp below will
511 * escape above sk->sk_state, we can be illegally awaken
512 * in SYN_* states. */
513 if (tp->rcv_nxt - tp->copied_seq >= target) 513 if (tp->rcv_nxt - tp->copied_seq >= target)
514 mask |= POLLIN | POLLRDNORM; 514 mask |= POLLIN | POLLRDNORM;
515 515
@@ -517,8 +517,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
517 if (sk_stream_is_writeable(sk)) { 517 if (sk_stream_is_writeable(sk)) {
518 mask |= POLLOUT | POLLWRNORM; 518 mask |= POLLOUT | POLLWRNORM;
519 } else { /* send SIGIO later */ 519 } else { /* send SIGIO later */
520 set_bit(SOCK_ASYNC_NOSPACE, 520 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
521 &sk->sk_socket->flags);
522 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 521 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
523 522
524 /* Race breaker. If space is freed after 523 /* Race breaker. If space is freed after
@@ -906,7 +905,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
906 goto out_err; 905 goto out_err;
907 } 906 }
908 907
909 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 908 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
910 909
911 mss_now = tcp_send_mss(sk, &size_goal, flags); 910 mss_now = tcp_send_mss(sk, &size_goal, flags);
912 copied = 0; 911 copied = 0;
@@ -1134,7 +1133,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1134 } 1133 }
1135 1134
1136 /* This should be in poll */ 1135 /* This should be in poll */
1137 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1136 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1138 1137
1139 mss_now = tcp_send_mss(sk, &size_goal, flags); 1138 mss_now = tcp_send_mss(sk, &size_goal, flags);
1140 1139
@@ -1934,7 +1933,7 @@ void tcp_set_state(struct sock *sk, int state)
1934 /* Change state AFTER socket is unhashed to avoid closed 1933 /* Change state AFTER socket is unhashed to avoid closed
1935 * socket sitting in hash tables. 1934 * socket sitting in hash tables.
1936 */ 1935 */
1937 sk->sk_state = state; 1936 sk_state_store(sk, state);
1938 1937
1939#ifdef STATE_TRACE 1938#ifdef STATE_TRACE
1940 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); 1939 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
@@ -2644,7 +2643,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2644 if (sk->sk_type != SOCK_STREAM) 2643 if (sk->sk_type != SOCK_STREAM)
2645 return; 2644 return;
2646 2645
2647 info->tcpi_state = sk->sk_state; 2646 info->tcpi_state = sk_state_load(sk);
2647
2648 info->tcpi_ca_state = icsk->icsk_ca_state; 2648 info->tcpi_ca_state = icsk->icsk_ca_state;
2649 info->tcpi_retransmits = icsk->icsk_retransmits; 2649 info->tcpi_retransmits = icsk->icsk_retransmits;
2650 info->tcpi_probes = icsk->icsk_probes_out; 2650 info->tcpi_probes = icsk->icsk_probes_out;
@@ -2672,7 +2672,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2672 info->tcpi_snd_mss = tp->mss_cache; 2672 info->tcpi_snd_mss = tp->mss_cache;
2673 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 2673 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2674 2674
2675 if (sk->sk_state == TCP_LISTEN) { 2675 if (info->tcpi_state == TCP_LISTEN) {
2676 info->tcpi_unacked = sk->sk_ack_backlog; 2676 info->tcpi_unacked = sk->sk_ack_backlog;
2677 info->tcpi_sacked = sk->sk_max_ack_backlog; 2677 info->tcpi_sacked = sk->sk_max_ack_backlog;
2678 } else { 2678 } else {
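
Note: tcp_poll(), tcp_get_info(), the diag code and the listener paths all read sk_state without the socket lock, so plain accesses could be torn or reordered against the fields they guard. sk_state_load()/sk_state_store() pair an acquire load with a release store; roughly, per the v4.4-era include/net/sock.h:

        static inline int sk_state_load(const struct sock *sk)
        {
                /* acquire: reads after this see everything written
                 * before the matching sk_state_store() */
                return smp_load_acquire(&sk->sk_state);
        }

        static inline void sk_state_store(struct sock *sk, int newstate)
        {
                /* release: initialization is visible before the state */
                smp_store_release(&sk->sk_state, newstate);
        }
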
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 479f34946177..b31604086edd 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -21,7 +21,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
21{ 21{
22 struct tcp_info *info = _info; 22 struct tcp_info *info = _info;
23 23
24 if (sk->sk_state == TCP_LISTEN) { 24 if (sk_state_load(sk) == TCP_LISTEN) {
25 r->idiag_rqueue = sk->sk_ack_backlog; 25 r->idiag_rqueue = sk->sk_ack_backlog;
26 r->idiag_wqueue = sk->sk_max_ack_backlog; 26 r->idiag_wqueue = sk->sk_max_ack_backlog;
27 } else if (sk->sk_type == SOCK_STREAM) { 27 } else if (sk->sk_type == SOCK_STREAM) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fdd88c3803a6..2d656eef7f8e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4481,19 +4481,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 {
 	struct sk_buff *skb;
+	int err = -ENOMEM;
+	int data_len = 0;
 	bool fragstolen;
 
 	if (size == 0)
 		return 0;
 
-	skb = alloc_skb(size, sk->sk_allocation);
+	if (size > PAGE_SIZE) {
+		int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
+
+		data_len = npages << PAGE_SHIFT;
+		size = data_len + (size & ~PAGE_MASK);
+	}
+	skb = alloc_skb_with_frags(size - data_len, data_len,
+				   PAGE_ALLOC_COSTLY_ORDER,
+				   &err, sk->sk_allocation);
 	if (!skb)
 		goto err;
 
+	skb_put(skb, size - data_len);
+	skb->data_len = data_len;
+	skb->len = size;
+
 	if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
 		goto err_free;
 
-	if (memcpy_from_msg(skb_put(skb, size), msg, size))
+	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
+	if (err)
 		goto err_free;
 
 	TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
@@ -4509,7 +4524,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 err_free:
 	kfree_skb(skb);
 err:
-	return -ENOMEM;
+	return err;
+
 }
 
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
@@ -5667,6 +5683,7 @@ discard:
 	}
 
 	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+	tp->copied_seq = tp->rcv_nxt;
 	tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
 
 	/* RFC1323: The window in SYN & SYN/ACK segments is
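
alloc_skb_with_frags() above splits one large request into a small linear head plus up to MAX_SKB_FRAGS page fragments, so tcp_send_rcvq() no longer needs one huge contiguous allocation. A stand-alone sketch of the size arithmetic, assuming 4 KiB pages and MAX_SKB_FRAGS == 17 (both values are stand-ins for the kernel's definitions):

#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define MAX_SKB_FRAGS	17

/* Returns the linear (head) length; *data_len gets the paged length. */
static size_t rcvq_linear_len(size_t size, size_t *data_len)
{
	*data_len = 0;
	if (size > PAGE_SIZE) {
		size_t npages = size >> PAGE_SHIFT;

		if (npages > MAX_SKB_FRAGS)
			npages = MAX_SKB_FRAGS;
		*data_len = npages << PAGE_SHIFT;	/* whole pages */
		size = *data_len + (size & ~PAGE_MASK);	/* clamped total */
	}
	return size - *data_len;
}

For size = 70000 this yields data_len = 69632 (17 pages) and a 368-byte head, matching what the hunk feeds to alloc_skb_with_frags().
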
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 950e28c0cdf2..d8841a2f1569 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -921,7 +921,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 	}
 
 	md5sig = rcu_dereference_protected(tp->md5sig_info,
-					   sock_owned_by_user(sk));
+					   sock_owned_by_user(sk) ||
+					   lockdep_is_held(&sk->sk_lock.slock));
 	if (!md5sig) {
 		md5sig = kmalloc(sizeof(*md5sig), gfp);
 		if (!md5sig)
@@ -1492,7 +1493,7 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 	if (likely(sk->sk_rx_dst))
 		skb_dst_drop(skb);
 	else
-		skb_dst_force(skb);
+		skb_dst_force_safe(skb);
 
 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
 	tp->ucopy.memory += skb->truesize;
@@ -1720,8 +1721,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 
-	if (dst) {
-		dst_hold(dst);
+	if (dst && dst_hold_safe(dst)) {
 		sk->sk_rx_dst = dst;
 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 	}
@@ -2158,6 +2158,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 	__u16 destp = ntohs(inet->inet_dport);
 	__u16 srcp = ntohs(inet->inet_sport);
 	int rx_queue;
+	int state;
 
 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
@@ -2175,17 +2176,18 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 		timer_expires = jiffies;
 	}
 
-	if (sk->sk_state == TCP_LISTEN)
+	state = sk_state_load(sk);
+	if (state == TCP_LISTEN)
 		rx_queue = sk->sk_ack_backlog;
 	else
-		/*
-		 * because we dont lock socket, we might find a transient negative value
+		/* Because we don't lock the socket,
+		 * we might find a transient negative value.
 		 */
 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
-		i, src, srcp, dest, destp, sk->sk_state,
+		i, src, srcp, dest, destp, state,
 		tp->write_seq - tp->snd_una,
 		rx_queue,
 		timer_active,
@@ -2199,8 +2201,8 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 		jiffies_to_clock_t(icsk->icsk_ack.ato),
 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		tp->snd_cwnd,
-		sk->sk_state == TCP_LISTEN ?
-		(fastopenq ? fastopenq->max_qlen : 0) :
+		state == TCP_LISTEN ?
+		fastopenq->max_qlen :
 		(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
 }
 
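
dst_hold_safe(), introduced for the rx-dst paths above, only takes a new reference while the entry's refcount is still non-zero, so a dst that is already being released cannot be revived by a racing reader. The core of that pattern as a self-contained C11 sketch (refcnt stands in for dst->__refcnt):

#include <stdatomic.h>
#include <stdbool.h>

static bool ref_get_not_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != 0) {
		/* on failure 'old' is reloaded, so the zero test stays valid */
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;	/* object already on its way to being freed */
}

When the helper fails, inet_sk_rx_dst_set() simply leaves sk->sk_rx_dst unset and a later route lookup repopulates it.
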
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index cb7ca569052c..9bfc39ff2285 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3150,7 +3150,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_fastopen_request *fo = tp->fastopen_req;
-	int syn_loss = 0, space, err = 0, copied;
+	int syn_loss = 0, space, err = 0;
 	unsigned long last_syn_loss = 0;
 	struct sk_buff *syn_data;
 
@@ -3188,17 +3188,18 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 		goto fallback;
 	syn_data->ip_summed = CHECKSUM_PARTIAL;
 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
-	copied = copy_from_iter(skb_put(syn_data, space), space,
-				&fo->data->msg_iter);
-	if (unlikely(!copied)) {
-		kfree_skb(syn_data);
-		goto fallback;
-	}
-	if (copied != space) {
-		skb_trim(syn_data, copied);
-		space = copied;
+	if (space) {
+		int copied = copy_from_iter(skb_put(syn_data, space), space,
+					    &fo->data->msg_iter);
+		if (unlikely(!copied)) {
+			kfree_skb(syn_data);
+			goto fallback;
+		}
+		if (copied != space) {
+			skb_trim(syn_data, copied);
+			space = copied;
+		}
 	}
-
 	/* No more data pending in inet_wait_for_connect() */
 	if (space == fo->size)
 		fo->data = NULL;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c9c716a483e4..193ba1fa8a9a 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -168,7 +168,7 @@ static int tcp_write_timeout(struct sock *sk)
 			dst_negative_advice(sk);
 			if (tp->syn_fastopen || tp->syn_data)
 				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-			if (tp->syn_data)
+			if (tp->syn_data && icsk->icsk_retransmits == 1)
 				NET_INC_STATS_BH(sock_net(sk),
 						 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 		}
@@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk)
 		syn_set = true;
 	} else {
 		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
+			/* Some middle-boxes may black-hole Fast Open _after_
+			 * the handshake. Therefore we conservatively disable
+			 * Fast Open on this path on recurring timeouts with
+			 * few or zero bytes acked after Fast Open.
+			 */
+			if (tp->syn_data_acked &&
+			    tp->bytes_acked <= tp->rx_opt.mss_clamp) {
+				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
+				if (icsk->icsk_retransmits == sysctl_tcp_retries1)
+					NET_INC_STATS_BH(sock_net(sk),
+							 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+			}
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
 
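
The block added above encodes a heuristic, not a protocol rule: when the data carried in the SYN was acknowledged but almost nothing beyond one MSS ever was, a middlebox is assumed to be black-holing Fast Open and the path's cache entry is disabled. A stand-alone restatement of the predicate (types simplified):

#include <stdbool.h>

static bool tfo_looks_blackholed(bool syn_data_acked,
				 unsigned long bytes_acked,
				 unsigned int mss_clamp)
{
	/* data in the SYN was acked, yet the connection then starved */
	return syn_data_acked && bytes_acked <= mss_clamp;
}

Gating the MIB increment on icsk_retransmits == sysctl_tcp_retries1 keeps the counter from being bumped on every subsequent timer firing.
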
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 24ec14f9825c..0c7b0e61b917 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -100,7 +100,6 @@
 #include <linux/slab.h>
 #include <net/tcp_states.h>
 #include <linux/skbuff.h>
-#include <linux/netdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <net/net_namespace.h>
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d84742f003a9..17f8e7ea133b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -350,6 +350,12 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
 	setup_timer(&ndev->rs_timer, addrconf_rs_timer,
 		    (unsigned long)ndev);
 	memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
+
+	if (ndev->cnf.stable_secret.initialized)
+		ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+	else
+		ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64;
+
 	ndev->cnf.mtu6 = dev->mtu;
 	ndev->cnf.sysctl = NULL;
 	ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
@@ -2455,7 +2461,7 @@ ok:
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 			if (in6_dev->cnf.optimistic_dad &&
 			    !net->ipv6.devconf_all->forwarding && sllao)
-				addr_flags = IFA_F_OPTIMISTIC;
+				addr_flags |= IFA_F_OPTIMISTIC;
 #endif
 
 			/* Do not allow to create too much of autoconfigured
@@ -3642,7 +3648,7 @@ static void addrconf_dad_work(struct work_struct *w)
 
 	/* send a neighbour solicitation for our addr */
 	addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
-	ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any, NULL);
+	ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any);
 out:
 	in6_ifa_put(ifp);
 	rtnl_unlock();
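
The one-character change from = to |= in the optimistic-DAD hunk matters because addr_flags may already carry bits when that line runs; plain assignment would silently clear them. A minimal illustration (flag values as in include/uapi/linux/if_addr.h; which other bit is set beforehand depends on the caller and is assumed here):

#include <stdio.h>

#define IFA_F_OPTIMISTIC	0x04
#define IFA_F_NOPREFIXROUTE	0x200

int main(void)
{
	unsigned int addr_flags = IFA_F_NOPREFIXROUTE;	/* set earlier */

	addr_flags |= IFA_F_OPTIMISTIC;	/* keeps 0x200, adds 0x04 */
	printf("flags = %#x\n", addr_flags);	/* prints 0x204 */
	return 0;
}
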
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 44bb66bde0e2..9f5137cd604e 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -109,6 +109,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
 	int try_loading_module = 0;
 	int err;
 
+	if (protocol < 0 || protocol >= IPPROTO_MAX)
+		return -EINVAL;
+
 	/* Look for the requested type/protocol pair. */
 lookup_protocol:
 	err = -ESOCKTNOSUPPORT;
@@ -428,9 +431,11 @@ void inet6_destroy_sock(struct sock *sk)
 
 	/* Free tx options */
 
-	opt = xchg(&np->opt, NULL);
-	if (opt)
-		sock_kfree_s(sk, opt, opt->tot_len);
+	opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
+	if (opt) {
+		atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+		txopt_put(opt);
+	}
 }
 EXPORT_SYMBOL_GPL(inet6_destroy_sock);
 
@@ -659,7 +664,10 @@ int inet6_sk_rebuild_header(struct sock *sk)
 	fl6.fl6_sport = inet->inet_sport;
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-	final_p = fl6_update_dst(&fl6, np->opt, &final);
+	rcu_read_lock();
+	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
+				 &final);
+	rcu_read_unlock();
 
 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 	if (IS_ERR(dst)) {
@@ -668,7 +676,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
 			return PTR_ERR(dst);
 		}
 
-		__ip6_dst_store(sk, dst, NULL, NULL);
+		ip6_dst_store(sk, dst, NULL, NULL);
 	}
 
 	return 0;
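
The xchg()/atomic_sub()/txopt_put() sequence replacing sock_kfree_s() is the destruction half of making np->opt RCU-managed: the socket's memory accounting is released immediately, but the buffer itself must outlive any reader still inside an rcu_read_lock() section. The companion helpers, roughly as this series adds them to include/net/ipv6.h (a paraphrased sketch, not a verbatim quote):

static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
	struct ipv6_txoptions *opt;

	rcu_read_lock();
	opt = rcu_dereference(np->opt);
	if (opt && !atomic_inc_not_zero(&opt->refcnt))
		opt = NULL;	/* lost the race against the final put */
	rcu_read_unlock();
	return opt;
}

static inline void txopt_put(struct ipv6_txoptions *opt)
{
	if (opt && atomic_dec_and_test(&opt->refcnt))
		kfree_rcu(opt, rcu);	/* free after a grace period */
}

This is why ipv6_dup_options() and ipv6_renew_options() below initialize refcnt to 1, and why the sendmsg paths in raw.c, udp.c and l2tp_ip6.c pair txopt_get() with txopt_put().
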
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index d70b0238f468..517c55b01ba8 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -167,8 +167,10 @@ ipv4_connected:
 
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-	opt = flowlabel ? flowlabel->opt : np->opt;
+	rcu_read_lock();
+	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
 	final_p = fl6_update_dst(&fl6, opt, &final);
+	rcu_read_unlock();
 
 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 	err = 0;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index ce203b0402be..ea7c4d64a00a 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
 			*((char **)&opt2->dst1opt) += dif;
 		if (opt2->srcrt)
 			*((char **)&opt2->srcrt) += dif;
+		atomic_set(&opt2->refcnt, 1);
 	}
 	return opt2;
 }
@@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
 		return ERR_PTR(-ENOBUFS);
 
 	memset(opt2, 0, tot_len);
-
+	atomic_set(&opt2->refcnt, 1);
 	opt2->tot_len = tot_len;
 	p = (char *)(opt2 + 1);
 
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 36c5a98b0472..0a37ddc7af51 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -834,11 +834,6 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
 	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 }
 
-/*
- * Special lock-class for __icmpv6_sk:
- */
-static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
-
 static int __net_init icmpv6_sk_init(struct net *net)
 {
 	struct sock *sk;
@@ -860,15 +855,6 @@ static int __net_init icmpv6_sk_init(struct net *net)
 
 		net->ipv6.icmp_sk[i] = sk;
 
-		/*
-		 * Split off their lock-class, because sk->sk_dst_lock
-		 * gets used from softirqs, which is safe for
-		 * __icmpv6_sk (because those never get directly used
-		 * via userspace syscalls), but unsafe for normal sockets.
-		 */
-		lockdep_set_class(&sk->sk_dst_lock,
-				  &icmpv6_socket_sk_dst_lock_key);
-
 		/* Enough space for 2 64K ICMP packets, including
 		 * sk_buff struct overhead.
 		 */
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 5d1c7cee2cb2..a7ca2cde2ecb 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -78,7 +78,9 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk,
 	memset(fl6, 0, sizeof(*fl6));
 	fl6->flowi6_proto = proto;
 	fl6->daddr = ireq->ir_v6_rmt_addr;
-	final_p = fl6_update_dst(fl6, np->opt, &final);
+	rcu_read_lock();
+	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+	rcu_read_unlock();
 	fl6->saddr = ireq->ir_v6_loc_addr;
 	fl6->flowi6_oif = ireq->ir_iif;
 	fl6->flowi6_mark = ireq->ir_mark;
@@ -109,14 +111,6 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
 EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
 
 static inline
-void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
-			   const struct in6_addr *daddr,
-			   const struct in6_addr *saddr)
-{
-	__ip6_dst_store(sk, dst, daddr, saddr);
-}
-
-static inline
 struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
 {
 	return __sk_dst_check(sk, cookie);
@@ -142,14 +136,16 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
 	fl6->fl6_dport = inet->inet_dport;
 	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 
-	final_p = fl6_update_dst(fl6, np->opt, &final);
+	rcu_read_lock();
+	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+	rcu_read_unlock();
 
 	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
 	if (!dst) {
 		dst = ip6_dst_lookup_flow(sk, fl6, final_p);
 
 		if (!IS_ERR(dst))
-			__inet6_csk_dst_store(sk, dst, NULL, NULL);
+			ip6_dst_store(sk, dst, NULL, NULL);
 	}
 	return dst;
 }
@@ -175,7 +171,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
 	/* Restore final destination back after routing done */
 	fl6.daddr = sk->sk_v6_daddr;
 
-	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+	res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+		       np->tclass);
 	rcu_read_unlock();
 	return res;
 }
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 3c7b9310b33f..e5ea177d34c6 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1571,13 +1571,11 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
 			return -EEXIST;
 	} else {
 		t = nt;
-
-		ip6gre_tunnel_unlink(ign, t);
-		ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
-		ip6gre_tunnel_link(ign, t);
-		netdev_state_change(dev);
 	}
 
+	ip6gre_tunnel_unlink(ign, t);
+	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
+	ip6gre_tunnel_link(ign, t);
 	return 0;
 }
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index eabffbb89795..137fca42aaa6 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -177,7 +177,7 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t)
 	int i;
 
 	for_each_possible_cpu(i)
-		ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL);
+		ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
 
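
The one-liner above is a classic per-CPU iteration bug: raw_cpu_ptr() always resolves to the calling CPU's copy, so the loop cleared the same slot nr_cpu_ids times while every other CPU kept its stale cached dst; per_cpu_ptr(ptr, i) addresses CPU i's copy explicitly. A toy stand-alone model of the difference (an array stands in for the per-CPU area):

#include <stddef.h>

#define NR_CPUS 4

static void *dst_cache[NR_CPUS];
static int this_cpu = 2;	/* whichever CPU happens to run the loop */

static void reset_buggy(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		dst_cache[this_cpu] = NULL;	/* same slot every pass */
}

static void reset_fixed(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		dst_cache[i] = NULL;		/* each CPU's own slot */
}
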
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index ad19136086dd..a10e77103c88 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -118,7 +118,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
 			       int cmd);
 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
 			       struct netlink_callback *cb);
-static void mroute_clean_tables(struct mr6_table *mrt);
+static void mroute_clean_tables(struct mr6_table *mrt, bool all);
 static void ipmr_expire_process(unsigned long arg);
 
 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
@@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
 static void ip6mr_free_table(struct mr6_table *mrt)
 {
 	del_timer_sync(&mrt->ipmr_expire_timer);
-	mroute_clean_tables(mrt);
+	mroute_clean_tables(mrt, true);
 	kfree(mrt);
 }
 
@@ -765,10 +765,6 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
 	return dev;
 
 failure:
-	/* allow the register to be completed before unregistering. */
-	rtnl_unlock();
-	rtnl_lock();
-
 	unregister_netdevice(dev);
 	return NULL;
 }
@@ -1542,7 +1538,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
  *	Close the multicast socket, and clear the vif tables etc
  */
 
-static void mroute_clean_tables(struct mr6_table *mrt)
+static void mroute_clean_tables(struct mr6_table *mrt, bool all)
 {
 	int i;
 	LIST_HEAD(list);
@@ -1552,8 +1548,9 @@ static void mroute_clean_tables(struct mr6_table *mrt)
 	 *	Shut down all active vif entries
 	 */
 	for (i = 0; i < mrt->maxvif; i++) {
-		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
-			mif6_delete(mrt, i, &list);
+		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
+			continue;
+		mif6_delete(mrt, i, &list);
 	}
 	unregister_netdevice_many(&list);
 
@@ -1562,7 +1559,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
 	 */
 	for (i = 0; i < MFC6_LINES; i++) {
 		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
-			if (c->mfc_flags & MFC_STATIC)
+			if (!all && (c->mfc_flags & MFC_STATIC))
 				continue;
 			write_lock_bh(&mrt_lock);
 			list_del(&c->list);
@@ -1625,7 +1622,7 @@ int ip6mr_sk_done(struct sock *sk)
 						     net->ipv6.devconf_all);
 			write_unlock_bh(&mrt_lock);
 
-			mroute_clean_tables(mrt);
+			mroute_clean_tables(mrt, false);
 			err = 0;
 			break;
 		}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 63e6956917c9..4449ad1f8114 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -111,7 +111,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
 			icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
 		}
 	}
-	opt = xchg(&inet6_sk(sk)->opt, opt);
+	opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
+		   opt);
 	sk_dst_reset(sk);
 
 	return opt;
@@ -231,9 +232,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 			sk->sk_socket->ops = &inet_dgram_ops;
 			sk->sk_family = PF_INET;
 		}
-		opt = xchg(&np->opt, NULL);
-		if (opt)
-			sock_kfree_s(sk, opt, opt->tot_len);
+		opt = xchg((__force struct ipv6_txoptions **)&np->opt,
+			   NULL);
+		if (opt) {
+			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+			txopt_put(opt);
+		}
 		pktopt = xchg(&np->pktoptions, NULL);
 		kfree_skb(pktopt);
 
@@ -403,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
 			break;
 
-		opt = ipv6_renew_options(sk, np->opt, optname,
+		opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+		opt = ipv6_renew_options(sk, opt, optname,
 					 (struct ipv6_opt_hdr __user *)optval,
 					 optlen);
 		if (IS_ERR(opt)) {
@@ -432,8 +437,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 		retv = 0;
 		opt = ipv6_update_options(sk, opt);
 sticky_done:
-		if (opt)
-			sock_kfree_s(sk, opt, opt->tot_len);
+		if (opt) {
+			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+			txopt_put(opt);
+		}
 		break;
 	}
 
@@ -486,6 +493,7 @@ sticky_done:
 			break;
 
 		memset(opt, 0, sizeof(*opt));
+		atomic_set(&opt->refcnt, 1);
 		opt->tot_len = sizeof(*opt) + optlen;
 		retv = -EFAULT;
 		if (copy_from_user(opt+1, optval, optlen))
@@ -502,8 +510,10 @@ update:
 		retv = 0;
 		opt = ipv6_update_options(sk, opt);
 done:
-		if (opt)
-			sock_kfree_s(sk, opt, opt->tot_len);
+		if (opt) {
+			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+			txopt_put(opt);
+		}
 		break;
 	}
 	case IPV6_UNICAST_HOPS:
@@ -1110,10 +1120,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 	case IPV6_RTHDR:
 	case IPV6_DSTOPTS:
 	{
+		struct ipv6_txoptions *opt;
 
 		lock_sock(sk);
-		len = ipv6_getsockopt_sticky(sk, np->opt,
-					     optname, optval, len);
+		opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+		len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
 		release_sock(sk);
 		/* check if ipv6_getsockopt_sticky() returns err code */
 		if (len < 0)
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 124338a39e29..5ee56d0a8699 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1651,7 +1651,6 @@ out:
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
-		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
 	} else {
 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
 	}
@@ -2015,7 +2014,6 @@ out:
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, type);
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
-		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
 	} else
 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
 
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3e0f855e1bea..d6161e1c48c8 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -556,8 +556,7 @@ static void ndisc_send_unsol_na(struct net_device *dev)
 }
 
 void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
-		   const struct in6_addr *daddr, const struct in6_addr *saddr,
-		   struct sk_buff *oskb)
+		   const struct in6_addr *daddr, const struct in6_addr *saddr)
 {
 	struct sk_buff *skb;
 	struct in6_addr addr_buf;
@@ -593,9 +592,6 @@ void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit,
 		ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
 				       dev->dev_addr);
 
-	if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
-		skb_dst_copy(skb, oskb);
-
 	ndisc_send_skb(skb, daddr, saddr);
 }
 
@@ -682,12 +678,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
 				  "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
 				  __func__, target);
 		}
-		ndisc_send_ns(dev, target, target, saddr, skb);
+		ndisc_send_ns(dev, target, target, saddr);
 	} else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
 		neigh_app_ns(neigh);
 	} else {
 		addrconf_addr_solict_mult(target, &mcaddr);
-		ndisc_send_ns(dev, target, &mcaddr, saddr, skb);
+		ndisc_send_ns(dev, target, &mcaddr, saddr);
 	}
 }
 
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index f6a024e141e5..e10a04c9cdc7 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -49,6 +49,7 @@ config NFT_REJECT_IPV6
 
 config NFT_DUP_IPV6
 	tristate "IPv6 nf_tables packet duplication support"
+	depends on !NF_CONNTRACK || NF_CONNTRACK
 	select NF_DUP_IPV6
 	help
 	  This module enables IPv6 packet duplication support for nf_tables.
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index d5efeb87350e..bab4441ed4e4 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -190,7 +190,7 @@ static void nf_ct_frag6_expire(unsigned long data)
 /* Creation primitives. */
 static inline struct frag_queue *fq_find(struct net *net, __be32 id,
 					 u32 user, struct in6_addr *src,
-					 struct in6_addr *dst, u8 ecn)
+					 struct in6_addr *dst, int iif, u8 ecn)
 {
 	struct inet_frag_queue *q;
 	struct ip6_create_arg arg;
@@ -200,6 +200,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
 	arg.user = user;
 	arg.src = src;
 	arg.dst = dst;
+	arg.iif = iif;
 	arg.ecn = ecn;
 
 	local_bh_disable();
@@ -601,7 +602,7 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use
 	fhdr = (struct frag_hdr *)skb_transport_header(clone);
 
 	fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
-		     ip6_frag_ecn(hdr));
+		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
 	if (fq == NULL) {
 		pr_debug("Can't find and can't create new queue\n");
 		goto ret_orig;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index dc65ec198f7c..99140986e887 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -733,6 +733,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
 
 static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
+	struct ipv6_txoptions *opt_to_free = NULL;
 	struct ipv6_txoptions opt_space;
 	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
 	struct in6_addr *daddr, *final_p, final;
@@ -839,8 +840,10 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		if (!(opt->opt_nflen|opt->opt_flen))
 			opt = NULL;
 	}
-	if (!opt)
-		opt = np->opt;
+	if (!opt) {
+		opt = txopt_get(np);
+		opt_to_free = opt;
+	}
 	if (flowlabel)
 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
 	opt = ipv6_fixup_options(&opt_space, opt);
@@ -906,6 +909,7 @@ done:
 	dst_release(dst);
 out:
 	fl6_sock_release(flowlabel);
+	txopt_put(opt_to_free);
 	return err < 0 ? err : len;
 do_confirm:
 	dst_confirm(dst);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 44e21a03cfc3..45f5ae51de65 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
 	return	fq->id == arg->id &&
 		fq->user == arg->user &&
 		ipv6_addr_equal(&fq->saddr, arg->src) &&
-		ipv6_addr_equal(&fq->daddr, arg->dst);
+		ipv6_addr_equal(&fq->daddr, arg->dst) &&
+		(arg->iif == fq->iif ||
+		 !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
+					       IPV6_ADDR_LINKLOCAL)));
 }
 EXPORT_SYMBOL(ip6_frag_match);
 
@@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data)
 
 static struct frag_queue *
 fq_find(struct net *net, __be32 id, const struct in6_addr *src,
-	const struct in6_addr *dst, u8 ecn)
+	const struct in6_addr *dst, int iif, u8 ecn)
 {
 	struct inet_frag_queue *q;
 	struct ip6_create_arg arg;
@@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
 	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
 	arg.src = src;
 	arg.dst = dst;
+	arg.iif = iif;
 	arg.ecn = ecn;
 
 	hash = inet6_hash_frag(id, src, dst);
@@ -551,7 +555,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 	}
 
 	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
-		     ip6_frag_ecn(hdr));
+		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
 	if (fq) {
 		int ret;
 
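
Keying fragment queues on the arriving interface closes a mix-up window: link-local and multicast destinations are only meaningful per link, so fragments arriving on different interfaces must never be reassembled together, while globally-routed destinations may still legitimately arrive via different paths. A stand-alone restatement of the new match rule (simplified stand-in types):

#include <stdbool.h>

struct frag_key {
	unsigned int id;
	int iif;			/* interface the fragment arrived on */
	bool dst_linklocal_or_mcast;
};

static bool frag_match(const struct frag_key *q, const struct frag_key *arg)
{
	return q->id == arg->id &&
	       (arg->iif == q->iif || !arg->dst_linklocal_or_mcast);
}
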
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c8bc9b4ac328..826e6aa44f8d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -404,6 +404,14 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 	}
 }
 
+static bool __rt6_check_expired(const struct rt6_info *rt)
+{
+	if (rt->rt6i_flags & RTF_EXPIRES)
+		return time_after(jiffies, rt->dst.expires);
+	else
+		return false;
+}
+
 static bool rt6_check_expired(const struct rt6_info *rt)
 {
 	if (rt->rt6i_flags & RTF_EXPIRES) {
@@ -515,7 +523,7 @@ static void rt6_probe_deferred(struct work_struct *w)
 		container_of(w, struct __rt6_probe_work, work);
 
 	addrconf_addr_solict_mult(&work->target, &mcaddr);
-	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, NULL);
+	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL);
 	dev_put(work->dev);
 	kfree(work);
 }
@@ -1252,7 +1260,8 @@ static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
 
 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
 {
-	if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
+	if (!__rt6_check_expired(rt) &&
+	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
 	    rt6_check((struct rt6_info *)(rt->dst.from), cookie))
 		return &rt->dst;
 	else
@@ -1272,7 +1281,8 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 
 	rt6_dst_from_metrics_check(rt);
 
-	if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE))
+	if (rt->rt6i_flags & RTF_PCPU ||
+	    (unlikely(dst->flags & DST_NOCACHE) && rt->dst.from))
 		return rt6_dst_from_check(rt, cookie);
 	else
 		return rt6_check(rt, cookie);
@@ -1322,6 +1332,12 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
 	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
 }
 
+static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
+{
+	return !(rt->rt6i_flags & RTF_CACHE) &&
+		(rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
+}
+
 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 				 const struct ipv6hdr *iph, u32 mtu)
 {
@@ -1335,7 +1351,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 	if (mtu >= dst_mtu(dst))
 		return;
 
-	if (rt6->rt6i_flags & RTF_CACHE) {
+	if (!rt6_cache_allowed_for_pmtu(rt6)) {
 		rt6_do_update_pmtu(rt6, mtu);
 	} else {
 		const struct in6_addr *daddr, *saddr;
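
__rt6_check_expired() deliberately does less than rt6_check_expired(): the latter also recurses into the parent route hanging off rt->dst.from, while the new helper asks only whether the clone itself has timed out. rt6_dst_from_check() then validates the parent separately via rt6_check(), so an expired pcpu or uncached clone is refused even when its parent is still valid. The shape of the new check, restated stand-alone (time_after() modelled as a plain comparison):

#include <stdbool.h>

struct rt_sketch {
	bool rtf_expires;	/* RTF_EXPIRES set on this route? */
	unsigned long expires;
	unsigned long jiffies_now;
};

static bool rt_itself_expired(const struct rt_sketch *rt)
{
	return rt->rtf_expires && rt->jiffies_now > rt->expires;
}
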
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index bb8f2fa1c7fb..eaf7ac496d50 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -222,7 +222,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_proto = IPPROTO_TCP;
 	fl6.daddr = ireq->ir_v6_rmt_addr;
-	final_p = fl6_update_dst(&fl6, np->opt, &final);
+	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
 	fl6.saddr = ireq->ir_v6_loc_addr;
 	fl6.flowi6_oif = sk->sk_bound_dev_if;
 	fl6.flowi6_mark = ireq->ir_mark;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5baa8e754e41..6b8a8a9091fa 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -93,10 +93,9 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 
-	if (dst) {
+	if (dst && dst_hold_safe(dst)) {
 		const struct rt6_info *rt = (const struct rt6_info *)dst;
 
-		dst_hold(dst);
 		sk->sk_rx_dst = dst;
 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
@@ -120,6 +119,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct in6_addr *saddr = NULL, *final_p, final;
+	struct ipv6_txoptions *opt;
 	struct flowi6 fl6;
 	struct dst_entry *dst;
 	int addr_type;
@@ -235,7 +235,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	fl6.fl6_dport = usin->sin6_port;
 	fl6.fl6_sport = inet->inet_sport;
 
-	final_p = fl6_update_dst(&fl6, np->opt, &final);
+	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
+	final_p = fl6_update_dst(&fl6, opt, &final);
 
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
@@ -255,7 +256,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 		inet->inet_rcv_saddr = LOOPBACK4_IPV6;
 
 	sk->sk_gso_type = SKB_GSO_TCPV6;
-	__ip6_dst_store(sk, dst, NULL, NULL);
+	ip6_dst_store(sk, dst, NULL, NULL);
 
 	if (tcp_death_row.sysctl_tw_recycle &&
 	    !tp->rx_opt.ts_recent_stamp &&
@@ -263,9 +264,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 		tcp_fetch_timewait_stamp(sk, dst);
 
 	icsk->icsk_ext_hdr_len = 0;
-	if (np->opt)
-		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
-					  np->opt->opt_nflen);
+	if (opt)
+		icsk->icsk_ext_hdr_len = opt->opt_flen +
+					 opt->opt_nflen;
 
 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
 
@@ -461,7 +462,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 		if (np->repflow && ireq->pktopts)
 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
-		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
+		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
+			       np->tclass);
 		err = net_xmit_eval(err);
 	}
 
@@ -972,6 +974,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 	struct inet_request_sock *ireq;
 	struct ipv6_pinfo *newnp;
 	const struct ipv6_pinfo *np = inet6_sk(sk);
+	struct ipv6_txoptions *opt;
 	struct tcp6_sock *newtcp6sk;
 	struct inet_sock *newinet;
 	struct tcp_sock *newtp;
@@ -1056,7 +1059,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 	 */
 
 	newsk->sk_gso_type = SKB_GSO_TCPV6;
-	__ip6_dst_store(newsk, dst, NULL, NULL);
+	ip6_dst_store(newsk, dst, NULL, NULL);
 	inet6_sk_rx_dst_set(newsk, skb);
 
 	newtcp6sk = (struct tcp6_sock *)newsk;
@@ -1098,13 +1101,15 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 	   but we make one more one thing there: reattach optmem
 	   to newsk.
 	 */
-	if (np->opt)
-		newnp->opt = ipv6_dup_options(newsk, np->opt);
-
+	opt = rcu_dereference(np->opt);
+	if (opt) {
+		opt = ipv6_dup_options(newsk, opt);
+		RCU_INIT_POINTER(newnp->opt, opt);
+	}
 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
-	if (newnp->opt)
-		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
-						     newnp->opt->opt_flen);
+	if (opt)
+		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
+						    opt->opt_flen;
 
 	tcp_ca_openreq_child(newsk, dst);
 
@@ -1690,6 +1695,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	const struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
+	int rx_queue;
+	int state;
 
 	dest = &sp->sk_v6_daddr;
 	src = &sp->sk_v6_rcv_saddr;
@@ -1710,6 +1717,15 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		timer_expires = jiffies;
 	}
 
+	state = sk_state_load(sp);
+	if (state == TCP_LISTEN)
+		rx_queue = sp->sk_ack_backlog;
+	else
+		/* Because we don't lock the socket,
+		 * we might find a transient negative value.
+		 */
+		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
@@ -1718,9 +1734,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
-		   sp->sk_state,
-		   tp->write_seq-tp->snd_una,
-		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
+		   state,
+		   tp->write_seq - tp->snd_una,
+		   rx_queue,
 		   timer_active,
 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
 		   icsk->icsk_retransmits,
@@ -1732,7 +1748,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		   tp->snd_cwnd,
-		   sp->sk_state == TCP_LISTEN ?
+		   state == TCP_LISTEN ?
 			fastopenq->max_qlen :
 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
 		   );
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 01bcb49619ee..9da3287a3923 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1110,6 +1110,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
 	struct in6_addr *daddr, *final_p, final;
 	struct ipv6_txoptions *opt = NULL;
+	struct ipv6_txoptions *opt_to_free = NULL;
 	struct ip6_flowlabel *flowlabel = NULL;
 	struct flowi6 fl6;
 	struct dst_entry *dst;
@@ -1263,8 +1264,10 @@ do_udp_sendmsg:
 			opt = NULL;
 		connected = 0;
 	}
-	if (!opt)
-		opt = np->opt;
+	if (!opt) {
+		opt = txopt_get(np);
+		opt_to_free = opt;
+	}
 	if (flowlabel)
 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
 	opt = ipv6_fixup_options(&opt_space, opt);
@@ -1373,6 +1376,7 @@ release_dst:
 out:
 	dst_release(dst);
 	fl6_sock_release(flowlabel);
+	txopt_put(opt_to_free);
 	if (!err)
 		return len;
 	/*
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index e6aa48b5395c..923abd6b3064 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1086,6 +1086,9 @@ static int irda_create(struct net *net, struct socket *sock, int protocol,
 	struct sock *sk;
 	struct irda_sock *self;
 
+	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
+		return -EINVAL;
+
 	if (net != &init_net)
 		return -EAFNOSUPPORT;
 
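
The added range check is basic input validation: 'protocol' arrives straight from userspace via socket(2) and is later stored into fields sized for a small protocol number, so negative or oversized values must be rejected up front. SK_PROTOCOL_MAX is defined as U8_MAX alongside this change; a minimal restatement:

#include <errno.h>

#define SK_PROTOCOL_MAX 255	/* U8_MAX */

static int check_protocol(int protocol)
{
	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
		return -EINVAL;
	return 0;
}

inet6_create() earlier in this series applies the same idea, with IPPROTO_MAX as the bound.
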
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index fcb2752419c6..435608c4306d 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1483,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
 	if (sock_writeable(sk) && iucv_below_msglim(sk))
 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
 	else
-		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
 	return mask;
 }
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index aca38d8aed8e..a2c8747d2936 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -486,6 +486,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
 	struct in6_addr *daddr, *final_p, final;
 	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct ipv6_txoptions *opt_to_free = NULL;
 	struct ipv6_txoptions *opt = NULL;
 	struct ip6_flowlabel *flowlabel = NULL;
 	struct dst_entry *dst = NULL;
@@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 		opt = NULL;
 	}
 
-	if (opt == NULL)
-		opt = np->opt;
+	if (!opt) {
+		opt = txopt_get(np);
+		opt_to_free = opt;
+	}
 	if (flowlabel)
 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
 	opt = ipv6_fixup_options(&opt_space, opt);
@@ -631,6 +634,7 @@ done:
 	dst_release(dst);
 out:
 	fl6_sock_release(flowlabel);
+	txopt_put(opt_to_free);
 
 	return err < 0 ? err : len;
 
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index a758eb84e8f0..ff757181b0a8 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -500,7 +500,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 	/* send AddBA request */
 	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
 				     tid_tx->dialog_token, start_seq_num,
-				     local->hw.max_tx_aggregation_subframes,
+				     IEEE80211_MAX_AMPDU_BUF,
 				     tid_tx->timeout);
 }
 
@@ -926,6 +926,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
 	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
 	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
+	buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);
 
 	mutex_lock(&sta->ampdu_mlme.mtx);
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c2bd1b6a6922..c12f348138ac 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1169,8 +1169,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
 		 * rc isn't initialized here yet, so ignore it
 		 */
 		__ieee80211_vht_handle_opmode(sdata, sta,
-					      params->opmode_notif,
-					      band, false);
+					      params->opmode_notif, band);
 	}
 
 	if (ieee80211_vif_is_mesh(&sdata->vif))
@@ -3454,8 +3453,12 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 			goto out_unlock;
 		}
 	} else {
-		/* for cookie below */
-		ack_skb = skb;
+		/* Assign a dummy non-zero cookie, it's not sent to
+		 * userspace in this case but we rely on its value
+		 * internally in the need_offchan case to distinguish
+		 * mgmt-tx from remain-on-channel.
+		 */
+		*cookie = 0xffffffff;
 	}
 
 	if (!need_offchan) {
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d832bd59236b..5322b4c71630 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1709,10 +1709,10 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
 void ieee80211_sta_set_rx_nss(struct sta_info *sta);
 u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
 				  struct sta_info *sta, u8 opmode,
-				  enum ieee80211_band band, bool nss_only);
+				  enum ieee80211_band band);
 void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
 				 struct sta_info *sta, u8 opmode,
-				 enum ieee80211_band band, bool nss_only);
+				 enum ieee80211_band band);
 void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
 				      struct ieee80211_sta_vht_cap *vht_cap);
 void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d0dc1bfaeec2..c9e325d2e120 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -76,7 +76,8 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
 void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
 			      bool update_bss)
 {
-	if (__ieee80211_recalc_txpower(sdata) || update_bss)
+	if (__ieee80211_recalc_txpower(sdata) ||
+	    (update_bss && ieee80211_sdata_running(sdata)))
 		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);
 }
 
@@ -1861,6 +1862,7 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
 		unregister_netdevice(sdata->dev);
 	} else {
 		cfg80211_unregister_wdev(&sdata->wdev);
+		ieee80211_teardown_sdata(sdata);
 		kfree(sdata);
 	}
 }
@@ -1870,7 +1872,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
 	if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state)))
 		return;
 	ieee80211_do_stop(sdata, true);
-	ieee80211_teardown_sdata(sdata);
 }
 
 void ieee80211_remove_interfaces(struct ieee80211_local *local)
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 858f6b1cb149..175ffcf7fb06 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -541,8 +541,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
 			   NL80211_FEATURE_HT_IBSS |
 			   NL80211_FEATURE_VIF_TXPOWER |
 			   NL80211_FEATURE_MAC_ON_CREATE |
-			   NL80211_FEATURE_USERSPACE_MPM |
-			   NL80211_FEATURE_FULL_AP_CLIENT_STATE;
+			   NL80211_FEATURE_USERSPACE_MPM;
 
 	if (!ops->hw_scan)
 		wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index b890e225a8f1..b3b44a5dd375 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -779,10 +779,8 @@ void mesh_plink_broken(struct sta_info *sta)
 static void mesh_path_node_reclaim(struct rcu_head *rp)
 {
 	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
-	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
 
 	del_timer_sync(&node->mpath->timer);
-	atomic_dec(&sdata->u.mesh.mpaths);
 	kfree(node->mpath);
 	kfree(node);
 }
@@ -790,8 +788,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
 /* needs to be called with the corresponding hashwlock taken */
 static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
 {
-	struct mesh_path *mpath;
-	mpath = node->mpath;
+	struct mesh_path *mpath = node->mpath;
+	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+
 	spin_lock(&mpath->state_lock);
 	mpath->flags |= MESH_PATH_RESOLVING;
 	if (mpath->is_gate)
@@ -799,6 +798,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
 	hlist_del_rcu(&node->list);
 	call_rcu(&node->rcu, mesh_path_node_reclaim);
 	spin_unlock(&mpath->state_lock);
+	atomic_dec(&sdata->u.mesh.mpaths);
 	atomic_dec(&tbl->entries);
 }
 
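
The mesh_pathtbl.c hunks above move the per-sdata mpath accounting out of the RCU reclaim callback and into the locked deletion path. A minimal sketch of the general pattern, assuming a made-up struct item:

	/* Sketch: do bookkeeping synchronously at removal time; leave the
	 * RCU callback to free memory only, since it runs after a grace
	 * period when surrounding objects may already be going away.
	 */
	struct item {
		struct rcu_head rcu;
		/* ... payload ... */
	};

	static void item_reclaim(struct rcu_head *rp)
	{
		kfree(container_of(rp, struct item, rcu));	/* free only */
	}

	static void item_del(struct item *it, atomic_t *count)
	{
		call_rcu(&it->rcu, item_reclaim);
		atomic_dec(count);	/* account here, under the caller's lock */
	}
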
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b140cc6651f4..3aa04344942b 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1379,21 +1379,26 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
 	 */
 	if (has_80211h_pwr &&
 	    (!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) {
+		new_ap_level = pwr_level_80211h;
+
+		if (sdata->ap_power_level == new_ap_level)
+			return 0;
+
 		sdata_dbg(sdata,
 			  "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
 			  pwr_level_80211h, chan_pwr, pwr_reduction_80211h,
 			  sdata->u.mgd.bssid);
-		new_ap_level = pwr_level_80211h;
 	} else { /* has_cisco_pwr is always true here. */
+		new_ap_level = pwr_level_cisco;
+
+		if (sdata->ap_power_level == new_ap_level)
+			return 0;
+
 		sdata_dbg(sdata,
 			  "Limiting TX power to %d dBm as advertised by %pM\n",
 			  pwr_level_cisco, sdata->u.mgd.bssid);
-		new_ap_level = pwr_level_cisco;
 	}
 
-	if (sdata->ap_power_level == new_ap_level)
-		return 0;
-
 	sdata->ap_power_level = new_ap_level;
 	if (__ieee80211_recalc_txpower(sdata))
 		return BSS_CHANGED_TXPOWER;
@@ -3575,7 +3580,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 
 	if (sta && elems.opmode_notif)
 		ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif,
-					    rx_status->band, true);
+					    rx_status->band);
 	mutex_unlock(&local->sta_mtx);
 
 	changed |= ieee80211_handle_pwr_constr(sdata, chan, mgmt,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 8bae5de0dc44..82af407fea7a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2736,8 +2736,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 		opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
 
 		ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
-					    opmode, status->band,
-					    false);
+					    opmode, status->band);
 		goto handled;
 	}
 	default:
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 4aeca4b0c3cb..a413e52f7691 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -597,8 +597,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
 	/* We need to ensure power level is at max for scanning. */
 	ieee80211_hw_config(local, 0);
 
-	if ((req->channels[0]->flags &
-	     IEEE80211_CHAN_NO_IR) ||
+	if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR |
+					IEEE80211_CHAN_RADAR)) ||
 	    !req->n_ssids) {
 		next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
 	} else {
@@ -645,7 +645,7 @@ ieee80211_scan_get_channel_time(struct ieee80211_channel *chan)
 	 * TODO: channel switching also consumes quite some time,
 	 * add that delay as well to get a better estimation
 	 */
-	if (chan->flags & IEEE80211_CHAN_NO_IR)
+	if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR))
 		return IEEE80211_PASSIVE_CHANNEL_TIME;
 	return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME;
 }
@@ -777,7 +777,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
 	 *
 	 * In any case, it is not necessary for a passive scan.
 	 */
-	if (chan->flags & IEEE80211_CHAN_NO_IR || !scan_req->n_ssids) {
+	if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) ||
+	    !scan_req->n_ssids) {
 		*next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
 		local->next_scan_state = SCAN_DECISION;
 		return;
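
All three scan.c hunks apply the same rule; a compact restatement of the decision they implement (a sketch, not mac80211 code - the helper name is invented):

	/* Sketch: channels where we must not initiate radiation (NO_IR) or
	 * that require radar detection (RADAR) are scanned passively, as is
	 * any scan with no SSIDs to probe for.
	 */
	static bool scan_channel_is_passive(u32 chan_flags, int n_ssids)
	{
		return (chan_flags & (IEEE80211_CHAN_NO_IR |
				      IEEE80211_CHAN_RADAR)) ||
		       !n_ssids;
	}
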
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 74058020b7d6..33344f5a66a8 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1641,6 +1641,29 @@ void ieee80211_stop_device(struct ieee80211_local *local)
 	drv_stop(local);
 }
 
+static void ieee80211_flush_completed_scan(struct ieee80211_local *local,
+					   bool aborted)
+{
+	/* It's possible that we don't handle the scan completion in
+	 * time during suspend, so if it's still marked as completed
+	 * here, queue the work and flush it to clean things up.
+	 * Instead of calling the worker function directly here, we
+	 * really queue it to avoid potential races with other flows
+	 * scheduling the same work.
+	 */
+	if (test_bit(SCAN_COMPLETED, &local->scanning)) {
+		/* If coming from reconfiguration failure, abort the scan so
+		 * we don't attempt to continue a partial HW scan - which is
+		 * possible otherwise if (e.g.) the 2.4 GHz portion was the
+		 * completed scan, and a 5 GHz portion is still pending.
+		 */
+		if (aborted)
+			set_bit(SCAN_ABORTED, &local->scanning);
+		ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
+		flush_delayed_work(&local->scan_work);
+	}
+}
+
 static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
 {
 	struct ieee80211_sub_if_data *sdata;
@@ -1660,6 +1683,8 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
 	local->suspended = false;
 	local->in_reconfig = false;
 
+	ieee80211_flush_completed_scan(local, true);
+
 	/* scheduled scan clearly can't be running any more, but tell
 	 * cfg80211 and clear local state
 	 */
@@ -1698,6 +1723,27 @@ static void ieee80211_assign_chanctx(struct ieee80211_local *local,
 	mutex_unlock(&local->chanctx_mtx);
 }
 
+static void ieee80211_reconfig_stations(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct sta_info *sta;
+
+	/* add STAs back */
+	mutex_lock(&local->sta_mtx);
+	list_for_each_entry(sta, &local->sta_list, list) {
+		enum ieee80211_sta_state state;
+
+		if (!sta->uploaded || sta->sdata != sdata)
+			continue;
+
+		for (state = IEEE80211_STA_NOTEXIST;
+		     state < sta->sta_state; state++)
+			WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
+					      state + 1));
+	}
+	mutex_unlock(&local->sta_mtx);
+}
+
 int ieee80211_reconfig(struct ieee80211_local *local)
 {
 	struct ieee80211_hw *hw = &local->hw;
@@ -1833,50 +1879,11 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 		WARN_ON(drv_add_chanctx(local, ctx));
 		mutex_unlock(&local->chanctx_mtx);
 
-		list_for_each_entry(sdata, &local->interfaces, list) {
-			if (!ieee80211_sdata_running(sdata))
-				continue;
-			ieee80211_assign_chanctx(local, sdata);
-		}
-
 		sdata = rtnl_dereference(local->monitor_sdata);
 		if (sdata && ieee80211_sdata_running(sdata))
 			ieee80211_assign_chanctx(local, sdata);
 	}
 
-	/* add STAs back */
-	mutex_lock(&local->sta_mtx);
-	list_for_each_entry(sta, &local->sta_list, list) {
-		enum ieee80211_sta_state state;
-
-		if (!sta->uploaded)
-			continue;
-
-		/* AP-mode stations will be added later */
-		if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
-			continue;
-
-		for (state = IEEE80211_STA_NOTEXIST;
-		     state < sta->sta_state; state++)
-			WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
-					      state + 1));
-	}
-	mutex_unlock(&local->sta_mtx);
-
-	/* reconfigure tx conf */
-	if (hw->queues >= IEEE80211_NUM_ACS) {
-		list_for_each_entry(sdata, &local->interfaces, list) {
-			if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
-			    sdata->vif.type == NL80211_IFTYPE_MONITOR ||
-			    !ieee80211_sdata_running(sdata))
-				continue;
-
-			for (i = 0; i < IEEE80211_NUM_ACS; i++)
-				drv_conf_tx(local, sdata, i,
-					    &sdata->tx_conf[i]);
-		}
-	}
-
 	/* reconfigure hardware */
 	ieee80211_hw_config(local, ~0);
 
@@ -1889,6 +1896,22 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 		if (!ieee80211_sdata_running(sdata))
 			continue;
 
+		ieee80211_assign_chanctx(local, sdata);
+
+		switch (sdata->vif.type) {
+		case NL80211_IFTYPE_AP_VLAN:
+		case NL80211_IFTYPE_MONITOR:
+			break;
+		default:
+			ieee80211_reconfig_stations(sdata);
+			/* fall through */
+		case NL80211_IFTYPE_AP: /* AP stations are handled later */
+			for (i = 0; i < IEEE80211_NUM_ACS; i++)
+				drv_conf_tx(local, sdata, i,
+					    &sdata->tx_conf[i]);
+			break;
+		}
+
 		/* common change flags for all interface types */
 		changed = BSS_CHANGED_ERP_CTS_PROT |
 			  BSS_CHANGED_ERP_PREAMBLE |
@@ -2074,17 +2097,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 	mb();
 	local->resuming = false;
 
-	/* It's possible that we don't handle the scan completion in
-	 * time during suspend, so if it's still marked as completed
-	 * here, queue the work and flush it to clean things up.
-	 * Instead of calling the worker function directly here, we
-	 * really queue it to avoid potential races with other flows
-	 * scheduling the same work.
-	 */
-	if (test_bit(SCAN_COMPLETED, &local->scanning)) {
-		ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0);
-		flush_delayed_work(&local->scan_work);
-	}
+	ieee80211_flush_completed_scan(local, false);
 
 	if (local->open_count && !reconfig_due_to_wowlan)
 		drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
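
The comment that moved into ieee80211_flush_completed_scan() above describes a pattern worth calling out: queue the work item and then flush it, rather than calling the worker directly. A minimal sketch with the generic workqueue API (illustrative, outside mac80211):

	/* Sketch: let the workqueue own execution so two callers can't run
	 * the handler concurrently; flushing then waits for it to finish.
	 */
	#include <linux/workqueue.h>

	static void cleanup_worker(struct work_struct *work)
	{
		/* ... handle the completed scan ... */
	}

	static DECLARE_DELAYED_WORK(cleanup_work, cleanup_worker);

	static void flush_pending_cleanup(void)
	{
		schedule_delayed_work(&cleanup_work, 0); /* no-op if queued */
		flush_delayed_work(&cleanup_work);	 /* wait for it */
	}
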
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index ff1c798921a6..c38b2f07a919 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -378,7 +378,7 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
 
 u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
 				  struct sta_info *sta, u8 opmode,
-				  enum ieee80211_band band, bool nss_only)
+				  enum ieee80211_band band)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
@@ -401,9 +401,6 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
 		changed |= IEEE80211_RC_NSS_CHANGED;
 	}
 
-	if (nss_only)
-		return changed;
-
 	switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
 	case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ:
 		sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20;
@@ -430,13 +427,12 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
 
 void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
 				 struct sta_info *sta, u8 opmode,
-				 enum ieee80211_band band, bool nss_only)
+				 enum ieee80211_band band)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
 
-	u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode,
-						    band, nss_only);
+	u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band);
 
 	if (changed > 0)
 		rate_control_rate_update(local, sband, sta, changed);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index c70d750148b6..c32fc411a911 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -27,6 +27,8 @@
  */
 #define MAX_MP_SELECT_LABELS 4
 
+#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)
+
 static int zero = 0;
 static int label_limit = (1 << 20) - 1;
 
@@ -317,7 +319,13 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
 		}
 	}
 
-	err = neigh_xmit(nh->nh_via_table, out_dev, mpls_nh_via(rt, nh), skb);
+	/* If via wasn't specified then send out using device address */
+	if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
+		err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
+				 out_dev->dev_addr, skb);
+	else
+		err = neigh_xmit(nh->nh_via_table, out_dev,
+				 mpls_nh_via(rt, nh), skb);
 	if (err)
 		net_dbg_ratelimited("%s: packet transmission failed: %d\n",
 				    __func__, err);
@@ -534,6 +542,10 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
 	if (!mpls_dev_get(dev))
 		goto errout;
 
+	if ((nh->nh_via_table == NEIGH_LINK_TABLE) &&
+	    (dev->addr_len != nh->nh_via_alen))
+		goto errout;
+
 	RCU_INIT_POINTER(nh->nh_dev, dev);
 
 	return 0;
@@ -592,10 +604,14 @@ static int mpls_nh_build(struct net *net, struct mpls_route *rt,
 		goto errout;
 	}
 
-	err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
-			  __mpls_nh_via(rt, nh));
-	if (err)
-		goto errout;
+	if (via) {
+		err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
+				  __mpls_nh_via(rt, nh));
+		if (err)
+			goto errout;
+	} else {
+		nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC;
+	}
 
 	err = mpls_nh_assign_dev(net, rt, nh, oif);
 	if (err)
@@ -677,9 +693,6 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg,
 			nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
 		}
 
-		if (!nla_via)
-			goto errout;
-
 		err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
 				    rtnh->rtnh_ifindex, nla_via,
 				    nla_newdst);
@@ -1118,6 +1131,7 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 	cfg->rc_label = LABEL_NOT_SPECIFIED;
 	cfg->rc_protocol = rtm->rtm_protocol;
+	cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC;
 	cfg->rc_nlflags = nlh->nlmsg_flags;
 	cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid;
 	cfg->rc_nlinfo.nlh = nlh;
@@ -1231,7 +1245,8 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
 		    nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
 				   nh->nh_label))
 			goto nla_put_failure;
-		if (nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
+		if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
+		    nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
 				nh->nh_via_alen))
 			goto nla_put_failure;
 		dev = rtnl_dereference(nh->nh_dev);
@@ -1257,7 +1272,8 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
 					    nh->nh_labels,
 					    nh->nh_label))
 				goto nla_put_failure;
-			if (nla_put_via(skb, nh->nh_via_table,
+			if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
+			    nla_put_via(skb, nh->nh_via_table,
 					mpls_nh_via(rt, nh),
 					nh->nh_via_alen))
 				goto nla_put_failure;
@@ -1319,7 +1335,8 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
 
 		if (nh->nh_dev)
 			payload += nla_total_size(4); /* RTA_OIF */
-		payload += nla_total_size(2 + nh->nh_via_alen); /* RTA_VIA */
+		if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */
+			payload += nla_total_size(2 + nh->nh_via_alen);
 		if (nh->nh_labels) /* RTA_NEWDST */
 			payload += nla_total_size(nh->nh_labels * 4);
 	} else {
@@ -1328,7 +1345,9 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
 
 		for_nexthops(rt) {
 			nhsize += nla_total_size(sizeof(struct rtnexthop));
-			nhsize += nla_total_size(2 + nh->nh_via_alen);
+			/* RTA_VIA */
+			if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
+				nhsize += nla_total_size(2 + nh->nh_via_alen);
 			if (nh->nh_labels)
 				nhsize += nla_total_size(nh->nh_labels * 4);
 		} endfor_nexthops(rt);
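
The af_mpls.c hunks all hinge on one sentinel: a via-table value one past the last valid neighbour table marks "RTA_VIA not given". A sketch of the optional-attribute pattern they implement (illustrative, using names from the diff above):

	/* Sketch: pick the sentinel outside the valid range, default to it,
	 * and make every consumer (xmit, dump, size estimate) check it.
	 */
	#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)

	nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC;	/* default: no via */

	if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
		err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
				 out_dev->dev_addr, skb); /* device address */
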
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 67591aef9cae..64afd3d0b144 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -54,10 +54,10 @@ int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 	unsigned int ttl;
 
 	/* Obtain the ttl */
-	if (skb->protocol == htons(ETH_P_IP)) {
+	if (dst->ops->family == AF_INET) {
 		ttl = ip_hdr(skb)->ttl;
 		rt = (struct rtable *)dst;
-	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+	} else if (dst->ops->family == AF_INET6) {
 		ttl = ipv6_hdr(skb)->hop_limit;
 		rt6 = (struct rt6_info *)dst;
 	} else {
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index e22349ea7256..4692782b5280 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -869,7 +869,7 @@ config NETFILTER_XT_TARGET_TEE
 	depends on IPV6 || IPV6=n
 	depends on !NF_CONNTRACK || NF_CONNTRACK
 	select NF_DUP_IPV4
-	select NF_DUP_IPV6 if IP6_NF_IPTABLES
+	select NF_DUP_IPV6 if IP6_NF_IPTABLES != n
 	---help---
 	  This option adds a "TEE" target with which a packet can be cloned and
 	  this clone be rerouted to another nexthop.
@@ -882,7 +882,7 @@ config NETFILTER_XT_TARGET_TPROXY
 	depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
 	depends on IP_NF_MANGLE
 	select NF_DEFRAG_IPV4
-	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
+	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
 	help
 	  This option adds a `TPROXY' target, which is somewhat similar to
 	  REDIRECT. It can only be used in the mangle table and is useful
@@ -1375,7 +1375,7 @@ config NETFILTER_XT_MATCH_SOCKET
 	depends on IPV6 || IPV6=n
 	depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
 	select NF_DEFRAG_IPV4
-	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
+	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
 	help
 	  This option adds a `socket' match, which can be used to match
 	  packets for which a TCP or UDP socket lookup finds a valid socket.
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index d05e759ed0fa..b0bc475f641e 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -33,7 +33,7 @@
 #define mtype_gc	IPSET_TOKEN(MTYPE, _gc)
 #define mtype		MTYPE
 
-#define get_ext(set, map, id)	((map)->extensions + (set)->dsize * (id))
+#define get_ext(set, map, id)	((map)->extensions + ((set)->dsize * (id)))
 
 static void
 mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
@@ -67,12 +67,9 @@ mtype_destroy(struct ip_set *set)
 		del_timer_sync(&map->gc);
 
 	ip_set_free(map->members);
-	if (set->dsize) {
-		if (set->extensions & IPSET_EXT_DESTROY)
-			mtype_ext_cleanup(set);
-		ip_set_free(map->extensions);
-	}
-	kfree(map);
+	if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
+		mtype_ext_cleanup(set);
+	ip_set_free(map);
 
 	set->data = NULL;
 }
@@ -92,16 +89,14 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
 {
 	const struct mtype *map = set->data;
 	struct nlattr *nested;
+	size_t memsize = sizeof(*map) + map->memsize;
 
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
 	if (mtype_do_head(skb, map) ||
 	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
-	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
-			  htonl(sizeof(*map) +
-				map->memsize +
-				set->dsize * map->elements)))
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
 		goto nla_put_failure;
 	if (unlikely(ip_set_put_flags(skb, set)))
 		goto nla_put_failure;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 64a564334418..4783efff0bde 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -41,7 +41,6 @@ MODULE_ALIAS("ip_set_bitmap:ip");
 /* Type structure */
 struct bitmap_ip {
 	void *members;		/* the set members */
-	void *extensions;	/* data extensions */
 	u32 first_ip;		/* host byte order, included in range */
 	u32 last_ip;		/* host byte order, included in range */
 	u32 elements;		/* number of max elements in the set */
@@ -49,6 +48,8 @@ struct bitmap_ip {
 	size_t memsize;		/* members size */
 	u8 netmask;		/* subnet netmask */
 	struct timer_list gc;	/* garbage collection */
+	unsigned char extensions[0]	/* data extensions */
+		__aligned(__alignof__(u64));
 };
 
 /* ADT structure for generic function args */
@@ -224,13 +225,6 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
 	map->members = ip_set_alloc(map->memsize);
 	if (!map->members)
 		return false;
-	if (set->dsize) {
-		map->extensions = ip_set_alloc(set->dsize * elements);
-		if (!map->extensions) {
-			kfree(map->members);
-			return false;
-		}
-	}
 	map->first_ip = first_ip;
 	map->last_ip = last_ip;
 	map->elements = elements;
@@ -316,13 +310,13 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 	pr_debug("hosts %u, elements %llu\n",
 		 hosts, (unsigned long long)elements);
 
-	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	set->dsize = ip_set_elem_len(set, tb, 0, 0);
+	map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
 	if (!map)
 		return -ENOMEM;
 
 	map->memsize = bitmap_bytes(0, elements - 1);
 	set->variant = &bitmap_ip;
-	set->dsize = ip_set_elem_len(set, tb, 0);
 	if (!init_map_ip(set, map, first_ip, last_ip,
 			 elements, hosts, netmask)) {
 		kfree(map);
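
Across the three bitmap types the pattern above is the same: the separate extensions pointer becomes a trailing flexible array, so one allocation covers the map header and its per-element extension data. A reduced sketch of that layout (illustrative; the field set is trimmed):

	/* Sketch: header and extension storage share a single allocation,
	 * sized as sizeof(*map) + elements * set->dsize, and the array is
	 * forcibly aligned for the u64 counters an extension may contain.
	 */
	struct example_bitmap {
		void *members;			/* the set members */
		u32 elements;			/* max elements */
		unsigned char extensions[0]	/* data extensions */
			__aligned(__alignof__(u64));
	};

	map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
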
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 1430535118fb..29dde208381d 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -47,24 +47,26 @@ enum {
 /* Type structure */
 struct bitmap_ipmac {
 	void *members;		/* the set members */
-	void *extensions;	/* MAC + data extensions */
 	u32 first_ip;		/* host byte order, included in range */
 	u32 last_ip;		/* host byte order, included in range */
 	u32 elements;		/* number of max elements in the set */
 	size_t memsize;		/* members size */
 	struct timer_list gc;	/* garbage collector */
+	unsigned char extensions[0]	/* MAC + data extensions */
+		__aligned(__alignof__(u64));
 };
 
 /* ADT structure for generic function args */
 struct bitmap_ipmac_adt_elem {
+	unsigned char ether[ETH_ALEN] __aligned(2);
 	u16 id;
-	unsigned char *ether;
+	u16 add_mac;
 };
 
 struct bitmap_ipmac_elem {
 	unsigned char ether[ETH_ALEN];
 	unsigned char filled;
-} __attribute__ ((aligned));
+} __aligned(__alignof__(u64));
 
 static inline u32
 ip_to_id(const struct bitmap_ipmac *m, u32 ip)
@@ -72,11 +74,11 @@ ip_to_id(const struct bitmap_ipmac *m, u32 ip)
 	return ip - m->first_ip;
 }
 
-static inline struct bitmap_ipmac_elem *
-get_elem(void *extensions, u16 id, size_t dsize)
-{
-	return (struct bitmap_ipmac_elem *)(extensions + id * dsize);
-}
+#define get_elem(extensions, id, dsize)		\
+	(struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
+
+#define get_const_elem(extensions, id, dsize)	\
+	(const struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
 
 /* Common functions */
 
@@ -88,10 +90,9 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
 
 	if (!test_bit(e->id, map->members))
 		return 0;
-	elem = get_elem(map->extensions, e->id, dsize);
-	if (elem->filled == MAC_FILLED)
-		return !e->ether ||
-		       ether_addr_equal(e->ether, elem->ether);
+	elem = get_const_elem(map->extensions, e->id, dsize);
+	if (e->add_mac && elem->filled == MAC_FILLED)
+		return ether_addr_equal(e->ether, elem->ether);
 	/* Trigger kernel to fill out the ethernet address */
 	return -EAGAIN;
 }
@@ -103,7 +104,7 @@ bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)
 
 	if (!test_bit(id, map->members))
 		return 0;
-	elem = get_elem(map->extensions, id, dsize);
+	elem = get_const_elem(map->extensions, id, dsize);
 	/* Timer not started for the incomplete elements */
 	return elem->filled == MAC_FILLED;
 }
@@ -133,7 +134,7 @@ bitmap_ipmac_add_timeout(unsigned long *timeout,
 	 * and we can reuse it later when MAC is filled out,
 	 * possibly by the kernel
 	 */
-	if (e->ether)
+	if (e->add_mac)
 		ip_set_timeout_set(timeout, t);
 	else
 		*timeout = t;
@@ -150,7 +151,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
 	elem = get_elem(map->extensions, e->id, dsize);
 	if (test_bit(e->id, map->members)) {
 		if (elem->filled == MAC_FILLED) {
-			if (e->ether &&
+			if (e->add_mac &&
 			    (flags & IPSET_FLAG_EXIST) &&
 			    !ether_addr_equal(e->ether, elem->ether)) {
 				/* memcpy isn't atomic */
@@ -159,7 +160,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
 				ether_addr_copy(elem->ether, e->ether);
 			}
 			return IPSET_ADD_FAILED;
-		} else if (!e->ether)
+		} else if (!e->add_mac)
 			/* Already added without ethernet address */
 			return IPSET_ADD_FAILED;
 		/* Fill the MAC address and trigger the timer activation */
@@ -168,7 +169,7 @@ bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
 		ether_addr_copy(elem->ether, e->ether);
 		elem->filled = MAC_FILLED;
 		return IPSET_ADD_START_STORED_TIMEOUT;
-	} else if (e->ether) {
+	} else if (e->add_mac) {
 		/* We can store MAC too */
 		ether_addr_copy(elem->ether, e->ether);
 		elem->filled = MAC_FILLED;
@@ -191,7 +192,7 @@ bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
 		     u32 id, size_t dsize)
 {
 	const struct bitmap_ipmac_elem *elem =
-		get_elem(map->extensions, id, dsize);
+		get_const_elem(map->extensions, id, dsize);
 
 	return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
 			       htonl(map->first_ip + id)) ||
@@ -213,7 +214,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
 {
 	struct bitmap_ipmac *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
-	struct bitmap_ipmac_adt_elem e = { .id = 0 };
+	struct bitmap_ipmac_adt_elem e = { .id = 0, .add_mac = 1 };
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 	u32 ip;
 
@@ -231,7 +232,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
 		return -EINVAL;
 
 	e.id = ip_to_id(map, ip);
-	e.ether = eth_hdr(skb)->h_source;
+	memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
 
 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
 }
@@ -265,11 +266,10 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
 		return -IPSET_ERR_BITMAP_RANGE;
 
 	e.id = ip_to_id(map, ip);
-	if (tb[IPSET_ATTR_ETHER])
-		e.ether = nla_data(tb[IPSET_ATTR_ETHER]);
-	else
-		e.ether = NULL;
-
+	if (tb[IPSET_ATTR_ETHER]) {
+		memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
+		e.add_mac = 1;
+	}
 	ret = adtfn(set, &e, &ext, &ext, flags);
 
 	return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -300,13 +300,6 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
 	map->members = ip_set_alloc(map->memsize);
 	if (!map->members)
 		return false;
-	if (set->dsize) {
-		map->extensions = ip_set_alloc(set->dsize * elements);
-		if (!map->extensions) {
-			kfree(map->members);
-			return false;
-		}
-	}
 	map->first_ip = first_ip;
 	map->last_ip = last_ip;
 	map->elements = elements;
@@ -361,14 +354,15 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 	if (elements > IPSET_BITMAP_MAX_RANGE + 1)
 		return -IPSET_ERR_BITMAP_RANGE_SIZE;
 
-	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	set->dsize = ip_set_elem_len(set, tb,
+				     sizeof(struct bitmap_ipmac_elem),
+				     __alignof__(struct bitmap_ipmac_elem));
+	map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
 	if (!map)
 		return -ENOMEM;
 
 	map->memsize = bitmap_bytes(0, elements - 1);
 	set->variant = &bitmap_ipmac;
-	set->dsize = ip_set_elem_len(set, tb,
-				     sizeof(struct bitmap_ipmac_elem));
 	if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
 		kfree(map);
 		return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 5338ccd5da46..7f0c733358a4 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -35,12 +35,13 @@ MODULE_ALIAS("ip_set_bitmap:port");
 /* Type structure */
 struct bitmap_port {
 	void *members;		/* the set members */
-	void *extensions;	/* data extensions */
 	u16 first_port;		/* host byte order, included in range */
 	u16 last_port;		/* host byte order, included in range */
 	u32 elements;		/* number of max elements in the set */
 	size_t memsize;		/* members size */
 	struct timer_list gc;	/* garbage collection */
+	unsigned char extensions[0]	/* data extensions */
+		__aligned(__alignof__(u64));
 };
 
 /* ADT structure for generic function args */
@@ -209,13 +210,6 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
 	map->members = ip_set_alloc(map->memsize);
 	if (!map->members)
 		return false;
-	if (set->dsize) {
-		map->extensions = ip_set_alloc(set->dsize * map->elements);
-		if (!map->extensions) {
-			kfree(map->members);
-			return false;
-		}
-	}
 	map->first_port = first_port;
 	map->last_port = last_port;
 	set->timeout = IPSET_NO_TIMEOUT;
@@ -232,6 +226,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 {
 	struct bitmap_port *map;
 	u16 first_port, last_port;
+	u32 elements;
 
 	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
@@ -248,14 +243,15 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 		last_port = tmp;
 	}
 
-	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	elements = last_port - first_port + 1;
+	set->dsize = ip_set_elem_len(set, tb, 0, 0);
+	map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
 	if (!map)
 		return -ENOMEM;
 
-	map->elements = last_port - first_port + 1;
+	map->elements = elements;
 	map->memsize = bitmap_bytes(0, map->elements);
 	set->variant = &bitmap_port;
-	set->dsize = ip_set_elem_len(set, tb, 0);
 	if (!init_map_port(set, map, first_port, last_port)) {
 		kfree(map);
 		return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 69ab9c2634e1..54f3d7cb23e6 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -364,25 +364,27 @@ add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
 }
 
 size_t
-ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
+ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
+		size_t align)
 {
 	enum ip_set_ext_id id;
-	size_t offset = len;
 	u32 cadt_flags = 0;
 
 	if (tb[IPSET_ATTR_CADT_FLAGS])
 		cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
 	if (cadt_flags & IPSET_FLAG_WITH_FORCEADD)
 		set->flags |= IPSET_CREATE_FLAG_FORCEADD;
+	if (!align)
+		align = 1;
 	for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
 		if (!add_extension(id, cadt_flags, tb))
 			continue;
-		offset = ALIGN(offset, ip_set_extensions[id].align);
-		set->offset[id] = offset;
+		len = ALIGN(len, ip_set_extensions[id].align);
+		set->offset[id] = len;
 		set->extensions |= ip_set_extensions[id].type;
-		offset += ip_set_extensions[id].len;
+		len += ip_set_extensions[id].len;
 	}
-	return offset;
+	return ALIGN(len, align);
 }
 EXPORT_SYMBOL_GPL(ip_set_elem_len);
 
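
ip_set_elem_len() now rounds the final element length up to the caller-supplied alignment, so consecutive elements in the new single-allocation arrays stay aligned. A worked example of the arithmetic (the element sizes are made up for illustration):

	/* ALIGN(x, a) rounds x up to the next multiple of a (a power of 2):
	 *   ALIGN(x, a) == ((x) + (a) - 1) & ~((a) - 1)
	 *
	 * Example: a 7-byte base element plus an 8-byte u64 counter
	 * extension, with align = 8:
	 *   offset of counter = ALIGN(7, 8)  = 8
	 *   length so far     = 8 + 8        = 16
	 *   returned length   = ALIGN(16, 8) = 16
	 * so element i starts at i * 16 and its counter at i * 16 + 8.
	 */
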
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 691b54fcaf2a..e5336ab36d67 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -72,8 +72,9 @@ struct hbucket {
 	DECLARE_BITMAP(used, AHASH_MAX_TUNED);
 	u8 size;		/* size of the array */
 	u8 pos;			/* position of the first free entry */
-	unsigned char value[0];	/* the array of the values */
-} __attribute__ ((aligned));
+	unsigned char value[0]	/* the array of the values */
+		__aligned(__alignof__(u64));
+};
 
 /* The hash table: the table size stored here in order to make resizing easy */
 struct htable {
@@ -475,7 +476,7 @@ static void
 mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
 {
 	struct htable *t;
-	struct hbucket *n;
+	struct hbucket *n, *tmp;
 	struct mtype_elem *data;
 	u32 i, j, d;
 #ifdef IP_SET_HASH_WITH_NETS
@@ -510,9 +511,14 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
 			}
 		}
 		if (d >= AHASH_INIT_SIZE) {
-			struct hbucket *tmp = kzalloc(sizeof(*tmp) +
-				(n->size - AHASH_INIT_SIZE) * dsize,
-				GFP_ATOMIC);
+			if (d >= n->size) {
+				rcu_assign_pointer(hbucket(t, i), NULL);
+				kfree_rcu(n, rcu);
+				continue;
+			}
+			tmp = kzalloc(sizeof(*tmp) +
+				      (n->size - AHASH_INIT_SIZE) * dsize,
+				      GFP_ATOMIC);
 			if (!tmp)
 				/* Still try to delete expired elements */
 				continue;
@@ -522,7 +528,7 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
 					continue;
 				data = ahash_data(n, j, dsize);
 				memcpy(tmp->value + d * dsize, data, dsize);
-				set_bit(j, tmp->used);
+				set_bit(d, tmp->used);
 				d++;
 			}
 			tmp->pos = d;
@@ -1323,12 +1329,14 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
 #endif
 		set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
 		set->dsize = ip_set_elem_len(set, tb,
-				sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)));
+				sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)),
+				__alignof__(struct IPSET_TOKEN(HTYPE, 4_elem)));
 #ifndef IP_SET_PROTO_UNDEF
 	} else {
 		set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
 		set->dsize = ip_set_elem_len(set, tb,
-				sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)));
+				sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)),
+				__alignof__(struct IPSET_TOKEN(HTYPE, 6_elem)));
 	}
 #endif
 	if (tb[IPSET_ATTR_TIMEOUT]) {
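
Two distinct bugs are fixed in mtype_expire() above: a fully-expired bucket is now released via kfree_rcu() instead of being shrunk to nothing, and the compaction loop marks the destination slot rather than the source slot in the used bitmap. A reduced sketch of the corrected compaction (illustrative, with simplified surroundings):

	/* Sketch: copy surviving entries j -> d into the smaller bucket and
	 * track occupancy by the *destination* index d; using the source
	 * index j (the old bug) leaves the new bitmap describing holes
	 * that don't exist.
	 */
	for (j = 0, d = 0; j < n->pos; j++) {
		if (!test_bit(j, n->used))
			continue;		/* slot j was expired */
		memcpy(tmp->value + d * dsize,
		       ahash_data(n, j, dsize), dsize);
		set_bit(d, tmp->used);		/* was: set_bit(j, ...) */
		d++;
	}
	tmp->pos = d;
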
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 5a30ce6e8c90..bbede95c9f68 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -31,7 +31,7 @@ struct set_elem {
 	struct rcu_head rcu;
 	struct list_head list;
 	ip_set_id_t id;
-};
+} __aligned(__alignof__(u64));
 
 struct set_adt_elem {
 	ip_set_id_t id;
@@ -618,7 +618,8 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 		size = IP_SET_LIST_MIN_SIZE;
 
 	set->variant = &set_variant;
-	set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem));
+	set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
+				     __alignof__(struct set_elem));
 	if (!init_list_set(net, set, size))
 		return -ENOMEM;
 	if (tb[IPSET_ATTR_TIMEOUT]) {
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 1e24fff53e4b..f57b4dcdb233 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1176,6 +1176,7 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
 	struct ip_vs_conn *cp;
+	struct sock *sk;
 
 	EnterFunction(11);
 
@@ -1183,13 +1184,12 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
 	if (skb->ipvs_property)
 		return NF_ACCEPT;
 
+	sk = skb_to_full_sk(skb);
 	/* Bad... Do not break raw sockets */
-	if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
+	if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
 		     af == AF_INET)) {
-		struct sock *sk = skb->sk;
-		struct inet_sock *inet = inet_sk(skb->sk);
 
-		if (inet && sk->sk_family == PF_INET && inet->nodefrag)
+		if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
 			return NF_ACCEPT;
 	}
 
@@ -1681,6 +1681,7 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
 	struct ip_vs_conn *cp;
 	int ret, pkts;
 	int conn_reuse_mode;
+	struct sock *sk;
 
 	/* Already marked as IPVS request or reply? */
 	if (skb->ipvs_property)
@@ -1708,12 +1709,11 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
 	ip_vs_fill_iph_skb(af, skb, false, &iph);
 
 	/* Bad... Do not break raw sockets */
-	if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
+	sk = skb_to_full_sk(skb);
+	if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
 		     af == AF_INET)) {
-		struct sock *sk = skb->sk;
-		struct inet_sock *inet = inet_sk(skb->sk);
 
-		if (inet && sk->sk_family == PF_INET && inet->nodefrag)
+		if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
 			return NF_ACCEPT;
 	}
 
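
Both IPVS hooks above switch from dereferencing skb->sk directly to skb_to_full_sk(). A short sketch of the reason (inferred from the change itself; the assumption is that request sockets can reach this path):

	/* Sketch: skb->sk may point at a request_sock, which does not
	 * carry the inet fields; skb_to_full_sk() returns the associated
	 * full socket (or NULL), which is safe to pass to inet_sk().
	 */
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
		return NF_ACCEPT;
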
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 93cc4737018f..2cb429d34c03 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -89,6 +89,7 @@ nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
 }
 
 static void nft_ctx_init(struct nft_ctx *ctx,
+			 struct net *net,
 			 const struct sk_buff *skb,
 			 const struct nlmsghdr *nlh,
 			 struct nft_af_info *afi,
@@ -96,7 +97,7 @@ static void nft_ctx_init(struct nft_ctx *ctx,
 			 struct nft_chain *chain,
 			 const struct nlattr * const *nla)
 {
-	ctx->net = sock_net(skb->sk);
+	ctx->net = net;
 	ctx->afi = afi;
 	ctx->table = table;
 	ctx->chain = chain;
@@ -672,15 +673,14 @@ err:
 	return ret;
 }
 
-static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
-			      const struct nlmsghdr *nlh,
+static int nf_tables_newtable(struct net *net, struct sock *nlsk,
+			      struct sk_buff *skb, const struct nlmsghdr *nlh,
 			      const struct nlattr * const nla[])
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	const struct nlattr *name;
 	struct nft_af_info *afi;
 	struct nft_table *table;
-	struct net *net = sock_net(skb->sk);
 	int family = nfmsg->nfgen_family;
 	u32 flags = 0;
 	struct nft_ctx ctx;
@@ -706,7 +706,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
 		if (nlh->nlmsg_flags & NLM_F_REPLACE)
 			return -EOPNOTSUPP;
 
-		nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+		nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
 		return nf_tables_updtable(&ctx);
 	}
 
@@ -730,7 +730,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
 	INIT_LIST_HEAD(&table->sets);
 	table->flags = flags;
 
-	nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+	nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
 	err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
 	if (err < 0)
 		goto err3;
@@ -810,18 +810,17 @@ out:
 	return err;
 }
 
-static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
-			      const struct nlmsghdr *nlh,
+static int nf_tables_deltable(struct net *net, struct sock *nlsk,
+			      struct sk_buff *skb, const struct nlmsghdr *nlh,
 			      const struct nlattr * const nla[])
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	struct nft_af_info *afi;
 	struct nft_table *table;
-	struct net *net = sock_net(skb->sk);
 	int family = nfmsg->nfgen_family;
 	struct nft_ctx ctx;
 
-	nft_ctx_init(&ctx, skb, nlh, NULL, NULL, NULL, nla);
+	nft_ctx_init(&ctx, net, skb, nlh, NULL, NULL, NULL, nla);
 	if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL)
 		return nft_flush(&ctx, family);
 
@@ -1221,8 +1220,8 @@ static void nf_tables_chain_destroy(struct nft_chain *chain)
 	}
 }
 
-static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
-			      const struct nlmsghdr *nlh,
+static int nf_tables_newchain(struct net *net, struct sock *nlsk,
+			      struct sk_buff *skb, const struct nlmsghdr *nlh,
 			      const struct nlattr * const nla[])
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
@@ -1232,7 +1231,6 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 	struct nft_chain *chain;
 	struct nft_base_chain *basechain = NULL;
 	struct nlattr *ha[NFTA_HOOK_MAX + 1];
-	struct net *net = sock_net(skb->sk);
 	int family = nfmsg->nfgen_family;
 	struct net_device *dev = NULL;
 	u8 policy = NF_ACCEPT;
@@ -1313,7 +1311,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 			return PTR_ERR(stats);
 		}
 
-		nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+		nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
 		trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
 					sizeof(struct nft_trans_chain));
 		if (trans == NULL) {
@@ -1461,7 +1459,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 	if (err < 0)
 		goto err1;
 
-	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+	nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
 	err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN);
 	if (err < 0)
 		goto err2;
@@ -1476,15 +1474,14 @@ err1:
 	return err;
 }
 
-static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
-			      const struct nlmsghdr *nlh,
+static int nf_tables_delchain(struct net *net, struct sock *nlsk,
+			      struct sk_buff *skb, const struct nlmsghdr *nlh,
 			      const struct nlattr * const nla[])
 {
 	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	struct nft_af_info *afi;
 	struct nft_table *table;
 	struct nft_chain *chain;
-	struct net *net = sock_net(skb->sk);
 	int family = nfmsg->nfgen_family;
 	struct nft_ctx ctx;
 
@@ -1506,7 +1503,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
 	if (chain->use > 0)
 		return -EBUSY;
 
-	nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+	nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
 
 	return nft_delchain(&ctx);
 }
@@ -2010,13 +2007,12 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 
 static struct nft_expr_info *info;
 
-static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
-			     const struct nlmsghdr *nlh,
+static int nf_tables_newrule(struct net *net, struct sock *nlsk,
+			     struct sk_buff *skb, const struct nlmsghdr *nlh,
 			     const struct nlattr * const nla[])
2016{ 2013{
2017 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2014 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2018 struct nft_af_info *afi; 2015 struct nft_af_info *afi;
2019 struct net *net = sock_net(skb->sk);
2020 struct nft_table *table; 2016 struct nft_table *table;
2021 struct nft_chain *chain; 2017 struct nft_chain *chain;
2022 struct nft_rule *rule, *old_rule = NULL; 2018 struct nft_rule *rule, *old_rule = NULL;
@@ -2075,7 +2071,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
2075 return PTR_ERR(old_rule); 2071 return PTR_ERR(old_rule);
2076 } 2072 }
2077 2073
2078 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 2074 nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
2079 2075
2080 n = 0; 2076 n = 0;
2081 size = 0; 2077 size = 0;
@@ -2176,13 +2172,12 @@ err1:
2176 return err; 2172 return err;
2177} 2173}
2178 2174
2179static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, 2175static int nf_tables_delrule(struct net *net, struct sock *nlsk,
2180 const struct nlmsghdr *nlh, 2176 struct sk_buff *skb, const struct nlmsghdr *nlh,
2181 const struct nlattr * const nla[]) 2177 const struct nlattr * const nla[])
2182{ 2178{
2183 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2179 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2184 struct nft_af_info *afi; 2180 struct nft_af_info *afi;
2185 struct net *net = sock_net(skb->sk);
2186 struct nft_table *table; 2181 struct nft_table *table;
2187 struct nft_chain *chain = NULL; 2182 struct nft_chain *chain = NULL;
2188 struct nft_rule *rule; 2183 struct nft_rule *rule;
@@ -2205,7 +2200,7 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
2205 return PTR_ERR(chain); 2200 return PTR_ERR(chain);
2206 } 2201 }
2207 2202
2208 nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); 2203 nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla);
2209 2204
2210 if (chain) { 2205 if (chain) {
2211 if (nla[NFTA_RULE_HANDLE]) { 2206 if (nla[NFTA_RULE_HANDLE]) {
@@ -2344,12 +2339,11 @@ static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = {
2344 [NFTA_SET_DESC_SIZE] = { .type = NLA_U32 }, 2339 [NFTA_SET_DESC_SIZE] = { .type = NLA_U32 },
2345}; 2340};
2346 2341
2347static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, 2342static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
2348 const struct sk_buff *skb, 2343 const struct sk_buff *skb,
2349 const struct nlmsghdr *nlh, 2344 const struct nlmsghdr *nlh,
2350 const struct nlattr * const nla[]) 2345 const struct nlattr * const nla[])
2351{ 2346{
2352 struct net *net = sock_net(skb->sk);
2353 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2347 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2354 struct nft_af_info *afi = NULL; 2348 struct nft_af_info *afi = NULL;
2355 struct nft_table *table = NULL; 2349 struct nft_table *table = NULL;
@@ -2371,7 +2365,7 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
2371 return -ENOENT; 2365 return -ENOENT;
2372 } 2366 }
2373 2367
2374 nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); 2368 nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla);
2375 return 0; 2369 return 0;
2376} 2370}
2377 2371
@@ -2623,6 +2617,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
2623 const struct nlmsghdr *nlh, 2617 const struct nlmsghdr *nlh,
2624 const struct nlattr * const nla[]) 2618 const struct nlattr * const nla[])
2625{ 2619{
2620 struct net *net = sock_net(skb->sk);
2626 const struct nft_set *set; 2621 const struct nft_set *set;
2627 struct nft_ctx ctx; 2622 struct nft_ctx ctx;
2628 struct sk_buff *skb2; 2623 struct sk_buff *skb2;
@@ -2630,7 +2625,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
2630 int err; 2625 int err;
2631 2626
2632 /* Verify existence before starting dump */ 2627 /* Verify existence before starting dump */
2633 err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); 2628 err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla);
2634 if (err < 0) 2629 if (err < 0)
2635 return err; 2630 return err;
2636 2631
@@ -2693,14 +2688,13 @@ static int nf_tables_set_desc_parse(const struct nft_ctx *ctx,
2693 return 0; 2688 return 0;
2694} 2689}
2695 2690
2696static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, 2691static int nf_tables_newset(struct net *net, struct sock *nlsk,
2697 const struct nlmsghdr *nlh, 2692 struct sk_buff *skb, const struct nlmsghdr *nlh,
2698 const struct nlattr * const nla[]) 2693 const struct nlattr * const nla[])
2699{ 2694{
2700 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2695 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2701 const struct nft_set_ops *ops; 2696 const struct nft_set_ops *ops;
2702 struct nft_af_info *afi; 2697 struct nft_af_info *afi;
2703 struct net *net = sock_net(skb->sk);
2704 struct nft_table *table; 2698 struct nft_table *table;
2705 struct nft_set *set; 2699 struct nft_set *set;
2706 struct nft_ctx ctx; 2700 struct nft_ctx ctx;
@@ -2798,7 +2792,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2798 if (IS_ERR(table)) 2792 if (IS_ERR(table))
2799 return PTR_ERR(table); 2793 return PTR_ERR(table);
2800 2794
2801 nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); 2795 nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
2802 2796
2803 set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]); 2797 set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]);
2804 if (IS_ERR(set)) { 2798 if (IS_ERR(set)) {
@@ -2882,8 +2876,8 @@ static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set
2882 nft_set_destroy(set); 2876 nft_set_destroy(set);
2883} 2877}
2884 2878
2885static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, 2879static int nf_tables_delset(struct net *net, struct sock *nlsk,
2886 const struct nlmsghdr *nlh, 2880 struct sk_buff *skb, const struct nlmsghdr *nlh,
2887 const struct nlattr * const nla[]) 2881 const struct nlattr * const nla[])
2888{ 2882{
2889 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 2883 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
@@ -2896,7 +2890,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2896 if (nla[NFTA_SET_TABLE] == NULL) 2890 if (nla[NFTA_SET_TABLE] == NULL)
2897 return -EINVAL; 2891 return -EINVAL;
2898 2892
2899 err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); 2893 err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla);
2900 if (err < 0) 2894 if (err < 0)
2901 return err; 2895 return err;
2902 2896
@@ -3024,7 +3018,7 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX +
3024 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, 3018 [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 },
3025}; 3019};
3026 3020
3027static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, 3021static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net,
3028 const struct sk_buff *skb, 3022 const struct sk_buff *skb,
3029 const struct nlmsghdr *nlh, 3023 const struct nlmsghdr *nlh,
3030 const struct nlattr * const nla[], 3024 const struct nlattr * const nla[],
@@ -3033,7 +3027,6 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
3033 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 3027 const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3034 struct nft_af_info *afi; 3028 struct nft_af_info *afi;
3035 struct nft_table *table; 3029 struct nft_table *table;
3036 struct net *net = sock_net(skb->sk);
3037 3030
3038 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false); 3031 afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
3039 if (IS_ERR(afi)) 3032 if (IS_ERR(afi))
@@ -3045,7 +3038,7 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
3045 if (!trans && (table->flags & NFT_TABLE_INACTIVE)) 3038 if (!trans && (table->flags & NFT_TABLE_INACTIVE))
3046 return -ENOENT; 3039 return -ENOENT;
3047 3040
3048 nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); 3041 nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla);
3049 return 0; 3042 return 0;
3050} 3043}
3051 3044
@@ -3135,6 +3128,7 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
3135 3128
3136static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) 3129static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3137{ 3130{
3131 struct net *net = sock_net(skb->sk);
3138 const struct nft_set *set; 3132 const struct nft_set *set;
3139 struct nft_set_dump_args args; 3133 struct nft_set_dump_args args;
3140 struct nft_ctx ctx; 3134 struct nft_ctx ctx;
@@ -3150,8 +3144,8 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3150 if (err < 0) 3144 if (err < 0)
3151 return err; 3145 return err;
3152 3146
3153 err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla, 3147 err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh,
3154 false); 3148 (void *)nla, false);
3155 if (err < 0) 3149 if (err < 0)
3156 return err; 3150 return err;
3157 3151
@@ -3212,11 +3206,12 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
3212 const struct nlmsghdr *nlh, 3206 const struct nlmsghdr *nlh,
3213 const struct nlattr * const nla[]) 3207 const struct nlattr * const nla[])
3214{ 3208{
3209 struct net *net = sock_net(skb->sk);
3215 const struct nft_set *set; 3210 const struct nft_set *set;
3216 struct nft_ctx ctx; 3211 struct nft_ctx ctx;
3217 int err; 3212 int err;
3218 3213
3219 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); 3214 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false);
3220 if (err < 0) 3215 if (err < 0)
3221 return err; 3216 return err;
3222 3217
@@ -3528,11 +3523,10 @@ err1:
3528 return err; 3523 return err;
3529} 3524}
3530 3525
3531static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb, 3526static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
3532 const struct nlmsghdr *nlh, 3527 struct sk_buff *skb, const struct nlmsghdr *nlh,
3533 const struct nlattr * const nla[]) 3528 const struct nlattr * const nla[])
3534{ 3529{
3535 struct net *net = sock_net(skb->sk);
3536 const struct nlattr *attr; 3530 const struct nlattr *attr;
3537 struct nft_set *set; 3531 struct nft_set *set;
3538 struct nft_ctx ctx; 3532 struct nft_ctx ctx;
@@ -3541,7 +3535,7 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
3541 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) 3535 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
3542 return -EINVAL; 3536 return -EINVAL;
3543 3537
3544 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true); 3538 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, true);
3545 if (err < 0) 3539 if (err < 0)
3546 return err; 3540 return err;
3547 3541
@@ -3623,8 +3617,8 @@ err1:
3623 return err; 3617 return err;
3624} 3618}
3625 3619
3626static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb, 3620static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
3627 const struct nlmsghdr *nlh, 3621 struct sk_buff *skb, const struct nlmsghdr *nlh,
3628 const struct nlattr * const nla[]) 3622 const struct nlattr * const nla[])
3629{ 3623{
3630 const struct nlattr *attr; 3624 const struct nlattr *attr;
@@ -3635,7 +3629,7 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
3635 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) 3629 if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
3636 return -EINVAL; 3630 return -EINVAL;
3637 3631
3638 err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); 3632 err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false);
3639 if (err < 0) 3633 if (err < 0)
3640 return err; 3634 return err;
3641 3635
@@ -4030,7 +4024,8 @@ static int nf_tables_abort(struct sk_buff *skb)
4030 struct nft_trans *trans, *next; 4024 struct nft_trans *trans, *next;
4031 struct nft_trans_elem *te; 4025 struct nft_trans_elem *te;
4032 4026
4033 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { 4027 list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list,
4028 list) {
4034 switch (trans->msg_type) { 4029 switch (trans->msg_type) {
4035 case NFT_MSG_NEWTABLE: 4030 case NFT_MSG_NEWTABLE:
4036 if (nft_trans_table_update(trans)) { 4031 if (nft_trans_table_update(trans)) {
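
The nf_tables_api.c hunks above make two independent changes: every netlink handler now receives the network namespace as an explicit struct net * argument instead of re-deriving it via sock_net(skb->sk), and nf_tables_abort() walks the transaction log with list_for_each_entry_safe_reverse() so a partially applied batch is undone in the opposite order to which it was built. The explicit argument matters because, as the nfnetlink.c hunk below shows, the batch path stops copying skb->sk onto the freshly allocated message buffer. A minimal userspace sketch of the refactor follows; struct net, struct sk_buff and the ctx_init_* helpers here are simplified stand-ins, not the kernel definitions.

#include <assert.h>
#include <stddef.h>

struct net { int id; };                     /* stand-in namespace */
struct sk_buff { struct net *owner_net; };  /* NULL for batch buffers */
struct nft_ctx { struct net *net; };

/* Before: the namespace was re-derived from the message buffer, which
 * breaks once the batch path stops propagating owner_net. */
static void ctx_init_old(struct nft_ctx *ctx, const struct sk_buff *skb)
{
	ctx->net = skb->owner_net;	/* NULL-deref hazard */
}

/* After: the caller, which always knows the namespace, passes it in. */
static void ctx_init_new(struct nft_ctx *ctx, struct net *net)
{
	ctx->net = net;
}

int main(void)
{
	struct net ns = { 1 };
	struct sk_buff batch_skb = { NULL };
	struct nft_ctx ctx;

	ctx_init_new(&ctx, &ns);	/* safe regardless of batch_skb */
	assert(ctx.net == &ns);
	(void)batch_skb;
	(void)ctx_init_old;
	return 0;
}
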
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 46453ab318db..77afe913d03d 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -295,8 +295,6 @@ replay:
295 if (!skb) 295 if (!skb)
296 return netlink_ack(oskb, nlh, -ENOMEM); 296 return netlink_ack(oskb, nlh, -ENOMEM);
297 297
298 skb->sk = oskb->sk;
299
300 nfnl_lock(subsys_id); 298 nfnl_lock(subsys_id);
301 ss = rcu_dereference_protected(table[subsys_id].subsys, 299 ss = rcu_dereference_protected(table[subsys_id].subsys,
302 lockdep_is_held(&table[subsys_id].mutex)); 300 lockdep_is_held(&table[subsys_id].mutex));
@@ -381,7 +379,7 @@ replay:
381 goto ack; 379 goto ack;
382 380
383 if (nc->call_batch) { 381 if (nc->call_batch) {
384 err = nc->call_batch(net->nfnl, skb, nlh, 382 err = nc->call_batch(net, net->nfnl, skb, nlh,
385 (const struct nlattr **)cda); 383 (const struct nlattr **)cda);
386 } 384 }
387 385
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 06eb48fceb42..740cce4685ac 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -825,7 +825,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
825 struct net *net = sock_net(ctnl); 825 struct net *net = sock_net(ctnl);
826 struct nfnl_log_net *log = nfnl_log_pernet(net); 826 struct nfnl_log_net *log = nfnl_log_pernet(net);
827 int ret = 0; 827 int ret = 0;
828 u16 flags; 828 u16 flags = 0;
829 829
830 if (nfula[NFULA_CFG_CMD]) { 830 if (nfula[NFULA_CFG_CMD]) {
831 u_int8_t pf = nfmsg->nfgen_family; 831 u_int8_t pf = nfmsg->nfgen_family;
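
The one-line nfnetlink_log.c change initializes flags because it is assigned only when the optional flags attribute is present in the message, yet consumed unconditionally further down; reading it on the attribute-less path was undefined behavior. A stand-alone illustration of the hazard, with a hypothetical parse_flags() in place of nfulnl_recv_config():

#include <stdint.h>
#include <stdio.h>

static uint16_t parse_flags(const uint16_t *attr)
{
	uint16_t flags = 0;	/* the fix: a defined default */

	if (attr)		/* only sometimes assigned... */
		flags = *attr;

	return flags;		/* ...but always read */
}

int main(void)
{
	uint16_t raw = 0x0003;

	printf("with attr:    0x%04x\n", parse_flags(&raw));
	printf("without attr: 0x%04x\n", parse_flags(NULL)); /* defined: 0 */
	return 0;
}
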
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 7d81d280cb4f..861c6615253b 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -365,8 +365,9 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
365 break; 365 break;
366 } 366 }
367 367
368 nfnl_ct = rcu_dereference(nfnl_ct_hook);
369
368 if (queue->flags & NFQA_CFG_F_CONNTRACK) { 370 if (queue->flags & NFQA_CFG_F_CONNTRACK) {
369 nfnl_ct = rcu_dereference(nfnl_ct_hook);
370 if (nfnl_ct != NULL) { 371 if (nfnl_ct != NULL) {
371 ct = nfnl_ct->get_ct(entskb, &ctinfo); 372 ct = nfnl_ct->get_ct(entskb, &ctinfo);
372 if (ct != NULL) 373 if (ct != NULL)
@@ -1064,9 +1065,10 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
1064 if (entry == NULL) 1065 if (entry == NULL)
1065 return -ENOENT; 1066 return -ENOENT;
1066 1067
1068 /* rcu lock already held from nfnl->call_rcu. */
1069 nfnl_ct = rcu_dereference(nfnl_ct_hook);
1070
1067 if (nfqa[NFQA_CT]) { 1071 if (nfqa[NFQA_CT]) {
1068 /* rcu lock already held from nfnl->call_rcu. */
1069 nfnl_ct = rcu_dereference(nfnl_ct_hook);
1070 if (nfnl_ct != NULL) 1072 if (nfnl_ct != NULL)
1071 ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo); 1073 ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo);
1072 } 1074 }
@@ -1417,6 +1419,7 @@ static int __init nfnetlink_queue_init(void)
1417 1419
1418cleanup_netlink_notifier: 1420cleanup_netlink_notifier:
1419 netlink_unregister_notifier(&nfqnl_rtnl_notifier); 1421 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1422 unregister_pernet_subsys(&nfnl_queue_net_ops);
1420out: 1423out:
1421 return status; 1424 return status;
1422} 1425}
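
Two things happen in nfnetlink_queue.c: the rcu_dereference(nfnl_ct_hook) lookup is hoisted above the conditionals so nfnl_ct is assigned on every path that later tests it, and the module-init error path gains the unregister_pernet_subsys() call that was missing, restoring the rule that cleanup labels undo, in reverse order, exactly the steps that succeeded. A generic sketch of that unwind idiom, with made-up setup_a/setup_b step names:

#include <stdio.h>

static int step_a_ok;

static int  setup_a(void) { step_a_ok = 1; return 0; }
static void undo_a(void)  { step_a_ok = 0; }
static int  setup_b(void) { return -1; /* simulated failure */ }

static int init(void)
{
	int err;

	err = setup_a();
	if (err)
		goto out;
	err = setup_b();
	if (err)
		goto cleanup_a;		/* unwind what already succeeded */
	return 0;

cleanup_a:
	undo_a();			/* analogue of the added unregister */
out:
	return err;
}

int main(void)
{
	int err = init();

	printf("init: %d, step_a still registered: %d\n", err, step_a_ok);
	return 0;
}
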
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index 1067fb4c1ffa..c7808fc19719 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -47,27 +47,34 @@ static void nft_counter_eval(const struct nft_expr *expr,
47 local_bh_enable(); 47 local_bh_enable();
48} 48}
49 49
50static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr) 50static void nft_counter_fetch(const struct nft_counter_percpu __percpu *counter,
51 struct nft_counter *total)
51{ 52{
52 struct nft_counter_percpu_priv *priv = nft_expr_priv(expr); 53 const struct nft_counter_percpu *cpu_stats;
53 struct nft_counter_percpu *cpu_stats;
54 struct nft_counter total;
55 u64 bytes, packets; 54 u64 bytes, packets;
56 unsigned int seq; 55 unsigned int seq;
57 int cpu; 56 int cpu;
58 57
59 memset(&total, 0, sizeof(total)); 58 memset(total, 0, sizeof(*total));
60 for_each_possible_cpu(cpu) { 59 for_each_possible_cpu(cpu) {
61 cpu_stats = per_cpu_ptr(priv->counter, cpu); 60 cpu_stats = per_cpu_ptr(counter, cpu);
62 do { 61 do {
63 seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 62 seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
64 bytes = cpu_stats->counter.bytes; 63 bytes = cpu_stats->counter.bytes;
65 packets = cpu_stats->counter.packets; 64 packets = cpu_stats->counter.packets;
66 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq)); 65 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
67 66
68 total.packets += packets; 67 total->packets += packets;
69 total.bytes += bytes; 68 total->bytes += bytes;
70 } 69 }
70}
71
72static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
73{
74 struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
75 struct nft_counter total;
76
77 nft_counter_fetch(priv->counter, &total);
71 78
72 if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) || 79 if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
73 nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets))) 80 nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
@@ -118,6 +125,31 @@ static void nft_counter_destroy(const struct nft_ctx *ctx,
118 free_percpu(priv->counter); 125 free_percpu(priv->counter);
119} 126}
120 127
128static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
129{
130 struct nft_counter_percpu_priv *priv = nft_expr_priv(src);
131 struct nft_counter_percpu_priv *priv_clone = nft_expr_priv(dst);
132 struct nft_counter_percpu __percpu *cpu_stats;
133 struct nft_counter_percpu *this_cpu;
134 struct nft_counter total;
135
136 nft_counter_fetch(priv->counter, &total);
137
138 cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu,
139 GFP_ATOMIC);
140 if (cpu_stats == NULL)
141 return -ENOMEM;
142
143 preempt_disable();
144 this_cpu = this_cpu_ptr(cpu_stats);
145 this_cpu->counter.packets = total.packets;
146 this_cpu->counter.bytes = total.bytes;
147 preempt_enable();
148
149 priv_clone->counter = cpu_stats;
150 return 0;
151}
152
121static struct nft_expr_type nft_counter_type; 153static struct nft_expr_type nft_counter_type;
122static const struct nft_expr_ops nft_counter_ops = { 154static const struct nft_expr_ops nft_counter_ops = {
123 .type = &nft_counter_type, 155 .type = &nft_counter_type,
@@ -126,6 +158,7 @@ static const struct nft_expr_ops nft_counter_ops = {
126 .init = nft_counter_init, 158 .init = nft_counter_init,
127 .destroy = nft_counter_destroy, 159 .destroy = nft_counter_destroy,
128 .dump = nft_counter_dump, 160 .dump = nft_counter_dump,
161 .clone = nft_counter_clone,
129}; 162};
130 163
131static struct nft_expr_type nft_counter_type __read_mostly = { 164static struct nft_expr_type nft_counter_type __read_mostly = {
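
The nft_counter.c patch factors the per-CPU summation out of the dump path into nft_counter_fetch() and reuses it from a new .clone operation, which snapshots the totals and seeds the current CPU's slot of a fresh per-CPU allocation. A userspace model follows; per-CPU storage is approximated by a plain array and the kernel's u64_stats seqcount retry loop is deliberately omitted, so treat it as shape, not substance.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NCPUS 4

struct counter { uint64_t packets, bytes; };

static void fetch(const struct counter percpu[NCPUS], struct counter *total)
{
	memset(total, 0, sizeof(*total));
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		total->packets += percpu[cpu].packets;
		total->bytes   += percpu[cpu].bytes;
	}
}

/* Clone: snapshot the source, then seed one slot of the new copy with
 * the folded totals, as the new .clone op does for the current CPU. */
static void clone_counter(struct counter dst[NCPUS],
			  const struct counter src[NCPUS])
{
	struct counter total;

	fetch(src, &total);
	memset(dst, 0, NCPUS * sizeof(*dst));
	dst[0] = total;		/* the "this_cpu" slot in the kernel code */
}

int main(void)
{
	struct counter src[NCPUS] = {
		{ 1, 100 }, { 2, 200 }, { 3, 300 }, { 4, 400 }
	};
	struct counter dst[NCPUS], total;

	clone_counter(dst, src);
	fetch(dst, &total);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)total.packets,
	       (unsigned long long)total.bytes);
	return 0;
}
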
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 513a8ef60a59..9dec3bd1b63c 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -50,8 +50,9 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
50 } 50 }
51 51
52 ext = nft_set_elem_ext(set, elem); 52 ext = nft_set_elem_ext(set, elem);
53 if (priv->expr != NULL) 53 if (priv->expr != NULL &&
54 nft_expr_clone(nft_set_ext_expr(ext), priv->expr); 54 nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0)
55 return NULL;
55 56
56 return elem; 57 return elem;
57} 58}
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index b7de0da46acd..ecf0a0196f18 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -572,7 +572,7 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
572 if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED) 572 if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
573 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 573 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
574 else 574 else
575 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 575 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
576 576
577 pr_debug("mask 0x%x\n", mask); 577 pr_debug("mask 0x%x\n", mask);
578 578
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index c2cc11168fd5..3e8892216f94 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -53,6 +53,8 @@ struct ovs_conntrack_info {
53 struct md_labels labels; 53 struct md_labels labels;
54}; 54};
55 55
56static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);
57
56static u16 key_to_nfproto(const struct sw_flow_key *key) 58static u16 key_to_nfproto(const struct sw_flow_key *key)
57{ 59{
58 switch (ntohs(key->eth.type)) { 60 switch (ntohs(key->eth.type)) {
@@ -141,6 +143,7 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
141 * previously sent the packet to conntrack via the ct action. 143 * previously sent the packet to conntrack via the ct action.
142 */ 144 */
143static void ovs_ct_update_key(const struct sk_buff *skb, 145static void ovs_ct_update_key(const struct sk_buff *skb,
146 const struct ovs_conntrack_info *info,
144 struct sw_flow_key *key, bool post_ct) 147 struct sw_flow_key *key, bool post_ct)
145{ 148{
146 const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt; 149 const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
@@ -158,13 +161,15 @@ static void ovs_ct_update_key(const struct sk_buff *skb,
158 zone = nf_ct_zone(ct); 161 zone = nf_ct_zone(ct);
159 } else if (post_ct) { 162 } else if (post_ct) {
160 state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID; 163 state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
164 if (info)
165 zone = &info->zone;
161 } 166 }
162 __ovs_ct_update_key(key, state, zone, ct); 167 __ovs_ct_update_key(key, state, zone, ct);
163} 168}
164 169
165void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) 170void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
166{ 171{
167 ovs_ct_update_key(skb, key, false); 172 ovs_ct_update_key(skb, NULL, key, false);
168} 173}
169 174
170int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) 175int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
@@ -418,7 +423,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
418 } 423 }
419 } 424 }
420 425
421 ovs_ct_update_key(skb, key, true); 426 ovs_ct_update_key(skb, info, key, true);
422 427
423 return 0; 428 return 0;
424} 429}
@@ -708,7 +713,7 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
708 nf_conntrack_get(&ct_info.ct->ct_general); 713 nf_conntrack_get(&ct_info.ct->ct_general);
709 return 0; 714 return 0;
710err_free_ct: 715err_free_ct:
711 nf_conntrack_free(ct_info.ct); 716 __ovs_ct_free_action(&ct_info);
712 return err; 717 return err;
713} 718}
714 719
@@ -750,6 +755,11 @@ void ovs_ct_free_action(const struct nlattr *a)
750{ 755{
751 struct ovs_conntrack_info *ct_info = nla_data(a); 756 struct ovs_conntrack_info *ct_info = nla_data(a);
752 757
758 __ovs_ct_free_action(ct_info);
759}
760
761static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
762{
753 if (ct_info->helper) 763 if (ct_info->helper)
754 module_put(ct_info->helper->me); 764 module_put(ct_info->helper->me);
755 if (ct_info->ct) 765 if (ct_info->ct)
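
In openvswitch/conntrack.c the error path of ovs_ct_copy_action() previously freed only the conntrack template; routing it and ovs_ct_free_action() through a shared __ovs_ct_free_action() helper (hence the forward declaration near the top of the file) guarantees the helper module reference is dropped too. A compact illustration of the one-cleanup-helper pattern, with invented resource names:

#include <stdio.h>
#include <stdlib.h>

struct action { char *helper; char *ct; };

static void free_action(struct action *a)	/* the shared helper */
{
	free(a->helper);
	free(a->ct);
	a->helper = a->ct = NULL;
}

static int copy_action(struct action *a, int fail_late)
{
	a->ct = NULL;
	a->helper = malloc(8);
	if (!a->helper)
		return -1;
	if (fail_late)
		goto err_free;	/* same cleanup as the destructor */
	a->ct = malloc(8);
	if (!a->ct)
		goto err_free;
	return 0;

err_free:
	free_action(a);
	return -1;
}

int main(void)
{
	struct action a;

	printf("late failure: %d (helper leaked: %s)\n",
	       copy_action(&a, 1), a.helper ? "yes" : "no");
	return 0;
}
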
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index a7a80a6b77b0..653d073bae45 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -58,7 +58,7 @@ void ovs_dp_notify_wq(struct work_struct *work)
58 struct hlist_node *n; 58 struct hlist_node *n;
59 59
60 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) { 60 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
61 if (vport->ops->type != OVS_VPORT_TYPE_NETDEV) 61 if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL)
62 continue; 62 continue;
63 63
64 if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH)) 64 if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH))
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index efb736bb6855..e41cd12d9b2d 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -117,7 +117,6 @@ static struct vport_ops ovs_geneve_vport_ops = {
117 .destroy = ovs_netdev_tunnel_destroy, 117 .destroy = ovs_netdev_tunnel_destroy,
118 .get_options = geneve_get_options, 118 .get_options = geneve_get_options,
119 .send = dev_queue_xmit, 119 .send = dev_queue_xmit,
120 .owner = THIS_MODULE,
121}; 120};
122 121
123static int __init ovs_geneve_tnl_init(void) 122static int __init ovs_geneve_tnl_init(void)
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index c3257d78d3d2..7f8897f33a67 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -89,7 +89,6 @@ static struct vport_ops ovs_gre_vport_ops = {
89 .create = gre_create, 89 .create = gre_create,
90 .send = dev_queue_xmit, 90 .send = dev_queue_xmit,
91 .destroy = ovs_netdev_tunnel_destroy, 91 .destroy = ovs_netdev_tunnel_destroy,
92 .owner = THIS_MODULE,
93}; 92};
94 93
95static int __init ovs_gre_tnl_init(void) 94static int __init ovs_gre_tnl_init(void)
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index b327368a3848..6b0190b987ec 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -180,9 +180,13 @@ void ovs_netdev_tunnel_destroy(struct vport *vport)
180 if (vport->dev->priv_flags & IFF_OVS_DATAPATH) 180 if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
181 ovs_netdev_detach_dev(vport); 181 ovs_netdev_detach_dev(vport);
182 182
183 /* Early release so we can unregister the device */ 183 /* We can be invoked by both explicit vport deletion and
184 * underlying netdev deregistration; delete the link only
185 * if it's not already shutting down.
186 */
187 if (vport->dev->reg_state == NETREG_REGISTERED)
188 rtnl_delete_link(vport->dev);
184 dev_put(vport->dev); 189 dev_put(vport->dev);
185 rtnl_delete_link(vport->dev);
186 vport->dev = NULL; 190 vport->dev = NULL;
187 rtnl_unlock(); 191 rtnl_unlock();
188 192
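
ovs_netdev_tunnel_destroy() can be reached both from explicit vport deletion and from the netdev-unregister notifier, as the new comment in the hunk says; guarding rtnl_delete_link() with the NETREG_REGISTERED check makes the second arrival a no-op instead of a second unregister of a device already shutting down. A toy state-machine version, with enum values that merely mimic the kernel names:

#include <stdio.h>

enum reg_state { NETREG_REGISTERED, NETREG_UNREGISTERING };

struct dev { enum reg_state reg_state; int links_deleted; };

static void delete_link(struct dev *d)
{
	d->links_deleted++;
	d->reg_state = NETREG_UNREGISTERING;
}

static void tunnel_destroy(struct dev *d)
{
	if (d->reg_state == NETREG_REGISTERED)	/* the added check */
		delete_link(d);
}

int main(void)
{
	struct dev d = { NETREG_REGISTERED, 0 };

	tunnel_destroy(&d);	/* explicit vport deletion */
	tunnel_destroy(&d);	/* notifier path: now a no-op */
	printf("links_deleted = %d\n", d.links_deleted);	/* 1 */
	return 0;
}
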
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 0ac0fd004d7e..31cbc8c5c7db 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -71,7 +71,7 @@ static struct hlist_head *hash_bucket(const struct net *net, const char *name)
71 return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)]; 71 return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
72} 72}
73 73
74int ovs_vport_ops_register(struct vport_ops *ops) 74int __ovs_vport_ops_register(struct vport_ops *ops)
75{ 75{
76 int err = -EEXIST; 76 int err = -EEXIST;
77 struct vport_ops *o; 77 struct vport_ops *o;
@@ -87,7 +87,7 @@ errout:
87 ovs_unlock(); 87 ovs_unlock();
88 return err; 88 return err;
89} 89}
90EXPORT_SYMBOL_GPL(ovs_vport_ops_register); 90EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);
91 91
92void ovs_vport_ops_unregister(struct vport_ops *ops) 92void ovs_vport_ops_unregister(struct vport_ops *ops)
93{ 93{
@@ -256,8 +256,8 @@ int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
256 * 256 *
257 * @vport: vport to delete. 257 * @vport: vport to delete.
258 * 258 *
259 * Detaches @vport from its datapath and destroys it. It is possible to fail 259 * Detaches @vport from its datapath and destroys it. ovs_mutex must
260 * for reasons such as lack of memory. ovs_mutex must be held. 260 * be held.
261 */ 261 */
262void ovs_vport_del(struct vport *vport) 262void ovs_vport_del(struct vport *vport)
263{ 263{
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index bdfd82a7c064..8ea3a96980ac 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -196,7 +196,13 @@ static inline const char *ovs_vport_name(struct vport *vport)
196 return vport->dev->name; 196 return vport->dev->name;
197} 197}
198 198
199int ovs_vport_ops_register(struct vport_ops *ops); 199int __ovs_vport_ops_register(struct vport_ops *ops);
200#define ovs_vport_ops_register(ops) \
201 ({ \
202 (ops)->owner = THIS_MODULE; \
203 __ovs_vport_ops_register(ops); \
204 })
205
200void ovs_vport_ops_unregister(struct vport_ops *ops); 206void ovs_vport_ops_unregister(struct vport_ops *ops);
201 207
202static inline struct rtable *ovs_tunnel_route_lookup(struct net *net, 208static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
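
The vport.h hunk turns ovs_vport_ops_register() into a macro wrapping __ovs_vport_ops_register() so that THIS_MODULE expands in the registering module itself; that is why the geneve and gre hunks above could drop their explicit .owner = THIS_MODULE fields. A rough userspace analog, with __FILE__ standing in for THIS_MODULE and invented ops/register names:

#include <stdio.h>

struct vport_ops { const char *owner; };

static int __register(struct vport_ops *ops)
{
	printf("ops registered by %s\n", ops->owner);
	return 0;
}

/* Like the kernel macro, a statement expression whose value is the
 * helper's return code. __FILE__ expands wherever the macro is used,
 * so with the macro in a shared header each caller stamps its own
 * identity rather than the helper's. */
#define register_ops(ops) \
	({ (ops)->owner = __FILE__; __register(ops); })

int main(void)
{
	struct vport_ops my_ops = { NULL };

	return register_ops(&my_ops);
}
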
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index af399cac5205..992396aa635c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1741,6 +1741,20 @@ static void fanout_release(struct sock *sk)
1741 kfree_rcu(po->rollover, rcu); 1741 kfree_rcu(po->rollover, rcu);
1742} 1742}
1743 1743
1744static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1745 struct sk_buff *skb)
1746{
1747 /* Earlier code assumed this would be a VLAN pkt, double-check
1748 * this now that we have the actual packet in hand. We can only
1749 * do this check on Ethernet devices.
1750 */
1751 if (unlikely(dev->type != ARPHRD_ETHER))
1752 return false;
1753
1754 skb_reset_mac_header(skb);
1755 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1756}
1757
1744static const struct proto_ops packet_ops; 1758static const struct proto_ops packet_ops;
1745 1759
1746static const struct proto_ops packet_ops_spkt; 1760static const struct proto_ops packet_ops_spkt;
@@ -1902,18 +1916,10 @@ retry:
1902 goto retry; 1916 goto retry;
1903 } 1917 }
1904 1918
1905 if (len > (dev->mtu + dev->hard_header_len + extra_len)) { 1919 if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1906 /* Earlier code assumed this would be a VLAN pkt, 1920 !packet_extra_vlan_len_allowed(dev, skb)) {
1907 * double-check this now that we have the actual 1921 err = -EMSGSIZE;
1908 * packet in hand. 1922 goto out_unlock;
1909 */
1910 struct ethhdr *ehdr;
1911 skb_reset_mac_header(skb);
1912 ehdr = eth_hdr(skb);
1913 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
1914 err = -EMSGSIZE;
1915 goto out_unlock;
1916 }
1917 } 1923 }
1918 1924
1919 skb->protocol = proto; 1925 skb->protocol = proto;
@@ -2323,8 +2329,8 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
2323static bool ll_header_truncated(const struct net_device *dev, int len) 2329static bool ll_header_truncated(const struct net_device *dev, int len)
2324{ 2330{
2325 /* net device doesn't like empty head */ 2331 /* net device doesn't like empty head */
2326 if (unlikely(len <= dev->hard_header_len)) { 2332 if (unlikely(len < dev->hard_header_len)) {
2327 net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n", 2333 net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
2328 current->comm, len, dev->hard_header_len); 2334 current->comm, len, dev->hard_header_len);
2329 return true; 2335 return true;
2330 } 2336 }
@@ -2332,6 +2338,15 @@ static bool ll_header_truncated(const struct net_device *dev, int len)
2332 return false; 2338 return false;
2333} 2339}
2334 2340
2341static void tpacket_set_protocol(const struct net_device *dev,
2342 struct sk_buff *skb)
2343{
2344 if (dev->type == ARPHRD_ETHER) {
2345 skb_reset_mac_header(skb);
2346 skb->protocol = eth_hdr(skb)->h_proto;
2347 }
2348}
2349
2335static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, 2350static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2336 void *frame, struct net_device *dev, int size_max, 2351 void *frame, struct net_device *dev, int size_max,
2337 __be16 proto, unsigned char *addr, int hlen) 2352 __be16 proto, unsigned char *addr, int hlen)
@@ -2368,8 +2383,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2368 skb_reserve(skb, hlen); 2383 skb_reserve(skb, hlen);
2369 skb_reset_network_header(skb); 2384 skb_reset_network_header(skb);
2370 2385
2371 if (!packet_use_direct_xmit(po))
2372 skb_probe_transport_header(skb, 0);
2373 if (unlikely(po->tp_tx_has_off)) { 2386 if (unlikely(po->tp_tx_has_off)) {
2374 int off_min, off_max, off; 2387 int off_min, off_max, off;
2375 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll); 2388 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
@@ -2415,6 +2428,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2415 dev->hard_header_len); 2428 dev->hard_header_len);
2416 if (unlikely(err)) 2429 if (unlikely(err))
2417 return err; 2430 return err;
2431 if (!skb->protocol)
2432 tpacket_set_protocol(dev, skb);
2418 2433
2419 data += dev->hard_header_len; 2434 data += dev->hard_header_len;
2420 to_write -= dev->hard_header_len; 2435 to_write -= dev->hard_header_len;
@@ -2449,6 +2464,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2449 len = ((to_write > len_max) ? len_max : to_write); 2464 len = ((to_write > len_max) ? len_max : to_write);
2450 } 2465 }
2451 2466
2467 skb_probe_transport_header(skb, 0);
2468
2452 return tp_len; 2469 return tp_len;
2453} 2470}
2454 2471
@@ -2493,12 +2510,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2493 if (unlikely(!(dev->flags & IFF_UP))) 2510 if (unlikely(!(dev->flags & IFF_UP)))
2494 goto out_put; 2511 goto out_put;
2495 2512
2496 reserve = dev->hard_header_len + VLAN_HLEN; 2513 if (po->sk.sk_socket->type == SOCK_RAW)
2514 reserve = dev->hard_header_len;
2497 size_max = po->tx_ring.frame_size 2515 size_max = po->tx_ring.frame_size
2498 - (po->tp_hdrlen - sizeof(struct sockaddr_ll)); 2516 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2499 2517
2500 if (size_max > dev->mtu + reserve) 2518 if (size_max > dev->mtu + reserve + VLAN_HLEN)
2501 size_max = dev->mtu + reserve; 2519 size_max = dev->mtu + reserve + VLAN_HLEN;
2502 2520
2503 do { 2521 do {
2504 ph = packet_current_frame(po, &po->tx_ring, 2522 ph = packet_current_frame(po, &po->tx_ring,
@@ -2525,18 +2543,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2525 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, 2543 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2526 addr, hlen); 2544 addr, hlen);
2527 if (likely(tp_len >= 0) && 2545 if (likely(tp_len >= 0) &&
2528 tp_len > dev->mtu + dev->hard_header_len) { 2546 tp_len > dev->mtu + reserve &&
2529 struct ethhdr *ehdr; 2547 !packet_extra_vlan_len_allowed(dev, skb))
2530 /* Earlier code assumed this would be a VLAN pkt, 2548 tp_len = -EMSGSIZE;
2531 * double-check this now that we have the actual
2532 * packet in hand.
2533 */
2534 2549
2535 skb_reset_mac_header(skb);
2536 ehdr = eth_hdr(skb);
2537 if (ehdr->h_proto != htons(ETH_P_8021Q))
2538 tp_len = -EMSGSIZE;
2539 }
2540 if (unlikely(tp_len < 0)) { 2550 if (unlikely(tp_len < 0)) {
2541 if (po->tp_loss) { 2551 if (po->tp_loss) {
2542 __packet_set_status(po, ph, 2552 __packet_set_status(po, ph,
@@ -2765,18 +2775,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2765 2775
2766 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); 2776 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2767 2777
2768 if (!gso_type && (len > dev->mtu + reserve + extra_len)) { 2778 if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
2769 /* Earlier code assumed this would be a VLAN pkt, 2779 !packet_extra_vlan_len_allowed(dev, skb)) {
2770 * double-check this now that we have the actual 2780 err = -EMSGSIZE;
2771 * packet in hand. 2781 goto out_free;
2772 */
2773 struct ethhdr *ehdr;
2774 skb_reset_mac_header(skb);
2775 ehdr = eth_hdr(skb);
2776 if (ehdr->h_proto != htons(ETH_P_8021Q)) {
2777 err = -EMSGSIZE;
2778 goto out_free;
2779 }
2780 } 2782 }
2781 2783
2782 skb->protocol = proto; 2784 skb->protocol = proto;
@@ -2807,8 +2809,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2807 len += vnet_hdr_len; 2809 len += vnet_hdr_len;
2808 } 2810 }
2809 2811
2810 if (!packet_use_direct_xmit(po)) 2812 skb_probe_transport_header(skb, reserve);
2811 skb_probe_transport_header(skb, reserve); 2813
2812 if (unlikely(extra_len == 4)) 2814 if (unlikely(extra_len == 4))
2813 skb->no_fcs = 1; 2815 skb->no_fcs = 1;
2814 2816
@@ -4107,7 +4109,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4107 err = -EINVAL; 4109 err = -EINVAL;
4108 if (unlikely((int)req->tp_block_size <= 0)) 4110 if (unlikely((int)req->tp_block_size <= 0))
4109 goto out; 4111 goto out;
4110 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1))) 4112 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4111 goto out; 4113 goto out;
4112 if (po->tp_version >= TPACKET_V3 && 4114 if (po->tp_version >= TPACKET_V3 &&
4113 (int)(req->tp_block_size - 4115 (int)(req->tp_block_size -
@@ -4119,8 +4121,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4119 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) 4121 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4120 goto out; 4122 goto out;
4121 4123
4122 rb->frames_per_block = req->tp_block_size/req->tp_frame_size; 4124 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4123 if (unlikely(rb->frames_per_block <= 0)) 4125 if (unlikely(rb->frames_per_block == 0))
4124 goto out; 4126 goto out;
4125 if (unlikely((rb->frames_per_block * req->tp_block_nr) != 4127 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4126 req->tp_frame_nr)) 4128 req->tp_frame_nr))
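
Among the af_packet.c changes, the most structural one is extracting the thrice-duplicated over-MTU VLAN double-check into packet_extra_vlan_len_allowed(), which also adds the ARPHRD_ETHER guard so eth_hdr() is never interpreted on non-Ethernet frames. A simplified model of that check, with a made-up struct frame and len_ok() wrapper in place of the real send paths:

#include <arpa/inet.h>	/* htons() */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q 0x8100

struct frame { bool is_ether; uint16_t h_proto; int len; };

static bool extra_vlan_len_allowed(const struct frame *f)
{
	if (!f->is_ether)		/* the added device-type guard */
		return false;
	return f->h_proto == htons(ETH_P_8021Q);
}

static bool len_ok(const struct frame *f, int mtu, int hard_header_len)
{
	if (f->len <= mtu + hard_header_len)
		return true;
	return extra_vlan_len_allowed(f);	/* shared by all call sites */
}

int main(void)
{
	struct frame tagged = { true, htons(ETH_P_8021Q), 1518 };
	struct frame plain  = { true, htons(0x0800),      1518 };

	printf("tagged ok: %d, untagged ok: %d\n",
	       len_ok(&tagged, 1500, 14), len_ok(&plain, 1500, 14));
	return 0;
}
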
diff --git a/net/rds/connection.c b/net/rds/connection.c
index d4564036a339..e3b118cae81d 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -186,12 +186,6 @@ static struct rds_connection *__rds_conn_create(struct net *net,
186 } 186 }
187 } 187 }
188 188
189 if (trans == NULL) {
190 kmem_cache_free(rds_conn_slab, conn);
191 conn = ERR_PTR(-ENODEV);
192 goto out;
193 }
194
195 conn->c_trans = trans; 189 conn->c_trans = trans;
196 190
197 ret = trans->conn_alloc(conn, gfp); 191 ret = trans->conn_alloc(conn, gfp);
diff --git a/net/rds/send.c b/net/rds/send.c
index 827155c2ead1..c9cdb358ea88 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1013,11 +1013,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
1013 release_sock(sk); 1013 release_sock(sk);
1014 } 1014 }
1015 1015
1016 /* racing with another thread binding seems ok here */ 1016 lock_sock(sk);
1017 if (daddr == 0 || rs->rs_bound_addr == 0) { 1017 if (daddr == 0 || rs->rs_bound_addr == 0) {
1018 release_sock(sk);
1018 ret = -ENOTCONN; /* XXX not a great errno */ 1019 ret = -ENOTCONN; /* XXX not a great errno */
1019 goto out; 1020 goto out;
1020 } 1021 }
1022 release_sock(sk);
1021 1023
1022 if (payload_len > rds_sk_sndbuf(rs)) { 1024 if (payload_len > rds_sk_sndbuf(rs)) {
1023 ret = -EMSGSIZE; 1025 ret = -EMSGSIZE;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index b41e9ea2ffff..f53bf3b6558b 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -49,7 +49,6 @@
49struct rfkill { 49struct rfkill {
50 spinlock_t lock; 50 spinlock_t lock;
51 51
52 const char *name;
53 enum rfkill_type type; 52 enum rfkill_type type;
54 53
55 unsigned long state; 54 unsigned long state;
@@ -73,6 +72,7 @@ struct rfkill {
73 struct delayed_work poll_work; 72 struct delayed_work poll_work;
74 struct work_struct uevent_work; 73 struct work_struct uevent_work;
75 struct work_struct sync_work; 74 struct work_struct sync_work;
75 char name[];
76}; 76};
77#define to_rfkill(d) container_of(d, struct rfkill, dev) 77#define to_rfkill(d) container_of(d, struct rfkill, dev)
78 78
@@ -876,14 +876,14 @@ struct rfkill * __must_check rfkill_alloc(const char *name,
876 if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) 876 if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
877 return NULL; 877 return NULL;
878 878
879 rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); 879 rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
880 if (!rfkill) 880 if (!rfkill)
881 return NULL; 881 return NULL;
882 882
883 spin_lock_init(&rfkill->lock); 883 spin_lock_init(&rfkill->lock);
884 INIT_LIST_HEAD(&rfkill->node); 884 INIT_LIST_HEAD(&rfkill->node);
885 rfkill->type = type; 885 rfkill->type = type;
886 rfkill->name = name; 886 strcpy(rfkill->name, name);
887 rfkill->ops = ops; 887 rfkill->ops = ops;
888 rfkill->data = ops_data; 888 rfkill->data = ops_data;
889 889
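
The rfkill change stops borrowing the caller's name pointer, which the caller was free to release, and instead copies the string into a flexible array member at the tail of struct rfkill, sized into the same kzalloc(). The idiom in isolation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rfkill {
	int type;
	char name[];	/* flexible array member: must be last */
};

static struct rfkill *rfkill_alloc(const char *name, int type)
{
	struct rfkill *rf = calloc(1, sizeof(*rf) + strlen(name) + 1);

	if (!rf)
		return NULL;
	rf->type = type;
	strcpy(rf->name, name);	/* owned copy; caller's buffer may die */
	return rf;
}

int main(void)
{
	char stack_name[] = "wlan0-rfkill";
	struct rfkill *rf = rfkill_alloc(stack_name, 1);

	if (!rf)
		return 1;
	memset(stack_name, 0, sizeof(stack_name)); /* caller reuses buffer */
	printf("name survives: %s\n", rf->name);
	free(rf);
	return 0;
}
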
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index e0547f521f20..adc555e0323d 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -723,8 +723,10 @@ process_further:
723 723
724 if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY || 724 if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
725 call->state == RXRPC_CALL_SERVER_AWAIT_ACK) && 725 call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
726 hard > tx) 726 hard > tx) {
727 call->acks_hard = tx;
727 goto all_acked; 728 goto all_acked;
729 }
728 730
729 smp_rmb(); 731 smp_rmb();
730 rxrpc_rotate_tx_window(call, hard - 1); 732 rxrpc_rotate_tx_window(call, hard - 1);
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index a40d3afe93b7..14c4e12c47b0 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -531,7 +531,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
531 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 531 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
532 532
533 /* this should be in poll */ 533 /* this should be in poll */
534 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 534 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
535 535
536 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 536 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
537 return -EPIPE; 537 return -EPIPE;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index f43c8f33f09e..b5c2cf2aa6d4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -253,7 +253,8 @@ int qdisc_set_default(const char *name)
253} 253}
254 254
255/* We know handle. Find qdisc among all qdisc's attached to device 255/* We know handle. Find qdisc among all qdisc's attached to device
256 (root qdisc, all its children, children of children etc.) 256 * (root qdisc, all its children, children of children etc.)
257 * Note: caller either uses rtnl or rcu_read_lock()
257 */ 258 */
258 259
259static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) 260static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
@@ -264,7 +265,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
264 root->handle == handle) 265 root->handle == handle)
265 return root; 266 return root;
266 267
267 list_for_each_entry(q, &root->list, list) { 268 list_for_each_entry_rcu(q, &root->list, list) {
268 if (q->handle == handle) 269 if (q->handle == handle)
269 return q; 270 return q;
270 } 271 }
@@ -277,15 +278,18 @@ void qdisc_list_add(struct Qdisc *q)
277 struct Qdisc *root = qdisc_dev(q)->qdisc; 278 struct Qdisc *root = qdisc_dev(q)->qdisc;
278 279
279 WARN_ON_ONCE(root == &noop_qdisc); 280 WARN_ON_ONCE(root == &noop_qdisc);
280 list_add_tail(&q->list, &root->list); 281 ASSERT_RTNL();
282 list_add_tail_rcu(&q->list, &root->list);
281 } 283 }
282} 284}
283EXPORT_SYMBOL(qdisc_list_add); 285EXPORT_SYMBOL(qdisc_list_add);
284 286
285void qdisc_list_del(struct Qdisc *q) 287void qdisc_list_del(struct Qdisc *q)
286{ 288{
287 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) 289 if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
288 list_del(&q->list); 290 ASSERT_RTNL();
291 list_del_rcu(&q->list);
292 }
289} 293}
290EXPORT_SYMBOL(qdisc_list_del); 294EXPORT_SYMBOL(qdisc_list_del);
291 295
@@ -750,14 +754,18 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
750 if (n == 0) 754 if (n == 0)
751 return; 755 return;
752 drops = max_t(int, n, 0); 756 drops = max_t(int, n, 0);
757 rcu_read_lock();
753 while ((parentid = sch->parent)) { 758 while ((parentid = sch->parent)) {
754 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS)) 759 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
755 return; 760 break;
756 761
762 if (sch->flags & TCQ_F_NOPARENT)
763 break;
764 /* TODO: perform the search on a per txq basis */
757 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid)); 765 sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
758 if (sch == NULL) { 766 if (sch == NULL) {
759 WARN_ON(parentid != TC_H_ROOT); 767 WARN_ON_ONCE(parentid != TC_H_ROOT);
760 return; 768 break;
761 } 769 }
762 cops = sch->ops->cl_ops; 770 cops = sch->ops->cl_ops;
763 if (cops->qlen_notify) { 771 if (cops->qlen_notify) {
@@ -768,6 +776,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
768 sch->q.qlen -= n; 776 sch->q.qlen -= n;
769 __qdisc_qstats_drop(sch, drops); 777 __qdisc_qstats_drop(sch, drops);
770 } 778 }
779 rcu_read_unlock();
771} 780}
772EXPORT_SYMBOL(qdisc_tree_decrease_qlen); 781EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
773 782
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index cb5d4ad32946..e82a1ad80aa5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -737,7 +737,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
737 return; 737 return;
738 } 738 }
739 if (!netif_is_multiqueue(dev)) 739 if (!netif_is_multiqueue(dev))
740 qdisc->flags |= TCQ_F_ONETXQUEUE; 740 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
741 dev_queue->qdisc_sleeping = qdisc; 741 dev_queue->qdisc_sleeping = qdisc;
742} 742}
743 743
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index f3cbaecd283a..3e82f047caaf 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -63,7 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
63 if (qdisc == NULL) 63 if (qdisc == NULL)
64 goto err; 64 goto err;
65 priv->qdiscs[ntx] = qdisc; 65 priv->qdiscs[ntx] = qdisc;
66 qdisc->flags |= TCQ_F_ONETXQUEUE; 66 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
67 } 67 }
68 68
69 sch->flags |= TCQ_F_MQROOT; 69 sch->flags |= TCQ_F_MQROOT;
@@ -156,7 +156,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
156 156
157 *old = dev_graft_qdisc(dev_queue, new); 157 *old = dev_graft_qdisc(dev_queue, new);
158 if (new) 158 if (new)
159 new->flags |= TCQ_F_ONETXQUEUE; 159 new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
160 if (dev->flags & IFF_UP) 160 if (dev->flags & IFF_UP)
161 dev_activate(dev); 161 dev_activate(dev);
162 return 0; 162 return 0;
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 3811a745452c..ad70ecf57ce7 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -132,7 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
132 goto err; 132 goto err;
133 } 133 }
134 priv->qdiscs[i] = qdisc; 134 priv->qdiscs[i] = qdisc;
135 qdisc->flags |= TCQ_F_ONETXQUEUE; 135 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
136 } 136 }
137 137
138 /* If the mqprio options indicate that hardware should own 138 /* If the mqprio options indicate that hardware should own
@@ -209,7 +209,7 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
209 *old = dev_graft_qdisc(dev_queue, new); 209 *old = dev_graft_qdisc(dev_queue, new);
210 210
211 if (new) 211 if (new)
212 new->flags |= TCQ_F_ONETXQUEUE; 212 new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
213 213
214 if (dev->flags & IFF_UP) 214 if (dev->flags & IFF_UP)
215 dev_activate(dev); 215 dev_activate(dev);
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 4f15b7d730e1..1543e39f47c3 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -809,8 +809,8 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
809 if (!has_sha1) 809 if (!has_sha1)
810 return -EINVAL; 810 return -EINVAL;
811 811
812 memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0], 812 for (i = 0; i < hmacs->shmac_num_idents; i++)
813 hmacs->shmac_num_idents * sizeof(__u16)); 813 ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
814 ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) + 814 ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
815 hmacs->shmac_num_idents * sizeof(__u16)); 815 hmacs->shmac_num_idents * sizeof(__u16));
816 return 0; 816 return 0;
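
sctp_auth_ep_set_hmacs() builds a wire-format parameter, so each 16-bit HMAC identifier must be stored with htons(); the old bulk memcpy preserved host byte order, which produces the wrong encoding on little-endian machines. A small demonstration (the two arrays differ only on little-endian hosts):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint16_t idents[2] = { 1, 3 };	/* host-order HMAC ids */
	uint16_t raw[2], wire[2];

	memcpy(raw, idents, sizeof(idents));	/* old: host order kept */
	for (int i = 0; i < 2; i++)
		wire[i] = htons(idents[i]);	/* new: per-element swap */

	printf("raw[0]=0x%04x wire[0]=0x%04x\n", raw[0], wire[0]);
	/* Little-endian host: raw[0] sits in memory as 01 00, but the
	 * wire expects 00 01, which is exactly what htons() yields. */
	return 0;
}
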
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index e917d27328ea..ec529121f38a 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -209,6 +209,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
209 struct sock *sk = skb->sk; 209 struct sock *sk = skb->sk;
210 struct ipv6_pinfo *np = inet6_sk(sk); 210 struct ipv6_pinfo *np = inet6_sk(sk);
211 struct flowi6 *fl6 = &transport->fl.u.ip6; 211 struct flowi6 *fl6 = &transport->fl.u.ip6;
212 int res;
212 213
213 pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, 214 pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
214 skb->len, &fl6->saddr, &fl6->daddr); 215 skb->len, &fl6->saddr, &fl6->daddr);
@@ -220,7 +221,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
220 221
221 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); 222 SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
222 223
223 return ip6_xmit(sk, skb, fl6, np->opt, np->tclass); 224 rcu_read_lock();
225 res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
226 rcu_read_unlock();
227 return res;
224} 228}
225 229
226/* Returns the dst cache entry for the given source and destination ip 230/* Returns the dst cache entry for the given source and destination ip
@@ -262,7 +266,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
262 pr_debug("src=%pI6 - ", &fl6->saddr); 266 pr_debug("src=%pI6 - ", &fl6->saddr);
263 } 267 }
264 268
265 final_p = fl6_update_dst(fl6, np->opt, &final); 269 rcu_read_lock();
270 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
271 rcu_read_unlock();
272
266 dst = ip6_dst_lookup_flow(sk, fl6, final_p); 273 dst = ip6_dst_lookup_flow(sk, fl6, final_p);
267 if (!asoc || saddr) 274 if (!asoc || saddr)
268 goto out; 275 goto out;
@@ -316,14 +323,13 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
316 } 323 }
317 } 324 }
318 } 325 }
319 rcu_read_unlock();
320
321 if (baddr) { 326 if (baddr) {
322 fl6->saddr = baddr->v6.sin6_addr; 327 fl6->saddr = baddr->v6.sin6_addr;
323 fl6->fl6_sport = baddr->v6.sin6_port; 328 fl6->fl6_sport = baddr->v6.sin6_port;
324 final_p = fl6_update_dst(fl6, np->opt, &final); 329 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
325 dst = ip6_dst_lookup_flow(sk, fl6, final_p); 330 dst = ip6_dst_lookup_flow(sk, fl6, final_p);
326 } 331 }
332 rcu_read_unlock();
327 333
328out: 334out:
329 if (!IS_ERR_OR_NULL(dst)) { 335 if (!IS_ERR_OR_NULL(dst)) {
@@ -635,6 +641,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
635 struct sock *newsk; 641 struct sock *newsk;
636 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); 642 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
637 struct sctp6_sock *newsctp6sk; 643 struct sctp6_sock *newsctp6sk;
644 struct ipv6_txoptions *opt;
638 645
639 newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0); 646 newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0);
640 if (!newsk) 647 if (!newsk)
@@ -654,6 +661,13 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
654 661
655 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 662 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
656 663
664 rcu_read_lock();
665 opt = rcu_dereference(np->opt);
666 if (opt)
667 opt = ipv6_dup_options(newsk, opt);
668 RCU_INIT_POINTER(newnp->opt, opt);
669 rcu_read_unlock();
670
657 /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname() 671 /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname()
658 * and getpeername(). 672 * and getpeername().
659 */ 673 */
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 7e8f0a117106..c0380cfb16ae 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -324,6 +324,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
324 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : 324 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
325 "illegal chunk"); 325 "illegal chunk");
326 326
327 sctp_chunk_hold(chunk);
327 sctp_outq_tail_data(q, chunk); 328 sctp_outq_tail_data(q, chunk);
328 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) 329 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
329 SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); 330 SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
@@ -1251,6 +1252,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
1251 */ 1252 */
1252 1253
1253 sack_a_rwnd = ntohl(sack->a_rwnd); 1254 sack_a_rwnd = ntohl(sack->a_rwnd);
1255 asoc->peer.zero_window_announced = !sack_a_rwnd;
1254 outstanding = q->outstanding_bytes; 1256 outstanding = q->outstanding_bytes;
1255 1257
1256 if (outstanding < sack_a_rwnd) 1258 if (outstanding < sack_a_rwnd)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 763e06a55155..5d6a03fad378 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1652,7 +1652,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
1652 1652
1653 /* Set an expiration time for the cookie. */ 1653 /* Set an expiration time for the cookie. */
1654 cookie->c.expiration = ktime_add(asoc->cookie_life, 1654 cookie->c.expiration = ktime_add(asoc->cookie_life,
1655 ktime_get()); 1655 ktime_get_real());
1656 1656
1657 /* Copy the peer's init packet. */ 1657 /* Copy the peer's init packet. */
1658 memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, 1658 memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr,
@@ -1780,7 +1780,7 @@ no_hmac:
1780 if (sock_flag(ep->base.sk, SOCK_TIMESTAMP)) 1780 if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
1781 kt = skb_get_ktime(skb); 1781 kt = skb_get_ktime(skb);
1782 else 1782 else
1783 kt = ktime_get(); 1783 kt = ktime_get_real();
1784 1784
1785 if (!asoc && ktime_before(bear_cookie->expiration, kt)) { 1785 if (!asoc && ktime_before(bear_cookie->expiration, kt)) {
1786 /* 1786 /*
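
Both sm_make_chunk.c hunks switch the cookie clock from the monotonic ktime_get() to the wall-clock ktime_get_real(). The expiration check may compare against skb_get_ktime(), which is a wall-clock timestamp, so stamping cookies with the monotonic clock (which starts near zero at boot) made the comparison meaningless. The rule, sketched:

	/* stamp and compare with the same clock the skb timestamp uses */
	cookie->c.expiration = ktime_add(asoc->cookie_life, ktime_get_real());
	...
	kt = sock_flag(sk, SOCK_TIMESTAMP) ? skb_get_ktime(skb)
					   : ktime_get_real();
	if (ktime_before(cookie->c.expiration, kt))
		; /* stale cookie: reject */
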
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 6f46aa16cb76..cd34a4a34065 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5412,7 +5412,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net,
5412 SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS); 5412 SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS);
5413 5413
5414 if (asoc->overall_error_count >= asoc->max_retrans) { 5414 if (asoc->overall_error_count >= asoc->max_retrans) {
5415 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { 5415 if (asoc->peer.zero_window_announced &&
5416 asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
5416 /* 5417 /*
5417 * We are here likely because the receiver had its rwnd 5418 * We are here likely because the receiver had its rwnd
5418 * closed for a while and we have not been able to 5419 * closed for a while and we have not been able to
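
This hunk works together with the net/sctp/outqueue.c change above: sctp_outq_sack() now records in asoc->peer.zero_window_announced whether the peer ever advertised a zero receive window, and the T3-rtx handler only excuses exceeding max_retrans when that actually happened. The combined condition, as a sketch:

	/* on every SACK */
	asoc->peer.zero_window_announced = !sack_a_rwnd;

	/* on T3-rtx expiry */
	if (asoc->overall_error_count >= asoc->max_retrans &&
	    !(asoc->peer.zero_window_announced &&
	      asoc->state == SCTP_STATE_SHUTDOWN_PENDING))
		; /* give up: this is a dead peer, not zero-window probing */
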
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 897c01c029ca..9b6cc6de80d8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -972,7 +972,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
972 return -EFAULT; 972 return -EFAULT;
973 973
974 /* Alloc space for the address array in kernel memory. */ 974 /* Alloc space for the address array in kernel memory. */
975 kaddrs = kmalloc(addrs_size, GFP_KERNEL); 975 kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
976 if (unlikely(!kaddrs)) 976 if (unlikely(!kaddrs))
977 return -ENOMEM; 977 return -ENOMEM;
978 978
@@ -1952,8 +1952,6 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1952 1952
1953 /* Now send the (possibly) fragmented message. */ 1953 /* Now send the (possibly) fragmented message. */
1954 list_for_each_entry(chunk, &datamsg->chunks, frag_list) { 1954 list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
1955 sctp_chunk_hold(chunk);
1956
1957 /* Do accounting for the write space. */ 1955 /* Do accounting for the write space. */
1958 sctp_set_owner_w(chunk); 1956 sctp_set_owner_w(chunk);
1959 1957
@@ -1966,15 +1964,13 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1966 * breaks. 1964 * breaks.
1967 */ 1965 */
1968 err = sctp_primitive_SEND(net, asoc, datamsg); 1966 err = sctp_primitive_SEND(net, asoc, datamsg);
1967 sctp_datamsg_put(datamsg);
1969 /* Did the lower layer accept the chunk? */ 1968 /* Did the lower layer accept the chunk? */
1970 if (err) { 1969 if (err)
1971 sctp_datamsg_free(datamsg);
1972 goto out_free; 1970 goto out_free;
1973 }
1974 1971
1975 pr_debug("%s: we sent primitively\n", __func__); 1972 pr_debug("%s: we sent primitively\n", __func__);
1976 1973
1977 sctp_datamsg_put(datamsg);
1978 err = msg_len; 1974 err = msg_len;
1979 1975
1980 if (unlikely(wait_connect)) { 1976 if (unlikely(wait_connect)) {
@@ -4928,7 +4924,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4928 to = optval + offsetof(struct sctp_getaddrs, addrs); 4924 to = optval + offsetof(struct sctp_getaddrs, addrs);
4929 space_left = len - offsetof(struct sctp_getaddrs, addrs); 4925 space_left = len - offsetof(struct sctp_getaddrs, addrs);
4930 4926
4931 addrs = kmalloc(space_left, GFP_KERNEL); 4927 addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
4932 if (!addrs) 4928 if (!addrs)
4933 return -ENOMEM; 4929 return -ENOMEM;
4934 4930
@@ -6458,7 +6454,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
6458 if (sctp_writeable(sk)) { 6454 if (sctp_writeable(sk)) {
6459 mask |= POLLOUT | POLLWRNORM; 6455 mask |= POLLOUT | POLLWRNORM;
6460 } else { 6456 } else {
6461 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 6457 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
6462 /* 6458 /*
6463 * Since the socket is not locked, the buffer 6459 * Since the socket is not locked, the buffer
6464 * might be made available after the writeable check and 6460 * might be made available after the writeable check and
@@ -6801,26 +6797,30 @@ no_packet:
6801static void __sctp_write_space(struct sctp_association *asoc) 6797static void __sctp_write_space(struct sctp_association *asoc)
6802{ 6798{
6803 struct sock *sk = asoc->base.sk; 6799 struct sock *sk = asoc->base.sk;
6804 struct socket *sock = sk->sk_socket;
6805 6800
6806 if ((sctp_wspace(asoc) > 0) && sock) { 6801 if (sctp_wspace(asoc) <= 0)
6807 if (waitqueue_active(&asoc->wait)) 6802 return;
6808 wake_up_interruptible(&asoc->wait); 6803
6804 if (waitqueue_active(&asoc->wait))
6805 wake_up_interruptible(&asoc->wait);
6809 6806
6810 if (sctp_writeable(sk)) { 6807 if (sctp_writeable(sk)) {
6811 wait_queue_head_t *wq = sk_sleep(sk); 6808 struct socket_wq *wq;
6812 6809
6813 if (wq && waitqueue_active(wq)) 6810 rcu_read_lock();
6814 wake_up_interruptible(wq); 6811 wq = rcu_dereference(sk->sk_wq);
6812 if (wq) {
6813 if (waitqueue_active(&wq->wait))
6814 wake_up_interruptible(&wq->wait);
6815 6815
6816 /* Note that we try to include the Async I/O support 6816 /* Note that we try to include the Async I/O support
6817 * here by modeling from the current TCP/UDP code. 6817 * here by modeling from the current TCP/UDP code.
6818 * We have not tested with it yet. 6818 * We have not tested with it yet.
6819 */ 6819 */
6820 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) 6820 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
6821 sock_wake_async(sock, 6821 sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
6822 SOCK_WAKE_SPACE, POLL_OUT);
6823 } 6822 }
6823 rcu_read_unlock();
6824 } 6824 }
6825} 6825}
6826 6826
@@ -7163,6 +7163,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
7163 newsk->sk_type = sk->sk_type; 7163 newsk->sk_type = sk->sk_type;
7164 newsk->sk_bound_dev_if = sk->sk_bound_dev_if; 7164 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
7165 newsk->sk_flags = sk->sk_flags; 7165 newsk->sk_flags = sk->sk_flags;
7166 newsk->sk_tsflags = sk->sk_tsflags;
7166 newsk->sk_no_check_tx = sk->sk_no_check_tx; 7167 newsk->sk_no_check_tx = sk->sk_no_check_tx;
7167 newsk->sk_no_check_rx = sk->sk_no_check_rx; 7168 newsk->sk_no_check_rx = sk->sk_no_check_rx;
7168 newsk->sk_reuse = sk->sk_reuse; 7169 newsk->sk_reuse = sk->sk_reuse;
@@ -7195,6 +7196,9 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
7195 newinet->mc_ttl = 1; 7196 newinet->mc_ttl = 1;
7196 newinet->mc_index = 0; 7197 newinet->mc_index = 0;
7197 newinet->mc_list = NULL; 7198 newinet->mc_list = NULL;
7199
7200 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
7201 net_enable_timestamp();
7198} 7202}
7199 7203
7200static inline void sctp_copy_descendant(struct sock *sk_to, 7204static inline void sctp_copy_descendant(struct sock *sk_to,
@@ -7375,6 +7379,13 @@ struct proto sctp_prot = {
7375 7379
7376#if IS_ENABLED(CONFIG_IPV6) 7380#if IS_ENABLED(CONFIG_IPV6)
7377 7381
7382#include <net/transp_v6.h>
7383static void sctp_v6_destroy_sock(struct sock *sk)
7384{
7385 sctp_destroy_sock(sk);
7386 inet6_destroy_sock(sk);
7387}
7388
7378struct proto sctpv6_prot = { 7389struct proto sctpv6_prot = {
7379 .name = "SCTPv6", 7390 .name = "SCTPv6",
7380 .owner = THIS_MODULE, 7391 .owner = THIS_MODULE,
@@ -7384,7 +7395,7 @@ struct proto sctpv6_prot = {
7384 .accept = sctp_accept, 7395 .accept = sctp_accept,
7385 .ioctl = sctp_ioctl, 7396 .ioctl = sctp_ioctl,
7386 .init = sctp_init_sock, 7397 .init = sctp_init_sock,
7387 .destroy = sctp_destroy_sock, 7398 .destroy = sctp_v6_destroy_sock,
7388 .shutdown = sctp_shutdown, 7399 .shutdown = sctp_shutdown,
7389 .setsockopt = sctp_setsockopt, 7400 .setsockopt = sctp_setsockopt,
7390 .getsockopt = sctp_getsockopt, 7401 .getsockopt = sctp_getsockopt,
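
Two themes run through the socket.c hunks above. First, the kmalloc() calls that size an allocation directly from a user-supplied length switch to GFP_USER | __GFP_NOWARN, so an absurd optlen fails quietly with -ENOMEM instead of triggering an allocator warning splat:

	kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
	if (unlikely(!kaddrs))
		return -ENOMEM;	/* oversized user requests simply fail */

Second, the new sctp_v6_destroy_sock() finally calls inet6_destroy_sock(), which is what releases the RCU-managed np->opt introduced by the ipv6.c hunks; SCTPv6 sockets previously skipped that step entirely.
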
diff --git a/net/socket.c b/net/socket.c
index dd2c247c99e3..29822d6dd91e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1056,27 +1056,20 @@ static int sock_fasync(int fd, struct file *filp, int on)
1056 return 0; 1056 return 0;
1057} 1057}
1058 1058
1059/* This function may be called only under socket lock or callback_lock or rcu_lock */ 1059/* This function may be called only under rcu_lock */
1060 1060
1061int sock_wake_async(struct socket *sock, int how, int band) 1061int sock_wake_async(struct socket_wq *wq, int how, int band)
1062{ 1062{
1063 struct socket_wq *wq; 1063 if (!wq || !wq->fasync_list)
1064
1065 if (!sock)
1066 return -1;
1067 rcu_read_lock();
1068 wq = rcu_dereference(sock->wq);
1069 if (!wq || !wq->fasync_list) {
1070 rcu_read_unlock();
1071 return -1; 1064 return -1;
1072 } 1065
1073 switch (how) { 1066 switch (how) {
1074 case SOCK_WAKE_WAITD: 1067 case SOCK_WAKE_WAITD:
1075 if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) 1068 if (test_bit(SOCKWQ_ASYNC_WAITDATA, &wq->flags))
1076 break; 1069 break;
1077 goto call_kill; 1070 goto call_kill;
1078 case SOCK_WAKE_SPACE: 1071 case SOCK_WAKE_SPACE:
1079 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags)) 1072 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
1080 break; 1073 break;
1081 /* fall through */ 1074 /* fall through */
1082 case SOCK_WAKE_IO: 1075 case SOCK_WAKE_IO:
@@ -1086,7 +1079,7 @@ call_kill:
1086 case SOCK_WAKE_URG: 1079 case SOCK_WAKE_URG:
1087 kill_fasync(&wq->fasync_list, SIGURG, band); 1080 kill_fasync(&wq->fasync_list, SIGURG, band);
1088 } 1081 }
1089 rcu_read_unlock(); 1082
1090 return 0; 1083 return 0;
1091} 1084}
1092EXPORT_SYMBOL(sock_wake_async); 1085EXPORT_SYMBOL(sock_wake_async);
@@ -1702,6 +1695,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
1702 msg.msg_name = addr ? (struct sockaddr *)&address : NULL; 1695 msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
1703 /* We assume all kernel code knows the size of sockaddr_storage */ 1696 /* We assume all kernel code knows the size of sockaddr_storage */
1704 msg.msg_namelen = 0; 1697 msg.msg_namelen = 0;
1698 msg.msg_iocb = NULL;
1705 if (sock->file->f_flags & O_NONBLOCK) 1699 if (sock->file->f_flags & O_NONBLOCK)
1706 flags |= MSG_DONTWAIT; 1700 flags |= MSG_DONTWAIT;
1707 err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags); 1701 err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags);
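
The sock_wake_async() rewrite shifts responsibility to the caller: the function now receives the struct socket_wq directly (the SOCKWQ_ASYNC_* bits live in wq->flags), and it must be called from inside an RCU read-side section, since that is the only thing keeping wq alive. The __sctp_write_space() hunk above shows the caller shape; generically:

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq) {
		if (waitqueue_active(&wq->wait))
			wake_up_interruptible(&wq->wait);
		sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();

The small recvfrom() hunk is independent: the msghdr lives on the stack, so msg_iocb must be zeroed explicitly before sock_recvmsg() inspects it.
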
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f14f24ee9983..73ad57a59989 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -250,11 +250,11 @@ void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
250} 250}
251EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue); 251EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
252 252
253static int rpc_wait_bit_killable(struct wait_bit_key *key) 253static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
254{ 254{
255 if (fatal_signal_pending(current))
256 return -ERESTARTSYS;
257 freezable_schedule_unsafe(); 255 freezable_schedule_unsafe();
256 if (signal_pending_state(mode, current))
257 return -ERESTARTSYS;
258 return 0; 258 return 0;
259} 259}
260 260
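
The sched.c change reorders the wait-bit callback: sleep first, then test for signals, and test with signal_pending_state(mode, current) so the check matches how the task actually slept (TASK_INTERRUPTIBLE vs TASK_KILLABLE). A generic, non-freezable sketch of the fixed shape (my_wait_bit is a hypothetical name):

	static int my_wait_bit(struct wait_bit_key *key, int mode)
	{
		schedule();			/* wait for the waker first */
		if (signal_pending_state(mode, current))
			return -ERESTARTSYS;	/* only signals valid for this mode abort */
		return 0;
	}
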
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index bc5b7b5032ca..cc9852897395 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1364,6 +1364,19 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
1364 memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg)); 1364 memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
1365 memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res)); 1365 memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
1366 1366
1367 /* Adjust the argument buffer length */
1368 rqstp->rq_arg.len = req->rq_private_buf.len;
1369 if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
1370 rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
1371 rqstp->rq_arg.page_len = 0;
1372 } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
1373 rqstp->rq_arg.page_len)
1374 rqstp->rq_arg.page_len = rqstp->rq_arg.len -
1375 rqstp->rq_arg.head[0].iov_len;
1376 else
1377 rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
1378 rqstp->rq_arg.page_len;
1379
1367 /* reset result send buffer "put" position */ 1380 /* reset result send buffer "put" position */
1368 resv->iov_len = 0; 1381 resv->iov_len = 0;
1369 1382
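
bc_svc_process() copies rq_rcv_buf into rq_arg wholesale, so the head/page split it inherits may not match the number of bytes actually received; the new block clamps rq_arg.len to rq_private_buf.len and re-splits it across the head iovec and the page data. A self-contained userspace model of that three-way trim (the struct and names are mine, not kernel API):

	#include <stddef.h>

	struct two_part_buf {
		size_t head_len;	/* bytes in the inline header part */
		size_t page_len;	/* bytes carried in attached pages */
		size_t len;		/* total valid bytes */
	};

	static void trim_to(struct two_part_buf *b, size_t want)
	{
		b->len = want;
		if (b->len <= b->head_len) {
			b->head_len = b->len;	/* fits entirely in the head */
			b->page_len = 0;
		} else if (b->len <= b->head_len + b->page_len) {
			b->page_len = b->len - b->head_len;
		} else {
			/* can't claim more than the buffer holds */
			b->len = b->head_len + b->page_len;
		}
	}
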
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 1d1a70498910..2ffaf6a79499 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -398,7 +398,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
398 if (unlikely(!sock)) 398 if (unlikely(!sock))
399 return -ENOTSOCK; 399 return -ENOTSOCK;
400 400
401 clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); 401 clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags);
402 if (base != 0) { 402 if (base != 0) {
403 addr = NULL; 403 addr = NULL;
404 addrlen = 0; 404 addrlen = 0;
@@ -442,7 +442,7 @@ static void xs_nospace_callback(struct rpc_task *task)
442 struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt); 442 struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
443 443
444 transport->inet->sk_write_pending--; 444 transport->inet->sk_write_pending--;
445 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 445 clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
446} 446}
447 447
448/** 448/**
@@ -467,7 +467,7 @@ static int xs_nospace(struct rpc_task *task)
467 467
468 /* Don't race with disconnect */ 468 /* Don't race with disconnect */
469 if (xprt_connected(xprt)) { 469 if (xprt_connected(xprt)) {
470 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { 470 if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) {
471 /* 471 /*
472 * Notify TCP that we're limited by the application 472 * Notify TCP that we're limited by the application
473 * window size 473 * window size
@@ -478,7 +478,7 @@ static int xs_nospace(struct rpc_task *task)
478 xprt_wait_for_buffer_space(task, xs_nospace_callback); 478 xprt_wait_for_buffer_space(task, xs_nospace_callback);
479 } 479 }
480 } else { 480 } else {
481 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 481 clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
482 ret = -ENOTCONN; 482 ret = -ENOTCONN;
483 } 483 }
484 484
@@ -626,7 +626,7 @@ process_status:
626 case -EPERM: 626 case -EPERM:
627 /* When the server has died, an ICMP port unreachable message 627 /* When the server has died, an ICMP port unreachable message
628 * prompts ECONNREFUSED. */ 628 * prompts ECONNREFUSED. */
629 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 629 clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
630 } 630 }
631 631
632 return status; 632 return status;
@@ -715,7 +715,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
715 case -EADDRINUSE: 715 case -EADDRINUSE:
716 case -ENOBUFS: 716 case -ENOBUFS:
717 case -EPIPE: 717 case -EPIPE:
718 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 718 clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
719 } 719 }
720 720
721 return status; 721 return status;
@@ -1618,7 +1618,7 @@ static void xs_write_space(struct sock *sk)
1618 1618
1619 if (unlikely(!(xprt = xprt_from_sock(sk)))) 1619 if (unlikely(!(xprt = xprt_from_sock(sk))))
1620 return; 1620 return;
1621 if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0) 1621 if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0)
1622 return; 1622 return;
1623 1623
1624 xprt_write_space(xprt); 1624 xprt_write_space(xprt);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 9efbdbde2b08..91aea071ab27 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -191,6 +191,7 @@ void tipc_link_add_bc_peer(struct tipc_link *snd_l,
191 191
192 snd_l->ackers++; 192 snd_l->ackers++;
193 rcv_l->acked = snd_l->snd_nxt - 1; 193 rcv_l->acked = snd_l->snd_nxt - 1;
194 snd_l->state = LINK_ESTABLISHED;
194 tipc_link_build_bc_init_msg(uc_l, xmitq); 195 tipc_link_build_bc_init_msg(uc_l, xmitq);
195} 196}
196 197
@@ -206,6 +207,7 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
206 rcv_l->state = LINK_RESET; 207 rcv_l->state = LINK_RESET;
207 if (!snd_l->ackers) { 208 if (!snd_l->ackers) {
208 tipc_link_reset(snd_l); 209 tipc_link_reset(snd_l);
210 snd_l->state = LINK_RESET;
209 __skb_queue_purge(xmitq); 211 __skb_queue_purge(xmitq);
210 } 212 }
211} 213}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 552dbaba9cf3..b53246fb0412 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -105,6 +105,7 @@ struct tipc_sock {
105static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); 105static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
106static void tipc_data_ready(struct sock *sk); 106static void tipc_data_ready(struct sock *sk);
107static void tipc_write_space(struct sock *sk); 107static void tipc_write_space(struct sock *sk);
108static void tipc_sock_destruct(struct sock *sk);
108static int tipc_release(struct socket *sock); 109static int tipc_release(struct socket *sock);
109static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); 110static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
110static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); 111static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
@@ -381,6 +382,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
381 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; 382 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
382 sk->sk_data_ready = tipc_data_ready; 383 sk->sk_data_ready = tipc_data_ready;
383 sk->sk_write_space = tipc_write_space; 384 sk->sk_write_space = tipc_write_space;
385 sk->sk_destruct = tipc_sock_destruct;
384 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; 386 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
385 tsk->sent_unacked = 0; 387 tsk->sent_unacked = 0;
386 atomic_set(&tsk->dupl_rcvcnt, 0); 388 atomic_set(&tsk->dupl_rcvcnt, 0);
@@ -470,9 +472,6 @@ static int tipc_release(struct socket *sock)
470 tipc_node_remove_conn(net, dnode, tsk->portid); 472 tipc_node_remove_conn(net, dnode, tsk->portid);
471 } 473 }
472 474
473 /* Discard any remaining (connection-based) messages in receive queue */
474 __skb_queue_purge(&sk->sk_receive_queue);
475
476 /* Reject any messages that accumulated in backlog queue */ 475 /* Reject any messages that accumulated in backlog queue */
477 sock->state = SS_DISCONNECTING; 476 sock->state = SS_DISCONNECTING;
478 release_sock(sk); 477 release_sock(sk);
@@ -1515,6 +1514,11 @@ static void tipc_data_ready(struct sock *sk)
1515 rcu_read_unlock(); 1514 rcu_read_unlock();
1516} 1515}
1517 1516
1517static void tipc_sock_destruct(struct sock *sk)
1518{
1519 __skb_queue_purge(&sk->sk_receive_queue);
1520}
1521
1518/** 1522/**
1519 * filter_connect - Handle all incoming messages for a connection-based socket 1523 * filter_connect - Handle all incoming messages for a connection-based socket
1520 * @tsk: TIPC socket 1524 * @tsk: TIPC socket
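
Purging the receive queue in tipc_release() left a window in which packets could still be queued between the purge and the moment the last reference went away, and those skbs would leak. Doing it from a sk_destruct callback moves the purge to sk_free(), after the refcount has hit zero and nothing can enqueue anymore. The wiring, sketched:

	static void my_sock_destruct(struct sock *sk)
	{
		/* last reference gone: the queue can no longer grow */
		__skb_queue_purge(&sk->sk_receive_queue);
	}

	/* at socket creation time */
	sk->sk_destruct = my_sock_destruct;
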
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index ad2719ad4c1b..70c03271b798 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -158,8 +158,11 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
158 struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; 158 struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
159 struct rtable *rt; 159 struct rtable *rt;
160 160
161 if (skb_headroom(skb) < UDP_MIN_HEADROOM) 161 if (skb_headroom(skb) < UDP_MIN_HEADROOM) {
162 pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC); 162 err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
163 if (err)
164 goto tx_error;
165 }
163 166
164 skb_set_inner_protocol(skb, htons(ETH_P_TIPC)); 167 skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
165 ub = rcu_dereference_rtnl(b->media_ptr); 168 ub = rcu_dereference_rtnl(b->media_ptr);
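
The udp_media.c fix stops ignoring pskb_expand_head()'s return value: when the reallocation fails the skb has not gained any headroom, and pushing a header anyway would write before the buffer. A reusable shape for the check (ensure_headroom() is illustrative):

	static int ensure_headroom(struct sk_buff *skb, unsigned int need)
	{
		if (skb_headroom(skb) < need)
			return pskb_expand_head(skb, need, 0, GFP_ATOMIC);
		return 0;	/* headroom guaranteed on a zero return */
	}
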
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index aaa0b58d6aba..a4631477cedf 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -326,6 +326,118 @@ found:
326 return s; 326 return s;
327} 327}
328 328
329/* Support code for asymmetrically connected dgram sockets
330 *
331 * If a datagram socket is connected to a socket not itself connected
332 * to the first socket (eg, /dev/log), clients may only enqueue more
333 * messages if the present receive queue of the server socket is not
334 * "too large". This means there's a second writeability condition
335 * poll and sendmsg need to test. The dgram recv code will do a wake
336 * up on the peer_wait wait queue of a socket upon reception of a
337 * datagram which needs to be propagated to sleeping would-be writers
338 * since these might not have sent anything so far. This can't be
339 * accomplished via poll_wait because the lifetime of the server
340 * socket might be less than that of its clients if these break their
341 * association with it or if the server socket is closed while clients
342 * are still connected to it and there's no way to inform "a polling
343 * implementation" that it should let go of a certain wait queue
344 *
345 * In order to propagate a wake up, a wait_queue_t of the client
346 * socket is enqueued on the peer_wait queue of the server socket
347 * whose wake function does a wake_up on the ordinary client socket
348 * wait queue. This connection is established whenever a write (or
349 * poll for write) hits the flow control condition and is broken
350 * when the association to the server socket is dissolved or after
351 * a wake up has been relayed.
352 */
353
354static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
355 void *key)
356{
357 struct unix_sock *u;
358 wait_queue_head_t *u_sleep;
359
360 u = container_of(q, struct unix_sock, peer_wake);
361
362 __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
363 q);
364 u->peer_wake.private = NULL;
365
366 /* relaying can only happen while the wq still exists */
367 u_sleep = sk_sleep(&u->sk);
368 if (u_sleep)
369 wake_up_interruptible_poll(u_sleep, key);
370
371 return 0;
372}
373
374static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
375{
376 struct unix_sock *u, *u_other;
377 int rc;
378
379 u = unix_sk(sk);
380 u_other = unix_sk(other);
381 rc = 0;
382 spin_lock(&u_other->peer_wait.lock);
383
384 if (!u->peer_wake.private) {
385 u->peer_wake.private = other;
386 __add_wait_queue(&u_other->peer_wait, &u->peer_wake);
387
388 rc = 1;
389 }
390
391 spin_unlock(&u_other->peer_wait.lock);
392 return rc;
393}
394
395static void unix_dgram_peer_wake_disconnect(struct sock *sk,
396 struct sock *other)
397{
398 struct unix_sock *u, *u_other;
399
400 u = unix_sk(sk);
401 u_other = unix_sk(other);
402 spin_lock(&u_other->peer_wait.lock);
403
404 if (u->peer_wake.private == other) {
405 __remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
406 u->peer_wake.private = NULL;
407 }
408
409 spin_unlock(&u_other->peer_wait.lock);
410}
411
412static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
413 struct sock *other)
414{
415 unix_dgram_peer_wake_disconnect(sk, other);
416 wake_up_interruptible_poll(sk_sleep(sk),
417 POLLOUT |
418 POLLWRNORM |
419 POLLWRBAND);
420}
421
422/* preconditions:
423 * - unix_peer(sk) == other
424 * - association is stable
425 */
426static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
427{
428 int connected;
429
430 connected = unix_dgram_peer_wake_connect(sk, other);
431
432 if (unix_recvq_full(other))
433 return 1;
434
435 if (connected)
436 unix_dgram_peer_wake_disconnect(sk, other);
437
438 return 0;
439}
440
329static int unix_writable(const struct sock *sk) 441static int unix_writable(const struct sock *sk)
330{ 442{
331 return sk->sk_state != TCP_LISTEN && 443 return sk->sk_state != TCP_LISTEN &&
@@ -431,6 +543,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
431 skpair->sk_state_change(skpair); 543 skpair->sk_state_change(skpair);
432 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); 544 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
433 } 545 }
546
547 unix_dgram_peer_wake_disconnect(sk, skpair);
434 sock_put(skpair); /* It may now die */ 548 sock_put(skpair); /* It may now die */
435 unix_peer(sk) = NULL; 549 unix_peer(sk) = NULL;
436 } 550 }
@@ -441,6 +555,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
441 if (state == TCP_LISTEN) 555 if (state == TCP_LISTEN)
442 unix_release_sock(skb->sk, 1); 556 unix_release_sock(skb->sk, 1);
443 /* passed fds are erased in the kfree_skb hook */ 557 /* passed fds are erased in the kfree_skb hook */
558 UNIXCB(skb).consumed = skb->len;
444 kfree_skb(skb); 559 kfree_skb(skb);
445 } 560 }
446 561
@@ -665,6 +780,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
665 INIT_LIST_HEAD(&u->link); 780 INIT_LIST_HEAD(&u->link);
666 mutex_init(&u->readlock); /* single task reading lock */ 781 mutex_init(&u->readlock); /* single task reading lock */
667 init_waitqueue_head(&u->peer_wait); 782 init_waitqueue_head(&u->peer_wait);
783 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
668 unix_insert_socket(unix_sockets_unbound(sk), sk); 784 unix_insert_socket(unix_sockets_unbound(sk), sk);
669out: 785out:
670 if (sk == NULL) 786 if (sk == NULL)
@@ -1032,6 +1148,8 @@ restart:
1032 if (unix_peer(sk)) { 1148 if (unix_peer(sk)) {
1033 struct sock *old_peer = unix_peer(sk); 1149 struct sock *old_peer = unix_peer(sk);
1034 unix_peer(sk) = other; 1150 unix_peer(sk) = other;
1151 unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
1152
1035 unix_state_double_unlock(sk, other); 1153 unix_state_double_unlock(sk, other);
1036 1154
1037 if (other != old_peer) 1155 if (other != old_peer)
@@ -1433,6 +1551,14 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
1433 return err; 1551 return err;
1434} 1552}
1435 1553
1554static bool unix_passcred_enabled(const struct socket *sock,
1555 const struct sock *other)
1556{
1557 return test_bit(SOCK_PASSCRED, &sock->flags) ||
1558 !other->sk_socket ||
1559 test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
1560}
1561
1436/* 1562/*
1437 * Some apps rely on write() giving SCM_CREDENTIALS 1563 * Some apps rely on write() giving SCM_CREDENTIALS
1438 * We include credentials if source or destination socket 1564 * We include credentials if source or destination socket
@@ -1443,14 +1569,41 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1443{ 1569{
1444 if (UNIXCB(skb).pid) 1570 if (UNIXCB(skb).pid)
1445 return; 1571 return;
1446 if (test_bit(SOCK_PASSCRED, &sock->flags) || 1572 if (unix_passcred_enabled(sock, other)) {
1447 !other->sk_socket ||
1448 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
1449 UNIXCB(skb).pid = get_pid(task_tgid(current)); 1573 UNIXCB(skb).pid = get_pid(task_tgid(current));
1450 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid); 1574 current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
1451 } 1575 }
1452} 1576}
1453 1577
1578static int maybe_init_creds(struct scm_cookie *scm,
1579 struct socket *socket,
1580 const struct sock *other)
1581{
1582 int err;
1583 struct msghdr msg = { .msg_controllen = 0 };
1584
1585 err = scm_send(socket, &msg, scm, false);
1586 if (err)
1587 return err;
1588
1589 if (unix_passcred_enabled(socket, other)) {
1590 scm->pid = get_pid(task_tgid(current));
1591 current_uid_gid(&scm->creds.uid, &scm->creds.gid);
1592 }
1593 return err;
1594}
1595
1596static bool unix_skb_scm_eq(struct sk_buff *skb,
1597 struct scm_cookie *scm)
1598{
1599 const struct unix_skb_parms *u = &UNIXCB(skb);
1600
1601 return u->pid == scm->pid &&
1602 uid_eq(u->uid, scm->creds.uid) &&
1603 gid_eq(u->gid, scm->creds.gid) &&
1604 unix_secdata_eq(scm, skb);
1605}
1606
1454/* 1607/*
1455 * Send AF_UNIX data. 1608 * Send AF_UNIX data.
1456 */ 1609 */
@@ -1471,6 +1624,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
1471 struct scm_cookie scm; 1624 struct scm_cookie scm;
1472 int max_level; 1625 int max_level;
1473 int data_len = 0; 1626 int data_len = 0;
1627 int sk_locked;
1474 1628
1475 wait_for_unix_gc(); 1629 wait_for_unix_gc();
1476 err = scm_send(sock, msg, &scm, false); 1630 err = scm_send(sock, msg, &scm, false);
@@ -1549,12 +1703,14 @@ restart:
1549 goto out_free; 1703 goto out_free;
1550 } 1704 }
1551 1705
1706 sk_locked = 0;
1552 unix_state_lock(other); 1707 unix_state_lock(other);
1708restart_locked:
1553 err = -EPERM; 1709 err = -EPERM;
1554 if (!unix_may_send(sk, other)) 1710 if (!unix_may_send(sk, other))
1555 goto out_unlock; 1711 goto out_unlock;
1556 1712
1557 if (sock_flag(other, SOCK_DEAD)) { 1713 if (unlikely(sock_flag(other, SOCK_DEAD))) {
1558 /* 1714 /*
1559 * Check with 1003.1g - what should 1715 * Check with 1003.1g - what should
1560 * datagram error 1716 * datagram error
@@ -1562,10 +1718,14 @@ restart:
1562 unix_state_unlock(other); 1718 unix_state_unlock(other);
1563 sock_put(other); 1719 sock_put(other);
1564 1720
1721 if (!sk_locked)
1722 unix_state_lock(sk);
1723
1565 err = 0; 1724 err = 0;
1566 unix_state_lock(sk);
1567 if (unix_peer(sk) == other) { 1725 if (unix_peer(sk) == other) {
1568 unix_peer(sk) = NULL; 1726 unix_peer(sk) = NULL;
1727 unix_dgram_peer_wake_disconnect_wakeup(sk, other);
1728
1569 unix_state_unlock(sk); 1729 unix_state_unlock(sk);
1570 1730
1571 unix_dgram_disconnected(sk, other); 1731 unix_dgram_disconnected(sk, other);
@@ -1591,21 +1751,38 @@ restart:
1591 goto out_unlock; 1751 goto out_unlock;
1592 } 1752 }
1593 1753
1594 if (unix_peer(other) != sk && unix_recvq_full(other)) { 1754 if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
1595 if (!timeo) { 1755 if (timeo) {
1596 err = -EAGAIN; 1756 timeo = unix_wait_for_peer(other, timeo);
1597 goto out_unlock; 1757
1758 err = sock_intr_errno(timeo);
1759 if (signal_pending(current))
1760 goto out_free;
1761
1762 goto restart;
1598 } 1763 }
1599 1764
1600 timeo = unix_wait_for_peer(other, timeo); 1765 if (!sk_locked) {
1766 unix_state_unlock(other);
1767 unix_state_double_lock(sk, other);
1768 }
1601 1769
1602 err = sock_intr_errno(timeo); 1770 if (unix_peer(sk) != other ||
1603 if (signal_pending(current)) 1771 unix_dgram_peer_wake_me(sk, other)) {
1604 goto out_free; 1772 err = -EAGAIN;
1773 sk_locked = 1;
1774 goto out_unlock;
1775 }
1605 1776
1606 goto restart; 1777 if (!sk_locked) {
1778 sk_locked = 1;
1779 goto restart_locked;
1780 }
1607 } 1781 }
1608 1782
1783 if (unlikely(sk_locked))
1784 unix_state_unlock(sk);
1785
1609 if (sock_flag(other, SOCK_RCVTSTAMP)) 1786 if (sock_flag(other, SOCK_RCVTSTAMP))
1610 __net_timestamp(skb); 1787 __net_timestamp(skb);
1611 maybe_add_creds(skb, sock, other); 1788 maybe_add_creds(skb, sock, other);
@@ -1619,6 +1796,8 @@ restart:
1619 return len; 1796 return len;
1620 1797
1621out_unlock: 1798out_unlock:
1799 if (sk_locked)
1800 unix_state_unlock(sk);
1622 unix_state_unlock(other); 1801 unix_state_unlock(other);
1623out_free: 1802out_free:
1624 kfree_skb(skb); 1803 kfree_skb(skb);
@@ -1740,8 +1919,10 @@ out_err:
1740static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, 1919static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
1741 int offset, size_t size, int flags) 1920 int offset, size_t size, int flags)
1742{ 1921{
1743 int err = 0; 1922 int err;
1744 bool send_sigpipe = true; 1923 bool send_sigpipe = false;
1924 bool init_scm = true;
1925 struct scm_cookie scm;
1745 struct sock *other, *sk = socket->sk; 1926 struct sock *other, *sk = socket->sk;
1746 struct sk_buff *skb, *newskb = NULL, *tail = NULL; 1927 struct sk_buff *skb, *newskb = NULL, *tail = NULL;
1747 1928
@@ -1759,7 +1940,7 @@ alloc_skb:
1759 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, 1940 newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
1760 &err, 0); 1941 &err, 0);
1761 if (!newskb) 1942 if (!newskb)
1762 return err; 1943 goto err;
1763 } 1944 }
1764 1945
1765 /* we must acquire readlock as we modify already present 1946 /* we must acquire readlock as we modify already present
@@ -1768,12 +1949,12 @@ alloc_skb:
1768 err = mutex_lock_interruptible(&unix_sk(other)->readlock); 1949 err = mutex_lock_interruptible(&unix_sk(other)->readlock);
1769 if (err) { 1950 if (err) {
1770 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS; 1951 err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
1771 send_sigpipe = false;
1772 goto err; 1952 goto err;
1773 } 1953 }
1774 1954
1775 if (sk->sk_shutdown & SEND_SHUTDOWN) { 1955 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1776 err = -EPIPE; 1956 err = -EPIPE;
1957 send_sigpipe = true;
1777 goto err_unlock; 1958 goto err_unlock;
1778 } 1959 }
1779 1960
@@ -1782,23 +1963,34 @@ alloc_skb:
1782 if (sock_flag(other, SOCK_DEAD) || 1963 if (sock_flag(other, SOCK_DEAD) ||
1783 other->sk_shutdown & RCV_SHUTDOWN) { 1964 other->sk_shutdown & RCV_SHUTDOWN) {
1784 err = -EPIPE; 1965 err = -EPIPE;
1966 send_sigpipe = true;
1785 goto err_state_unlock; 1967 goto err_state_unlock;
1786 } 1968 }
1787 1969
1970 if (init_scm) {
1971 err = maybe_init_creds(&scm, socket, other);
1972 if (err)
1973 goto err_state_unlock;
1974 init_scm = false;
1975 }
1976
1788 skb = skb_peek_tail(&other->sk_receive_queue); 1977 skb = skb_peek_tail(&other->sk_receive_queue);
1789 if (tail && tail == skb) { 1978 if (tail && tail == skb) {
1790 skb = newskb; 1979 skb = newskb;
1791 } else if (!skb) { 1980 } else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
1792 if (newskb) 1981 if (newskb) {
1793 skb = newskb; 1982 skb = newskb;
1794 else 1983 } else {
1984 tail = skb;
1795 goto alloc_skb; 1985 goto alloc_skb;
1986 }
1796 } else if (newskb) { 1987 } else if (newskb) {
1797 /* this is the fast path; we don't necessarily need 1988 /* this is the fast path; we don't necessarily need
1798 * to call kfree_skb even though with newskb == NULL 1989 * to call kfree_skb even though with newskb == NULL
1799 * this does no harm 1990 * this does no harm
1800 */ 1991 */
1801 consume_skb(newskb); 1992 consume_skb(newskb);
1993 newskb = NULL;
1802 } 1994 }
1803 1995
1804 if (skb_append_pagefrags(skb, page, offset, size)) { 1996 if (skb_append_pagefrags(skb, page, offset, size)) {
@@ -1811,14 +2003,20 @@ alloc_skb:
1811 skb->truesize += size; 2003 skb->truesize += size;
1812 atomic_add(size, &sk->sk_wmem_alloc); 2004 atomic_add(size, &sk->sk_wmem_alloc);
1813 2005
1814 if (newskb) 2006 if (newskb) {
2007 err = unix_scm_to_skb(&scm, skb, false);
2008 if (err)
2009 goto err_state_unlock;
2010 spin_lock(&other->sk_receive_queue.lock);
1815 __skb_queue_tail(&other->sk_receive_queue, newskb); 2011 __skb_queue_tail(&other->sk_receive_queue, newskb);
2012 spin_unlock(&other->sk_receive_queue.lock);
2013 }
1816 2014
1817 unix_state_unlock(other); 2015 unix_state_unlock(other);
1818 mutex_unlock(&unix_sk(other)->readlock); 2016 mutex_unlock(&unix_sk(other)->readlock);
1819 2017
1820 other->sk_data_ready(other); 2018 other->sk_data_ready(other);
1821 2019 scm_destroy(&scm);
1822 return size; 2020 return size;
1823 2021
1824err_state_unlock: 2022err_state_unlock:
@@ -1829,6 +2027,8 @@ err:
1829 kfree_skb(newskb); 2027 kfree_skb(newskb);
1830 if (send_sigpipe && !(flags & MSG_NOSIGNAL)) 2028 if (send_sigpipe && !(flags & MSG_NOSIGNAL))
1831 send_sig(SIGPIPE, current, 0); 2029 send_sig(SIGPIPE, current, 0);
2030 if (!init_scm)
2031 scm_destroy(&scm);
1832 return err; 2032 return err;
1833} 2033}
1834 2034
@@ -1991,7 +2191,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
1991 !timeo) 2191 !timeo)
1992 break; 2192 break;
1993 2193
1994 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2194 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1995 unix_state_unlock(sk); 2195 unix_state_unlock(sk);
1996 timeo = freezable_schedule_timeout(timeo); 2196 timeo = freezable_schedule_timeout(timeo);
1997 unix_state_lock(sk); 2197 unix_state_lock(sk);
@@ -1999,7 +2199,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
1999 if (sock_flag(sk, SOCK_DEAD)) 2199 if (sock_flag(sk, SOCK_DEAD))
2000 break; 2200 break;
2001 2201
2002 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2202 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2003 } 2203 }
2004 2204
2005 finish_wait(sk_sleep(sk), &wait); 2205 finish_wait(sk_sleep(sk), &wait);
@@ -2056,14 +2256,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2056 /* Lock the socket to prevent queue disordering 2256 /* Lock the socket to prevent queue disordering
2057 * while sleeps in memcpy_tomsg 2257 * while sleeps in memcpy_tomsg
2058 */ 2258 */
2059 err = mutex_lock_interruptible(&u->readlock); 2259 mutex_lock(&u->readlock);
2060 if (unlikely(err)) {
2061 /* recvmsg() in non blocking mode is supposed to return -EAGAIN
2062 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
2063 */
2064 err = noblock ? -EAGAIN : -ERESTARTSYS;
2065 goto out;
2066 }
2067 2260
2068 if (flags & MSG_PEEK) 2261 if (flags & MSG_PEEK)
2069 skip = sk_peek_offset(sk, flags); 2262 skip = sk_peek_offset(sk, flags);
@@ -2072,6 +2265,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2072 2265
2073 do { 2266 do {
2074 int chunk; 2267 int chunk;
2268 bool drop_skb;
2075 struct sk_buff *skb, *last; 2269 struct sk_buff *skb, *last;
2076 2270
2077 unix_state_lock(sk); 2271 unix_state_lock(sk);
@@ -2106,12 +2300,12 @@ again:
2106 timeo = unix_stream_data_wait(sk, timeo, last, 2300 timeo = unix_stream_data_wait(sk, timeo, last,
2107 last_len); 2301 last_len);
2108 2302
2109 if (signal_pending(current) || 2303 if (signal_pending(current)) {
2110 mutex_lock_interruptible(&u->readlock)) {
2111 err = sock_intr_errno(timeo); 2304 err = sock_intr_errno(timeo);
2112 goto out; 2305 goto out;
2113 } 2306 }
2114 2307
2308 mutex_lock(&u->readlock);
2115 continue; 2309 continue;
2116unlock: 2310unlock:
2117 unix_state_unlock(sk); 2311 unix_state_unlock(sk);
@@ -2131,10 +2325,7 @@ unlock:
2131 2325
2132 if (check_creds) { 2326 if (check_creds) {
2133 /* Never glue messages from different writers */ 2327 /* Never glue messages from different writers */
2134 if ((UNIXCB(skb).pid != scm.pid) || 2328 if (!unix_skb_scm_eq(skb, &scm))
2135 !uid_eq(UNIXCB(skb).uid, scm.creds.uid) ||
2136 !gid_eq(UNIXCB(skb).gid, scm.creds.gid) ||
2137 !unix_secdata_eq(&scm, skb))
2138 break; 2329 break;
2139 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { 2330 } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
2140 /* Copy credentials */ 2331 /* Copy credentials */
@@ -2152,7 +2343,11 @@ unlock:
2152 } 2343 }
2153 2344
2154 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); 2345 chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
2346 skb_get(skb);
2155 chunk = state->recv_actor(skb, skip, chunk, state); 2347 chunk = state->recv_actor(skb, skip, chunk, state);
2348 drop_skb = !unix_skb_len(skb);
2349 /* skb is only safe to use if !drop_skb */
2350 consume_skb(skb);
2156 if (chunk < 0) { 2351 if (chunk < 0) {
2157 if (copied == 0) 2352 if (copied == 0)
2158 copied = -EFAULT; 2353 copied = -EFAULT;
@@ -2161,6 +2356,18 @@ unlock:
2161 copied += chunk; 2356 copied += chunk;
2162 size -= chunk; 2357 size -= chunk;
2163 2358
2359 if (drop_skb) {
2360 /* the skb was touched by a concurrent reader;
2361 * we should not expect anything from this skb
2362 * anymore and assume it invalid - we can be
2363 * sure it was dropped from the socket queue
2364 *
2365 * let's report a short read
2366 */
2367 err = 0;
2368 break;
2369 }
2370
2164 /* Mark read part of skb as used */ 2371 /* Mark read part of skb as used */
2165 if (!(flags & MSG_PEEK)) { 2372 if (!(flags & MSG_PEEK)) {
2166 UNIXCB(skb).consumed += chunk; 2373 UNIXCB(skb).consumed += chunk;
@@ -2454,20 +2661,22 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2454 return mask; 2661 return mask;
2455 2662
2456 writable = unix_writable(sk); 2663 writable = unix_writable(sk);
2457 other = unix_peer_get(sk); 2664 if (writable) {
2458 if (other) { 2665 unix_state_lock(sk);
2459 if (unix_peer(other) != sk) { 2666
2460 sock_poll_wait(file, &unix_sk(other)->peer_wait, wait); 2667 other = unix_peer(sk);
2461 if (unix_recvq_full(other)) 2668 if (other && unix_peer(other) != sk &&
2462 writable = 0; 2669 unix_recvq_full(other) &&
2463 } 2670 unix_dgram_peer_wake_me(sk, other))
2464 sock_put(other); 2671 writable = 0;
2672
2673 unix_state_unlock(sk);
2465 } 2674 }
2466 2675
2467 if (writable) 2676 if (writable)
2468 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 2677 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2469 else 2678 else
2470 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 2679 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2471 2680
2472 return mask; 2681 return mask;
2473} 2682}
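
Taken together, the af_unix hunks implement the relay described in the long comment near the top of the file: a datagram sender that finds the peer's receive queue full registers its pre-initialized wait entry (u->peer_wake) on the peer's peer_wait queue, under that queue's own spinlock; when the receiver dequeues a datagram and wakes peer_wait, the relay wake function forwards the wakeup to the sender's own sleep queue and unhooks itself, so each registration relays exactly one wakeup and must be re-armed by the next poll or send. The relay, condensed from the hunks above:

	static int relay(wait_queue_t *q, unsigned mode, int flags, void *key)
	{
		struct unix_sock *u = container_of(q, struct unix_sock, peer_wake);
		wait_queue_head_t *u_sleep;

		/* unhook first: one registration, one relayed wakeup */
		__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait, q);
		u->peer_wake.private = NULL;

		u_sleep = sk_sleep(&u->sk);	/* NULL once the socket is gone */
		if (u_sleep)
			wake_up_interruptible_poll(u_sleep, key);
		return 0;
	}

Because the connection is made and broken explicitly under peer_wait.lock, the relayed entry never outlives either socket, which is precisely what plain poll_wait() on the peer could not guarantee.
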
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c71e274c810a..75b0d23ee882 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -7941,8 +7941,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
7941 if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) { 7941 if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) {
7942 if (!(rdev->wiphy.features & 7942 if (!(rdev->wiphy.features &
7943 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) || 7943 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) ||
7944 !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) 7944 !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) {
7945 kzfree(connkeys);
7945 return -EINVAL; 7946 return -EINVAL;
7947 }
7946 connect.flags |= ASSOC_REQ_USE_RRM; 7948 connect.flags |= ASSOC_REQ_USE_RRM;
7947 } 7949 }
7948 7950
@@ -9503,6 +9505,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
9503 if (new_triggers.tcp && new_triggers.tcp->sock) 9505 if (new_triggers.tcp && new_triggers.tcp->sock)
9504 sock_release(new_triggers.tcp->sock); 9506 sock_release(new_triggers.tcp->sock);
9505 kfree(new_triggers.tcp); 9507 kfree(new_triggers.tcp);
9508 kfree(new_triggers.nd_config);
9506 return err; 9509 return err;
9507} 9510}
9508#endif 9511#endif
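
Both nl80211.c hunks are error-path leak fixes: the connect handler has to free connkeys on its early -EINVAL return, and the WoWLAN handler's common error exit must also free the nd_config it may have allocated. With several allocations in flight, a single unwind label keeps later returns from forgetting earlier allocations; sketched with hypothetical names:

	err = -EINVAL;
	if (bad_combination)
		goto out_free;
	...
 out_free:
	kzfree(connkeys);	/* kzfree(): key material may be sensitive */
	return err;
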
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2e8d6f39ed56..06d050da0d94 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -3029,6 +3029,7 @@ int set_regdom(const struct ieee80211_regdomain *rd,
3029 break; 3029 break;
3030 default: 3030 default:
3031 WARN(1, "invalid initiator %d\n", lr->initiator); 3031 WARN(1, "invalid initiator %d\n", lr->initiator);
3032 kfree(rd);
3032 return -EINVAL; 3033 return -EINVAL;
3033 } 3034 }
3034 3035
@@ -3221,8 +3222,10 @@ int __init regulatory_init(void)
3221 /* We always try to get an update for the static regdomain */ 3222 /* We always try to get an update for the static regdomain */
3222 err = regulatory_hint_core(cfg80211_world_regdom->alpha2); 3223 err = regulatory_hint_core(cfg80211_world_regdom->alpha2);
3223 if (err) { 3224 if (err) {
3224 if (err == -ENOMEM) 3225 if (err == -ENOMEM) {
3226 platform_device_unregister(reg_pdev);
3225 return err; 3227 return err;
3228 }
3226 /* 3229 /*
3227 * N.B. kobject_uevent_env() can fail mainly when we're out of 3230 * N.B. kobject_uevent_env() can fail mainly when we're out of
3228 * memory, which is handled and propagated appropriately above 3231 * memory, which is handled and propagated appropriately above
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 09bfcbac63bb..948fa5560de5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -303,6 +303,14 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
303} 303}
304EXPORT_SYMBOL(xfrm_policy_alloc); 304EXPORT_SYMBOL(xfrm_policy_alloc);
305 305
306static void xfrm_policy_destroy_rcu(struct rcu_head *head)
307{
308 struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
309
310 security_xfrm_policy_free(policy->security);
311 kfree(policy);
312}
313
306/* Destroy xfrm_policy: descendant resources must be released to this moment. */ 314/* Destroy xfrm_policy: descendant resources must be released to this moment. */
307 315
308void xfrm_policy_destroy(struct xfrm_policy *policy) 316void xfrm_policy_destroy(struct xfrm_policy *policy)
@@ -312,8 +320,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
312 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer)) 320 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
313 BUG(); 321 BUG();
314 322
315 security_xfrm_policy_free(policy->security); 323 call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
316 kfree(policy);
317} 324}
318EXPORT_SYMBOL(xfrm_policy_destroy); 325EXPORT_SYMBOL(xfrm_policy_destroy);
319 326
@@ -1214,8 +1221,10 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1214 struct xfrm_policy *pol; 1221 struct xfrm_policy *pol;
1215 struct net *net = sock_net(sk); 1222 struct net *net = sock_net(sk);
1216 1223
1224 rcu_read_lock();
1217 read_lock_bh(&net->xfrm.xfrm_policy_lock); 1225 read_lock_bh(&net->xfrm.xfrm_policy_lock);
1218 if ((pol = sk->sk_policy[dir]) != NULL) { 1226 pol = rcu_dereference(sk->sk_policy[dir]);
1227 if (pol != NULL) {
1219 bool match = xfrm_selector_match(&pol->selector, fl, 1228 bool match = xfrm_selector_match(&pol->selector, fl,
1220 sk->sk_family); 1229 sk->sk_family);
1221 int err = 0; 1230 int err = 0;
@@ -1239,6 +1248,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1239 } 1248 }
1240out: 1249out:
1241 read_unlock_bh(&net->xfrm.xfrm_policy_lock); 1250 read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1251 rcu_read_unlock();
1242 return pol; 1252 return pol;
1243} 1253}
1244 1254
@@ -1307,13 +1317,14 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1307#endif 1317#endif
1308 1318
1309 write_lock_bh(&net->xfrm.xfrm_policy_lock); 1319 write_lock_bh(&net->xfrm.xfrm_policy_lock);
1310 old_pol = sk->sk_policy[dir]; 1320 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
1311 sk->sk_policy[dir] = pol; 1321 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
1312 if (pol) { 1322 if (pol) {
1313 pol->curlft.add_time = get_seconds(); 1323 pol->curlft.add_time = get_seconds();
1314 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); 1324 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1315 xfrm_sk_policy_link(pol, dir); 1325 xfrm_sk_policy_link(pol, dir);
1316 } 1326 }
1327 rcu_assign_pointer(sk->sk_policy[dir], pol);
1317 if (old_pol) { 1328 if (old_pol) {
1318 if (pol) 1329 if (pol)
1319 xfrm_policy_requeue(old_pol, pol); 1330 xfrm_policy_requeue(old_pol, pol);
@@ -1361,17 +1372,26 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1361 return newp; 1372 return newp;
1362} 1373}
1363 1374
1364int __xfrm_sk_clone_policy(struct sock *sk) 1375int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1365{ 1376{
1366 struct xfrm_policy *p0 = sk->sk_policy[0], 1377 const struct xfrm_policy *p;
1367 *p1 = sk->sk_policy[1]; 1378 struct xfrm_policy *np;
1379 int i, ret = 0;
1368 1380
1369 sk->sk_policy[0] = sk->sk_policy[1] = NULL; 1381 rcu_read_lock();
1370 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL) 1382 for (i = 0; i < 2; i++) {
1371 return -ENOMEM; 1383 p = rcu_dereference(osk->sk_policy[i]);
1372 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL) 1384 if (p) {
1373 return -ENOMEM; 1385 np = clone_policy(p, i);
1374 return 0; 1386 if (unlikely(!np)) {
1387 ret = -ENOMEM;
1388 break;
1389 }
1390 rcu_assign_pointer(sk->sk_policy[i], np);
1391 }
1392 }
1393 rcu_read_unlock();
1394 return ret;
1375} 1395}
1376 1396
1377static int 1397static int
@@ -2198,6 +2218,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2198 xdst = NULL; 2218 xdst = NULL;
2199 route = NULL; 2219 route = NULL;
2200 2220
2221 sk = sk_const_to_full_sk(sk);
2201 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 2222 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2202 num_pols = 1; 2223 num_pols = 1;
2203 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 2224 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
@@ -2477,6 +2498,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2477 } 2498 }
2478 2499
2479 pol = NULL; 2500 pol = NULL;
2501 sk = sk_to_full_sk(sk);
2480 if (sk && sk->sk_policy[dir]) { 2502 if (sk && sk->sk_policy[dir]) {
2481 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 2503 pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2482 if (IS_ERR(pol)) { 2504 if (IS_ERR(pol)) {
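
The xfrm_policy.c hunks convert sk->sk_policy[] to RCU end to end: lookups run under rcu_read_lock() with rcu_dereference(); the writer updates under xfrm_policy_lock and publishes with rcu_assign_pointer() (rcu_dereference_protected() documents which lock protects the plain read); and the final free is deferred through call_rcu(), so a reader that raced with removal never touches freed memory. The deferred-free half, mirroring the hunk:

	static void my_policy_free_rcu(struct rcu_head *head)
	{
		struct xfrm_policy *pol =
			container_of(head, struct xfrm_policy, rcu);

		security_xfrm_policy_free(pol->security);
		kfree(pol);
	}

	/* after the policy is unlinked from every reader-visible pointer */
	call_rcu(&pol->rcu, my_policy_free_rcu);
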
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 79b4596b5f9a..edd638b5825f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -67,10 +67,13 @@ HOSTLOADLIBES_lathist += -lelf
67# point this to your LLVM backend with bpf support 67# point this to your LLVM backend with bpf support
68LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc 68LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
69 69
70# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
71# But, there is no easy way to fix it, so just exclude it since it is
72# useless for BPF samples.
70$(obj)/%.o: $(src)/%.c 73$(obj)/%.o: $(src)/%.c
71 clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ 74 clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
72 -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \ 75 -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
73 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ 76 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
74 clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ 77 clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
75 -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \ 78 -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
76 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s 79 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
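
Pre-defining __ASM_SYSREG_H on the clang command line is the include-guard trick: the header's guard macro is already set, so its body (the inline assembly LLVM cannot parse) is skipped without editing the header. In miniature (MY_HEADER_H is an illustrative stand-in for the real guard macro):

	/* built with -DMY_HEADER_H, the body below is never compiled */
	#ifndef MY_HEADER_H
	#define MY_HEADER_H
	/* ...contents incompatible with this compiler... */
	#endif
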
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 125b906cd1d4..638a38e1b419 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -2711,7 +2711,7 @@ $kernelversion = get_kernel_version();
2711 2711
2712# generate a sequence of code that will splice in highlighting information 2712# generate a sequence of code that will splice in highlighting information
2713# using the s// operator. 2713# using the s// operator.
2714foreach my $k (keys @highlights) { 2714for (my $k = 0; $k < @highlights; $k++) {
2715 my $pattern = $highlights[$k][0]; 2715 my $pattern = $highlights[$k][0];
2716 my $result = $highlights[$k][1]; 2716 my $result = $highlights[$k][1];
2717# print STDERR "scanning pattern:$pattern, highlight:($result)\n"; 2717# print STDERR "scanning pattern:$pattern, highlight:($result)\n";
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 1a10d8ac8162..dacf71a43ad4 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -62,7 +62,7 @@ vmlinux_link()
62 -Wl,--start-group \ 62 -Wl,--start-group \
63 ${KBUILD_VMLINUX_MAIN} \ 63 ${KBUILD_VMLINUX_MAIN} \
64 -Wl,--end-group \ 64 -Wl,--end-group \
65 -lutil ${1} 65 -lutil -lrt ${1}
66 rm -f linux 66 rm -f linux
67 fi 67 fi
68} 68}
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 927db9f35ad6..696ccfa08d10 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -845,6 +845,8 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
845 size_t datalen = prep->datalen; 845 size_t datalen = prep->datalen;
846 int ret = 0; 846 int ret = 0;
847 847
848 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
849 return -ENOKEY;
848 if (datalen <= 0 || datalen > 32767 || !prep->data) 850 if (datalen <= 0 || datalen > 32767 || !prep->data)
849 return -EINVAL; 851 return -EINVAL;
850 852
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 903dace648a1..16dec53184b6 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1007,13 +1007,16 @@ static void trusted_rcu_free(struct rcu_head *rcu)
1007 */ 1007 */
1008static int trusted_update(struct key *key, struct key_preparsed_payload *prep) 1008static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
1009{ 1009{
1010 struct trusted_key_payload *p = key->payload.data[0]; 1010 struct trusted_key_payload *p;
1011 struct trusted_key_payload *new_p; 1011 struct trusted_key_payload *new_p;
1012 struct trusted_key_options *new_o; 1012 struct trusted_key_options *new_o;
1013 size_t datalen = prep->datalen; 1013 size_t datalen = prep->datalen;
1014 char *datablob; 1014 char *datablob;
1015 int ret = 0; 1015 int ret = 0;
1016 1016
1017 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
1018 return -ENOKEY;
1019 p = key->payload.data[0];
1017 if (!p->migratable) 1020 if (!p->migratable)
1018 return -EPERM; 1021 return -EPERM;
1019 if (datalen <= 0 || datalen > 32767 || !prep->data) 1022 if (datalen <= 0 || datalen > 32767 || !prep->data)
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 28cb30f80256..8705d79b2c6f 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -120,7 +120,10 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
120 120
121 if (ret == 0) { 121 if (ret == 0) {
122 /* attach the new data, displacing the old */ 122 /* attach the new data, displacing the old */
123 zap = key->payload.data[0]; 123 if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
124 zap = key->payload.data[0];
125 else
126 zap = NULL;
124 rcu_assign_keypointer(key, upayload); 127 rcu_assign_keypointer(key, upayload);
125 key->expiry = 0; 128 key->expiry = 0;
126 } 129 }
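
All three security/keys fixes guard against negatively instantiated keys, for which key->payload.data[0] does not point at a payload at all (the key records an error such as -ENOENT instead). Update paths must therefore fail with -ENOKEY before dereferencing, or, as in user_update(), simply avoid treating the old payload as something to displace. The guard, sketched:

	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
		return -ENOKEY;		/* payload encodes an error, not data */
	p = key->payload.data[0];	/* only now safe to dereference */
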
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 18643bf9894d..456e1a9bcfde 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -638,7 +638,7 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
638{ 638{
639 struct avtab_node *node; 639 struct avtab_node *node;
640 640
641 if (!ctab || !key || !avd || !xperms) 641 if (!ctab || !key || !avd)
642 return; 642 return;
643 643
644 for (node = avtab_search_node(ctab, key); node; 644 for (node = avtab_search_node(ctab, key); node;
@@ -657,7 +657,7 @@ void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
657 if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) == 657 if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) ==
658 (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED))) 658 (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED)))
659 avd->auditallow |= node->datum.u.data; 659 avd->auditallow |= node->datum.u.data;
660 if ((node->key.specified & AVTAB_ENABLED) && 660 if (xperms && (node->key.specified & AVTAB_ENABLED) &&
661 (node->key.specified & AVTAB_XPERMS)) 661 (node->key.specified & AVTAB_XPERMS))
662 services_compute_xperms_drivers(xperms, node); 662 services_compute_xperms_drivers(xperms, node);
663 } 663 }
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 5d99436dfcae..0cda05c72f50 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -12,9 +12,11 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
12MODULE_LICENSE("GPL v2"); 12MODULE_LICENSE("GPL v2");
13 13
14#define OUI_WEISS 0x001c6a 14#define OUI_WEISS 0x001c6a
15#define OUI_LOUD 0x000ff2
15 16
16#define DICE_CATEGORY_ID 0x04 17#define DICE_CATEGORY_ID 0x04
17#define WEISS_CATEGORY_ID 0x00 18#define WEISS_CATEGORY_ID 0x00
19#define LOUD_CATEGORY_ID 0x10
18 20
19static int dice_interface_check(struct fw_unit *unit) 21static int dice_interface_check(struct fw_unit *unit)
20{ 22{
@@ -57,6 +59,8 @@ static int dice_interface_check(struct fw_unit *unit)
57 } 59 }
58 if (vendor == OUI_WEISS) 60 if (vendor == OUI_WEISS)
59 category = WEISS_CATEGORY_ID; 61 category = WEISS_CATEGORY_ID;
62 else if (vendor == OUI_LOUD)
63 category = LOUD_CATEGORY_ID;
60 else 64 else
61 category = DICE_CATEGORY_ID; 65 category = DICE_CATEGORY_ID;
62 if (device->config_rom[3] != ((vendor << 8) | category) || 66 if (device->config_rom[3] != ((vendor << 8) | category) ||
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 8a7fbdcb4072..bff5c8b329d1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -312,6 +312,10 @@ enum {
 	 (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\
 	  AZX_DCAPS_I915_POWERWELL)
 
+#define AZX_DCAPS_INTEL_BROXTON \
+	(AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\
+	 AZX_DCAPS_I915_POWERWELL)
+
 /* quirks for ATI SB / AMD Hudson */
 #define AZX_DCAPS_PRESET_ATI_SB \
 	(AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\
@@ -351,6 +355,8 @@ enum {
 			       ((pci)->device == 0x0d0c) || \
 			       ((pci)->device == 0x160c))
 
+#define IS_BROXTON(pci)	((pci)->device == 0x5a98)
+
 static char *driver_short_names[] = {
 	[AZX_DRIVER_ICH] = "HDA Intel",
 	[AZX_DRIVER_PCH] = "HDA Intel PCH",
@@ -502,15 +508,36 @@ static void azx_init_pci(struct azx *chip)
 	}
 }
 
+/*
+ * In BXT-P A0, HD-Audio DMA requests is later than expected,
+ * and makes an audio stream sensitive to system latencies when
+ * 24/32 bits are playing.
+ * Adjusting threshold of DMA fifo to force the DMA request
+ * sooner to improve latency tolerance at the expense of power.
+ */
+static void bxt_reduce_dma_latency(struct azx *chip)
+{
+	u32 val;
+
+	val = azx_readl(chip, SKL_EM4L);
+	val &= (0x3 << 20);
+	azx_writel(chip, SKL_EM4L, val);
+}
+
 static void hda_intel_init_chip(struct azx *chip, bool full_reset)
 {
 	struct hdac_bus *bus = azx_bus(chip);
+	struct pci_dev *pci = chip->pci;
 
 	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
 		snd_hdac_set_codec_wakeup(bus, true);
 	azx_init_chip(chip, full_reset);
 	if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
 		snd_hdac_set_codec_wakeup(bus, false);
+
+	/* reduce dma latency to avoid noise */
+	if (IS_BROXTON(pci))
+		bxt_reduce_dma_latency(chip);
 }
 
 /* calculate runtime delay from LPIB */
@@ -2124,6 +2151,9 @@ static const struct pci_device_id azx_ids[] = {
 	/* Sunrise Point-LP */
 	{ PCI_DEVICE(0x8086, 0x9d70),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+	/* Broxton-P(Apollolake) */
+	{ PCI_DEVICE(0x8086, 0x5a98),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
 	/* Haswell */
 	{ PCI_DEVICE(0x8086, 0x0a0c),
 	  .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index f8a12ca477f1..4ef2259f88ca 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -778,7 +778,8 @@ static const struct hda_pintbl alienware_pincfgs[] = {
 };
 
 static const struct snd_pci_quirk ca0132_quirks[] = {
-	SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15", QUIRK_ALIENWARE),
+	SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
+	SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
 	{}
 };
 
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index c8b8ef5246a6..ef198903c0c3 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -955,6 +955,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
  */
 
 static const struct hda_device_id snd_hda_id_conexant[] = {
+	HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
@@ -972,9 +973,9 @@ static const struct hda_device_id snd_hda_id_conexant[] = {
 	HDA_CODEC_ENTRY(0x14f150ac, "CX20652", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f150b8, "CX20664", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f150b9, "CX20665", patch_conexant_auto),
-	HDA_CODEC_ENTRY(0x14f150f1, "CX20721", patch_conexant_auto),
+	HDA_CODEC_ENTRY(0x14f150f1, "CX21722", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f150f2, "CX20722", patch_conexant_auto),
-	HDA_CODEC_ENTRY(0x14f150f3, "CX20723", patch_conexant_auto),
+	HDA_CODEC_ENTRY(0x14f150f3, "CX21724", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f150f4, "CX20724", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f1510f, "CX20751/2", patch_conexant_auto),
 	HDA_CODEC_ENTRY(0x14f15110, "CX20751/2", patch_conexant_auto),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 60cd9e700909..4b6fb668c91c 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2352,6 +2352,12 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
 	struct hda_codec *codec = audio_ptr;
 	int pin_nid = port + 0x04;
 
+	/* skip notification during system suspend (but not in runtime PM);
+	 * the state will be updated at resume
+	 */
+	if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
+		return;
+
 	check_presence_and_report(codec, pin_nid);
 }
 
@@ -2378,7 +2384,8 @@ static int patch_generic_hdmi(struct hda_codec *codec)
 	 * can cover the codec power request, and so need not set this flag.
 	 * For previous platforms, there is no such power well feature.
 	 */
-	if (is_valleyview_plus(codec) || is_skylake(codec))
+	if (is_valleyview_plus(codec) || is_skylake(codec) ||
+	    is_broxton(codec))
 		codec->core.link_power_control = 1;
 
 	if (is_haswell_plus(codec) || is_valleyview_plus(codec)) {
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 2f7b065f9ac4..6c268dad143f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -111,6 +111,7 @@ struct alc_spec {
 	void (*power_hook)(struct hda_codec *codec);
 #endif
 	void (*shutup)(struct hda_codec *codec);
+	void (*reboot_notify)(struct hda_codec *codec);
 
 	int init_amp;
 	int codec_variant; /* flag for other variants */
@@ -773,6 +774,25 @@ static inline void alc_shutup(struct hda_codec *codec)
 	snd_hda_shutup_pins(codec);
 }
 
+static void alc_reboot_notify(struct hda_codec *codec)
+{
+	struct alc_spec *spec = codec->spec;
+
+	if (spec && spec->reboot_notify)
+		spec->reboot_notify(codec);
+	else
+		alc_shutup(codec);
+}
+
+/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
+static void alc_d3_at_reboot(struct hda_codec *codec)
+{
+	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+	snd_hda_codec_write(codec, codec->core.afg, 0,
+			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+	msleep(10);
+}
+
 #define alc_free	snd_hda_gen_free
 
 #ifdef CONFIG_PM
@@ -818,7 +838,7 @@ static const struct hda_codec_ops alc_patch_ops = {
 	.suspend = alc_suspend,
 	.check_power_status = snd_hda_gen_check_power_status,
 #endif
-	.reboot_notify = alc_shutup,
+	.reboot_notify = alc_reboot_notify,
 };
 
 
@@ -1759,6 +1779,7 @@ enum {
 	ALC882_FIXUP_NO_PRIMARY_HP,
 	ALC887_FIXUP_ASUS_BASS,
 	ALC887_FIXUP_BASS_CHMAP,
+	ALC882_FIXUP_DISABLE_AAMIX,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -1920,6 +1941,8 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
 
 static void alc_fixup_bass_chmap(struct hda_codec *codec,
 				 const struct hda_fixup *fix, int action);
+static void alc_fixup_disable_aamix(struct hda_codec *codec,
+				    const struct hda_fixup *fix, int action);
 
 static const struct hda_fixup alc882_fixups[] = {
 	[ALC882_FIXUP_ABIT_AW9D_MAX] = {
@@ -2151,6 +2174,10 @@ static const struct hda_fixup alc882_fixups[] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_bass_chmap,
 	},
+	[ALC882_FIXUP_DISABLE_AAMIX] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc_fixup_disable_aamix,
+	},
 };
 
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2218,6 +2245,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
 	SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -4190,6 +4218,8 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
 	struct alc_spec *spec = codec->spec;
 
 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+		spec->shutup = alc_no_shutup; /* reduce click noise */
+		spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
 		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
 		codec->power_save_node = 0; /* avoid click noises */
 		snd_hda_apply_pincfgs(codec, pincfgs);
@@ -4570,6 +4600,7 @@ enum {
 	ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
 	ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
 	ALC292_FIXUP_TPT440_DOCK,
+	ALC292_FIXUP_TPT440,
 	ALC283_FIXUP_BXBT2807_MIC,
 	ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
 	ALC282_FIXUP_ASPIRE_V5_PINS,
@@ -4585,8 +4616,11 @@ enum {
 	ALC288_FIXUP_DISABLE_AAMIX,
 	ALC292_FIXUP_DELL_E7X,
 	ALC292_FIXUP_DISABLE_AAMIX,
+	ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
 	ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
 	ALC275_FIXUP_DELL_XPS,
+	ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
+	ALC293_FIXUP_LENOVO_SPK_NOISE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5041,6 +5075,12 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
 	},
+	[ALC292_FIXUP_TPT440] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc_fixup_disable_aamix,
+		.chained = true,
+		.chain_id = ALC292_FIXUP_TPT440_DOCK,
+	},
 	[ALC283_FIXUP_BXBT2807_MIC] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -5140,6 +5180,12 @@ static const struct hda_fixup alc269_fixups[] = {
 		.chained = true,
 		.chain_id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE
 	},
+	[ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc_fixup_disable_aamix,
+		.chained = true,
+		.chain_id = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE
+	},
 	[ALC292_FIXUP_DELL_E7X] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_dell_xps13,
@@ -5167,6 +5213,23 @@ static const struct hda_fixup alc269_fixups[] = {
 			{}
 		}
 	},
+	[ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = {
+		.type = HDA_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
+			/* Disable pass-through path for FRONT 14h */
+			{0x20, AC_VERB_SET_COEF_INDEX, 0x36},
+			{0x20, AC_VERB_SET_PROC_COEF, 0x1737},
+			{}
+		},
+		.chained = true,
+		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+	},
+	[ALC293_FIXUP_LENOVO_SPK_NOISE] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc_fixup_disable_aamix,
+		.chained = true,
+		.chain_id = ALC269_FIXUP_THINKPAD_ACPI
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5180,8 +5243,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
 	SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
 	SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+	SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
 	SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
+	SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
 	SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
 	SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
 	SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER),
@@ -5199,11 +5264,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
-	SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
-	SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
-	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
-	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
-	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+	SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+	SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5302,15 +5368,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
-	SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
+	SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440),
 	SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+	SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
 	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5320,6 +5388,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
+	SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -5400,6 +5469,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 	{.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
 	{.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
 	{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
+	{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
 	{}
 };
 
@@ -6386,6 +6456,7 @@ static const struct hda_fixup alc662_fixups[] = {
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
 	SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
+	SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 826122d8acee..2c7c5eb8b1e9 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -3110,6 +3110,29 @@ static void stac92hd71bxx_fixup_hp_hdx(struct hda_codec *codec,
 		spec->gpio_led = 0x08;
 }
 
+static bool is_hp_output(struct hda_codec *codec, hda_nid_t pin)
+{
+	unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin);
+
+	/* count line-out, too, as BIOS sets often so */
+	return get_defcfg_connect(pin_cfg) != AC_JACK_PORT_NONE &&
+		(get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
+		 get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT);
+}
+
+static void fixup_hp_headphone(struct hda_codec *codec, hda_nid_t pin)
+{
+	unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin);
+
+	/* It was changed in the BIOS to just satisfy MS DTM.
+	 * Lets turn it back into slaved HP
+	 */
+	pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE)) |
+		(AC_JACK_HP_OUT << AC_DEFCFG_DEVICE_SHIFT);
+	pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC | AC_DEFCFG_SEQUENCE))) |
+		0x1f;
+	snd_hda_codec_set_pincfg(codec, pin, pin_cfg);
+}
 
 static void stac92hd71bxx_fixup_hp(struct hda_codec *codec,
 				   const struct hda_fixup *fix, int action)
@@ -3119,22 +3142,12 @@ static void stac92hd71bxx_fixup_hp(struct hda_codec *codec,
 	if (action != HDA_FIXUP_ACT_PRE_PROBE)
 		return;
 
-	if (hp_blike_system(codec->core.subsystem_id)) {
-		unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, 0x0f);
-		if (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
-		    get_defcfg_device(pin_cfg) == AC_JACK_SPEAKER ||
-		    get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT) {
-			/* It was changed in the BIOS to just satisfy MS DTM.
-			 * Lets turn it back into slaved HP
-			 */
-			pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE))
-					| (AC_JACK_HP_OUT <<
-					AC_DEFCFG_DEVICE_SHIFT);
-			pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC
-							| AC_DEFCFG_SEQUENCE)))
-							| 0x1f;
-			snd_hda_codec_set_pincfg(codec, 0x0f, pin_cfg);
-		}
+	/* when both output A and F are assigned, these are supposedly
+	 * dock and built-in headphones; fix both pin configs
+	 */
+	if (is_hp_output(codec, 0x0a) && is_hp_output(codec, 0x0f)) {
+		fixup_hp_headphone(codec, 0x0a);
+		fixup_hp_headphone(codec, 0x0f);
 	}
 
 	if (find_mute_led_cfg(codec, 1))
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 714df906249e..41c31db65039 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -741,10 +741,11 @@ snd_rme96_playback_setrate(struct rme96 *rme96,
 	{
 		/* change to/from double-speed: reset the DAC (if available) */
 		snd_rme96_reset_dac(rme96);
+		return 1; /* need to restore volume */
 	} else {
 		writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
+		return 0;
 	}
-	return 0;
 }
 
 static int
@@ -980,6 +981,7 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
 	struct rme96 *rme96 = snd_pcm_substream_chip(substream);
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	int err, rate, dummy;
+	bool apply_dac_volume = false;
 
 	runtime->dma_area = (void __force *)(rme96->iobase +
 					     RME96_IO_PLAY_BUFFER);
@@ -993,24 +995,26 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
 	{
 		/* slave clock */
 		if ((int)params_rate(params) != rate) {
-			spin_unlock_irq(&rme96->lock);
-			return -EIO;
+			err = -EIO;
+			goto error;
 		}
-	} else if ((err = snd_rme96_playback_setrate(rme96, params_rate(params))) < 0) {
-		spin_unlock_irq(&rme96->lock);
-		return err;
-	}
-	if ((err = snd_rme96_playback_setformat(rme96, params_format(params))) < 0) {
-		spin_unlock_irq(&rme96->lock);
-		return err;
+	} else {
+		err = snd_rme96_playback_setrate(rme96, params_rate(params));
+		if (err < 0)
+			goto error;
+		apply_dac_volume = err > 0; /* need to restore volume later? */
 	}
+
+	err = snd_rme96_playback_setformat(rme96, params_format(params));
+	if (err < 0)
+		goto error;
 	snd_rme96_setframelog(rme96, params_channels(params), 1);
 	if (rme96->capture_periodsize != 0) {
 		if (params_period_size(params) << rme96->playback_frlog !=
 		    rme96->capture_periodsize)
 		{
-			spin_unlock_irq(&rme96->lock);
-			return -EBUSY;
+			err = -EBUSY;
+			goto error;
 		}
 	}
 	rme96->playback_periodsize =
@@ -1021,9 +1025,16 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
 		rme96->wcreg &= ~(RME96_WCR_PRO | RME96_WCR_DOLBY | RME96_WCR_EMP);
 		writel(rme96->wcreg |= rme96->wcreg_spdif_stream, rme96->iobase + RME96_IO_CONTROL_REGISTER);
 	}
+
+	err = 0;
+ error:
 	spin_unlock_irq(&rme96->lock);
-
-	return 0;
+	if (apply_dac_volume) {
+		usleep_range(3000, 10000);
+		snd_rme96_apply_dac_volume(rme96);
+	}
+
+	return err;
 }
 
 static int
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 9929efc6b9aa..b3ea24d64c50 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1023,24 +1023,18 @@ void arizona_init_dvfs(struct arizona_priv *priv)
 }
 EXPORT_SYMBOL_GPL(arizona_init_dvfs);
 
-static unsigned int arizona_sysclk_48k_rates[] = {
+static unsigned int arizona_opclk_ref_48k_rates[] = {
 	6144000,
 	12288000,
 	24576000,
 	49152000,
-	73728000,
-	98304000,
-	147456000,
 };
 
-static unsigned int arizona_sysclk_44k1_rates[] = {
+static unsigned int arizona_opclk_ref_44k1_rates[] = {
 	5644800,
 	11289600,
 	22579200,
 	45158400,
-	67737600,
-	90316800,
-	135475200,
 };
 
 static int arizona_set_opclk(struct snd_soc_codec *codec, unsigned int clk,
@@ -1065,11 +1059,11 @@ static int arizona_set_opclk(struct snd_soc_codec *codec, unsigned int clk,
 	}
 
 	if (refclk % 8000)
-		rates = arizona_sysclk_44k1_rates;
+		rates = arizona_opclk_ref_44k1_rates;
 	else
-		rates = arizona_sysclk_48k_rates;
+		rates = arizona_opclk_ref_48k_rates;
 
-	for (ref = 0; ref < ARRAY_SIZE(arizona_sysclk_48k_rates) &&
+	for (ref = 0; ref < ARRAY_SIZE(arizona_opclk_ref_48k_rates) &&
 	     rates[ref] <= refclk; ref++) {
 		div = 1;
 		while (rates[ref] / div >= freq && div < 32) {
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index 969e337dc17c..84f5eb07a91b 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -205,18 +205,18 @@ static const struct snd_kcontrol_new es8328_right_line_controls =
 
 /* Left Mixer */
 static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
-	SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 8, 1, 0),
-	SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 7, 1, 0),
-	SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 8, 1, 0),
-	SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 7, 1, 0),
+	SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0),
+	SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0),
+	SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0),
+	SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0),
 };
 
 /* Right Mixer */
 static const struct snd_kcontrol_new es8328_right_mixer_controls[] = {
-	SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 8, 1, 0),
-	SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 7, 1, 0),
-	SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 8, 1, 0),
-	SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 7, 1, 0),
+	SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0),
+	SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0),
+	SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0),
+	SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0),
 };
 
 static const char * const es8328_pga_sel[] = {
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 7fc7b4e3f444..c1b87c5800b1 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -1271,6 +1271,36 @@ static int nau8825_i2c_remove(struct i2c_client *client)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int nau8825_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct nau8825 *nau8825 = dev_get_drvdata(dev);
+
+	disable_irq(client->irq);
+	regcache_cache_only(nau8825->regmap, true);
+	regcache_mark_dirty(nau8825->regmap);
+
+	return 0;
+}
+
+static int nau8825_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct nau8825 *nau8825 = dev_get_drvdata(dev);
+
+	regcache_cache_only(nau8825->regmap, false);
+	regcache_sync(nau8825->regmap);
+	enable_irq(client->irq);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops nau8825_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume)
+};
+
 static const struct i2c_device_id nau8825_i2c_ids[] = {
 	{ "nau8825", 0 },
 	{ }
@@ -1297,6 +1327,7 @@ static struct i2c_driver nau8825_driver = {
 		.name = "nau8825",
 		.of_match_table = of_match_ptr(nau8825_of_ids),
 		.acpi_match_table = ACPI_PTR(nau8825_acpi_match),
+		.pm = &nau8825_pm,
 	},
 	.probe = nau8825_i2c_probe,
 	.remove = nau8825_i2c_remove,
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index aca479fa7670..1dc68ab08a17 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -80,8 +80,10 @@ int rl6231_calc_dmic_clk(int rate)
 	}
 
 	for (i = 0; i < ARRAY_SIZE(div); i++) {
-		/* find divider that gives DMIC frequency below 3MHz */
-		if (3000000 * div[i] >= rate)
+		if ((div[i] % 3) == 0)
+			continue;
+		/* find divider that gives DMIC frequency below 3.072MHz */
+		if (3072000 * div[i] >= rate)
 			return i;
 	}
 
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 28132375e427..ef76940f9dcb 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -245,7 +245,7 @@ struct rt5645_priv {
 	struct snd_soc_jack *hp_jack;
 	struct snd_soc_jack *mic_jack;
 	struct snd_soc_jack *btn_jack;
-	struct delayed_work jack_detect_work;
+	struct delayed_work jack_detect_work, rcclock_work;
 	struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
 	struct rt5645_eq_param_s *eq_param;
 
@@ -565,12 +565,33 @@ static int rt5645_hweq_put(struct snd_kcontrol *kcontrol,
 	.put = rt5645_hweq_put \
 }
 
+static int rt5645_spk_put_volsw(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+	struct rt5645_priv *rt5645 = snd_soc_component_get_drvdata(component);
+	int ret;
+
+	cancel_delayed_work_sync(&rt5645->rcclock_work);
+
+	regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
+		RT5645_PWR_CLK25M_MASK, RT5645_PWR_CLK25M_PU);
+
+	ret = snd_soc_put_volsw(kcontrol, ucontrol);
+
+	queue_delayed_work(system_power_efficient_wq, &rt5645->rcclock_work,
+		msecs_to_jiffies(200));
+
+	return ret;
+}
+
 static const struct snd_kcontrol_new rt5645_snd_controls[] = {
 	/* Speaker Output Volume */
 	SOC_DOUBLE("Speaker Channel Switch", RT5645_SPK_VOL,
 		   RT5645_VOL_L_SFT, RT5645_VOL_R_SFT, 1, 1),
-	SOC_DOUBLE_TLV("Speaker Playback Volume", RT5645_SPK_VOL,
-		       RT5645_L_VOL_SFT, RT5645_R_VOL_SFT, 39, 1, out_vol_tlv),
+	SOC_DOUBLE_EXT_TLV("Speaker Playback Volume", RT5645_SPK_VOL,
+		RT5645_L_VOL_SFT, RT5645_R_VOL_SFT, 39, 1, snd_soc_get_volsw,
+		rt5645_spk_put_volsw, out_vol_tlv),
 
 	/* ClassD modulator Speaker Gain Ratio */
 	SOC_SINGLE_TLV("Speaker ClassD Playback Volume", RT5645_SPO_CLSD_RATIO,
@@ -1498,7 +1519,7 @@ static void hp_amp_power(struct snd_soc_codec *codec, int on)
 		regmap_write(rt5645->regmap, RT5645_PR_BASE +
 			RT5645_MAMP_INT_REG2, 0xfc00);
 		snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140);
-		msleep(40);
+		msleep(70);
 		rt5645->hp_on = true;
 	} else {
 		/* depop parameters */
@@ -3122,6 +3143,15 @@ static void rt5645_jack_detect_work(struct work_struct *work)
 				    SND_JACK_BTN_2 | SND_JACK_BTN_3);
 }
 
+static void rt5645_rcclock_work(struct work_struct *work)
+{
+	struct rt5645_priv *rt5645 =
+		container_of(work, struct rt5645_priv, rcclock_work.work);
+
+	regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
+		RT5645_PWR_CLK25M_MASK, RT5645_PWR_CLK25M_PD);
+}
+
 static irqreturn_t rt5645_irq(int irq, void *data)
 {
 	struct rt5645_priv *rt5645 = data;
@@ -3348,6 +3378,27 @@ static const struct dmi_system_id dmi_platform_intel_braswell[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Reks"),
 		},
 	},
+	{
+		.ident = "Google Edgar",
+		.callback = strago_quirk_cb,
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
+		},
+	},
+	{
+		.ident = "Google Wizpig",
+		.callback = strago_quirk_cb,
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Wizpig"),
+		},
+	},
+	{
+		.ident = "Google Terra",
+		.callback = strago_quirk_cb,
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Terra"),
+		},
+	},
 	{ }
 };
 
@@ -3587,6 +3638,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
 	}
 
 	INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
+	INIT_DELAYED_WORK(&rt5645->rcclock_work, rt5645_rcclock_work);
 
 	if (rt5645->i2c->irq) {
 		ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq,
@@ -3621,6 +3673,7 @@ static int rt5645_i2c_remove(struct i2c_client *i2c)
 	free_irq(i2c->irq, rt5645);
 
 	cancel_delayed_work_sync(&rt5645->jack_detect_work);
+	cancel_delayed_work_sync(&rt5645->rcclock_work);
 
 	snd_soc_unregister_codec(&i2c->dev);
 	regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h
index dc2b46236c5c..3f1b0f1df809 100644
--- a/sound/soc/codecs/rt5670.h
+++ b/sound/soc/codecs/rt5670.h
@@ -973,12 +973,12 @@
 #define RT5670_SCLK_SRC_MCLK			(0x0 << 14)
 #define RT5670_SCLK_SRC_PLL1			(0x1 << 14)
 #define RT5670_SCLK_SRC_RCCLK			(0x2 << 14) /* 15MHz */
-#define RT5670_PLL1_SRC_MASK			(0x3 << 12)
-#define RT5670_PLL1_SRC_SFT			12
-#define RT5670_PLL1_SRC_MCLK			(0x0 << 12)
-#define RT5670_PLL1_SRC_BCLK1			(0x1 << 12)
-#define RT5670_PLL1_SRC_BCLK2			(0x2 << 12)
-#define RT5670_PLL1_SRC_BCLK3			(0x3 << 12)
+#define RT5670_PLL1_SRC_MASK			(0x7 << 11)
+#define RT5670_PLL1_SRC_SFT			11
+#define RT5670_PLL1_SRC_MCLK			(0x0 << 11)
+#define RT5670_PLL1_SRC_BCLK1			(0x1 << 11)
+#define RT5670_PLL1_SRC_BCLK2			(0x2 << 11)
+#define RT5670_PLL1_SRC_BCLK3			(0x3 << 11)
 #define RT5670_PLL1_PD_MASK			(0x1 << 3)
 #define RT5670_PLL1_PD_SFT			3
 #define RT5670_PLL1_PD_1			(0x0 << 3)
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 1f590b5a6718..2828591a4b03 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -1386,90 +1386,90 @@ static const struct snd_kcontrol_new rt5677_dac_r_mix[] = {
 };
 
 static const struct snd_kcontrol_new rt5677_sto1_dac_l_mix[] = {
-	SOC_DAPM_SINGLE("ST L Switch", RT5677_STO1_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("ST L Switch", RT5677_STO1_DAC_MIXER,
 			RT5677_M_ST_DAC1_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
 			RT5677_M_DAC1_L_STO_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_STO1_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_STO1_DAC_MIXER,
 			RT5677_M_DAC2_L_STO_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
 			RT5677_M_DAC1_R_STO_L_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_sto1_dac_r_mix[] = {
-	SOC_DAPM_SINGLE("ST R Switch", RT5677_STO1_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("ST R Switch", RT5677_STO1_DAC_MIXER,
 			RT5677_M_ST_DAC1_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER,
 			RT5677_M_DAC1_R_STO_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_STO1_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_STO1_DAC_MIXER,
 			RT5677_M_DAC2_R_STO_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER,
 			RT5677_M_DAC1_L_STO_R_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_mono_dac_l_mix[] = {
-	SOC_DAPM_SINGLE("ST L Switch", RT5677_MONO_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("ST L Switch", RT5677_MONO_DAC_MIXER,
 			RT5677_M_ST_DAC2_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_MONO_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_MONO_DAC_MIXER,
 			RT5677_M_DAC1_L_MONO_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
 			RT5677_M_DAC2_L_MONO_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
 			RT5677_M_DAC2_R_MONO_L_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_mono_dac_r_mix[] = {
-	SOC_DAPM_SINGLE("ST R Switch", RT5677_MONO_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("ST R Switch", RT5677_MONO_DAC_MIXER,
 			RT5677_M_ST_DAC2_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_MONO_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_MONO_DAC_MIXER,
 			RT5677_M_DAC1_R_MONO_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER,
 			RT5677_M_DAC2_R_MONO_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER,
 			RT5677_M_DAC2_L_MONO_R_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_dd1_l_mix[] = {
-	SOC_DAPM_SINGLE("Sto DAC Mix L Switch", RT5677_DD1_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix L Switch", RT5677_DD1_MIXER,
 			RT5677_M_STO_L_DD1_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("Mono DAC Mix L Switch", RT5677_DD1_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix L Switch", RT5677_DD1_MIXER,
 			RT5677_M_MONO_L_DD1_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC3 L Switch", RT5677_DD1_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC3 L Switch", RT5677_DD1_MIXER,
 			RT5677_M_DAC3_L_DD1_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC3 R Switch", RT5677_DD1_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC3 R Switch", RT5677_DD1_MIXER,
 			RT5677_M_DAC3_R_DD1_L_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_dd1_r_mix[] = {
-	SOC_DAPM_SINGLE("Sto DAC Mix R Switch", RT5677_DD1_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix R Switch", RT5677_DD1_MIXER,
 			RT5677_M_STO_R_DD1_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("Mono DAC Mix R Switch", RT5677_DD1_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix R Switch", RT5677_DD1_MIXER,
 			RT5677_M_MONO_R_DD1_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC3 R Switch", RT5677_DD1_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC3 R Switch", RT5677_DD1_MIXER,
 			RT5677_M_DAC3_R_DD1_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC3 L Switch", RT5677_DD1_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC3 L Switch", RT5677_DD1_MIXER,
 			RT5677_M_DAC3_L_DD1_R_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_dd2_l_mix[] = {
-	SOC_DAPM_SINGLE("Sto DAC Mix L Switch", RT5677_DD2_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix L Switch", RT5677_DD2_MIXER,
 			RT5677_M_STO_L_DD2_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("Mono DAC Mix L Switch", RT5677_DD2_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix L Switch", RT5677_DD2_MIXER,
 			RT5677_M_MONO_L_DD2_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC4 L Switch", RT5677_DD2_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC4 L Switch", RT5677_DD2_MIXER,
 			RT5677_M_DAC4_L_DD2_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC4 R Switch", RT5677_DD2_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC4 R Switch", RT5677_DD2_MIXER,
 			RT5677_M_DAC4_R_DD2_L_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5677_dd2_r_mix[] = {
-	SOC_DAPM_SINGLE("Sto DAC Mix R Switch", RT5677_DD2_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix R Switch", RT5677_DD2_MIXER,
 			RT5677_M_STO_R_DD2_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("Mono DAC Mix R Switch", RT5677_DD2_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix R Switch", RT5677_DD2_MIXER,
 			RT5677_M_MONO_R_DD2_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC4 R Switch", RT5677_DD2_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC4 R Switch", RT5677_DD2_MIXER,
 			RT5677_M_DAC4_R_DD2_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC4 L Switch", RT5677_DD2_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC4 L Switch", RT5677_DD2_MIXER,
 			RT5677_M_DAC4_L_DD2_R_SFT, 1, 1),
 };
 
@@ -2596,6 +2596,21 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w,
 	return 0;
 }
 
+static int rt5677_filter_power_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		msleep(50);
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
 static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
 	SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT,
 			    0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU |
@@ -3072,19 +3087,26 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
 
 	/* DAC Mixer */
 	SND_SOC_DAPM_SUPPLY("dac stereo1 filter", RT5677_PWR_DIG2,
-		RT5677_PWR_DAC_S1F_BIT, 0, NULL, 0),
+		RT5677_PWR_DAC_S1F_BIT, 0, rt5677_filter_power_event,
+		SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("dac mono2 left filter", RT5677_PWR_DIG2,
-		RT5677_PWR_DAC_M2F_L_BIT, 0, NULL, 0),
+		RT5677_PWR_DAC_M2F_L_BIT, 0, rt5677_filter_power_event,
+		SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("dac mono2 right filter", RT5677_PWR_DIG2,
-		RT5677_PWR_DAC_M2F_R_BIT, 0, NULL, 0),
+		RT5677_PWR_DAC_M2F_R_BIT, 0, rt5677_filter_power_event,
+		SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("dac mono3 left filter", RT5677_PWR_DIG2,
-		RT5677_PWR_DAC_M3F_L_BIT, 0, NULL, 0),
+		RT5677_PWR_DAC_M3F_L_BIT, 0, rt5677_filter_power_event,
+		SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("dac mono3 right filter", RT5677_PWR_DIG2,
-		RT5677_PWR_DAC_M3F_R_BIT, 0, NULL, 0),
+		RT5677_PWR_DAC_M3F_R_BIT, 0, rt5677_filter_power_event,
+		SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("dac mono4 left filter", RT5677_PWR_DIG2,
-		RT5677_PWR_DAC_M4F_L_BIT, 0, NULL, 0),
+		RT5677_PWR_DAC_M4F_L_BIT, 0, rt5677_filter_power_event,
+		SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("dac mono4 right filter", RT5677_PWR_DIG2,
-		RT5677_PWR_DAC_M4F_R_BIT, 0, NULL, 0),
+		RT5677_PWR_DAC_M4F_R_BIT, 0, rt5677_filter_power_event,
+		SND_SOC_DAPM_POST_PMU),
 
 	SND_SOC_DAPM_MIXER("Stereo DAC MIXL", SND_SOC_NOPM, 0, 0,
 			   rt5677_sto1_dac_l_mix, ARRAY_SIZE(rt5677_sto1_dac_l_mix)),
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 056375339ea3..5380798883b5 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
 SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL,
 	6, 1, 0),
 SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL,
-	7, 1, 0),
+	7, 1, 1),
 
 SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
 	       WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv),
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index b563d6746ac4..6a49b36d12f9 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -365,8 +365,8 @@ static const struct reg_default wm8962_reg[] = {
 	{ 16924, 0x0059 },   /* R16924 - HDBASS_PG_1 */
 	{ 16925, 0x999A },   /* R16925 - HDBASS_PG_0 */
 
-	{ 17048, 0x0083 },   /* R17408 - HPF_C_1 */
-	{ 17049, 0x98AD },   /* R17409 - HPF_C_0 */
+	{ 17408, 0x0083 },   /* R17408 - HPF_C_1 */
+	{ 17409, 0x98AD },   /* R17409 - HPF_C_0 */
 
 	{ 17920, 0x007F },   /* R17920 - ADCL_RETUNE_C1_1 */
 	{ 17921, 0xFFFF },   /* R17921 - ADCL_RETUNE_C1_0 */
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 4495a40a9468..c1c9c2e3525b 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -681,8 +681,8 @@ static int davinci_mcasp_set_tdm_slot(struct snd_soc_dai *dai,
 	}
 
 	mcasp->tdm_slots = slots;
-	mcasp->tdm_mask[SNDRV_PCM_STREAM_PLAYBACK] = rx_mask;
-	mcasp->tdm_mask[SNDRV_PCM_STREAM_CAPTURE] = tx_mask;
+	mcasp->tdm_mask[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;
+	mcasp->tdm_mask[SNDRV_PCM_STREAM_CAPTURE] = rx_mask;
 	mcasp->slot_width = slot_width;
 
 	return davinci_mcasp_set_ch_constraints(mcasp);
@@ -908,6 +908,14 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream,
 		mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
 		mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
 			       FSRMOD(total_slots), FSRMOD(0x1FF));
+		/*
+		 * If McASP is set to be TX/RX synchronous and the playback is
+		 * not running already we need to configure the TX slots in
+		 * order to have correct FSX on the bus
+		 */
+		if (mcasp_is_synchronous(mcasp) && !mcasp->channels)
+			mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
+				       FSXMOD(total_slots), FSXMOD(0x1FF));
 	}
 
 	return 0;
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 19c302b0d763..14dfdee05fd5 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -283,6 +283,8 @@ config SND_SOC_IMX_MC13783
 config SND_SOC_FSL_ASOC_CARD
 	tristate "Generic ASoC Sound Card with ASRC support"
 	depends on OF && I2C
+	# enforce SND_SOC_FSL_ASOC_CARD=m if SND_AC97_CODEC=m:
+	depends on SND_AC97_CODEC || SND_AC97_CODEC=n
 	select SND_SOC_IMX_AUDMUX
 	select SND_SOC_IMX_PCM_DMA
 	select SND_SOC_FSL_ESAI
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index a4435f5e3be9..ffd5f9acc849 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -454,7 +454,8 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd,
 	 * Rx sync with Tx clocks: Clear SYNC for Tx, set it for Rx.
 	 * Tx sync with Rx clocks: Clear SYNC for Rx, set it for Tx.
 	 */
-	regmap_update_bits(sai->regmap, FSL_SAI_TCR2, FSL_SAI_CR2_SYNC, 0);
+	regmap_update_bits(sai->regmap, FSL_SAI_TCR2, FSL_SAI_CR2_SYNC,
+			   sai->synchronous[TX] ? FSL_SAI_CR2_SYNC : 0);
 	regmap_update_bits(sai->regmap, FSL_SAI_RCR2, FSL_SAI_CR2_SYNC,
 			   sai->synchronous[RX] ? FSL_SAI_CR2_SYNC : 0);
 
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 7b778ab85f8b..d430ef5a4f38 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -144,7 +144,7 @@ config SND_SOC_INTEL_SKYLAKE
 
 config SND_SOC_INTEL_SKL_RT286_MACH
 	tristate "ASoC Audio driver for SKL with RT286 I2S mode"
-	depends on X86 && ACPI
+	depends on X86 && ACPI && I2C
 	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_SKYLAKE
 	select SND_SOC_RT286
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index a7854c8fc523..ffea427aeca8 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -1240,6 +1240,7 @@ int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
 	 */
 	ret = snd_soc_tplg_component_load(&platform->component,
 					&skl_tplg_ops, fw, 0);
+	release_firmware(fw);
 	if (ret < 0) {
 		dev_err(bus->dev, "tplg component load failed%d\n", ret);
 		return -EINVAL;
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
index a38a3029062c..ac72ff5055bb 100644
--- a/sound/soc/rockchip/rockchip_spdif.c
+++ b/sound/soc/rockchip/rockchip_spdif.c
@@ -280,7 +280,7 @@ static int rk_spdif_probe(struct platform_device *pdev)
 	int ret;
 
 	match = of_match_node(rk_spdif_match, np);
-	if ((int) match->data == RK_SPDIF_RK3288) {
+	if (match->data == (void *)RK_SPDIF_RK3288) {
 		struct regmap *grf;
 
 		grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
diff --git a/sound/soc/rockchip/rockchip_spdif.h b/sound/soc/rockchip/rockchip_spdif.h
index 07f86a21046a..921b4095fb92 100644
--- a/sound/soc/rockchip/rockchip_spdif.h
+++ b/sound/soc/rockchip/rockchip_spdif.h
@@ -28,9 +28,9 @@
 #define SPDIF_CFGR_VDW(x)	(x << SPDIF_CFGR_VDW_SHIFT)
 #define SDPIF_CFGR_VDW_MASK	(0xf << SPDIF_CFGR_VDW_SHIFT)
 
-#define SPDIF_CFGR_VDW_16	SPDIF_CFGR_VDW(0x00)
-#define SPDIF_CFGR_VDW_20	SPDIF_CFGR_VDW(0x01)
-#define SPDIF_CFGR_VDW_24	SPDIF_CFGR_VDW(0x10)
+#define SPDIF_CFGR_VDW_16	SPDIF_CFGR_VDW(0x0)
+#define SPDIF_CFGR_VDW_20	SPDIF_CFGR_VDW(0x1)
+#define SPDIF_CFGR_VDW_24	SPDIF_CFGR_VDW(0x2)
 
 /*
  * DMACR
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 76da7620904c..edcf4cc2e84f 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -235,7 +235,7 @@ static int rsnd_gen2_probe(struct platform_device *pdev,
 		RSND_GEN_S_REG(SCU_SYS_STATUS0,	0x1c8),
 		RSND_GEN_S_REG(SCU_SYS_INT_EN0,	0x1cc),
 		RSND_GEN_S_REG(SCU_SYS_STATUS1,	0x1d0),
-		RSND_GEN_S_REG(SCU_SYS_INT_EN1,	0x1c4),
+		RSND_GEN_S_REG(SCU_SYS_INT_EN1,	0x1d4),
 		RSND_GEN_M_REG(SRC_SWRSR,	0x200,	0x40),
 		RSND_GEN_M_REG(SRC_SRCIR,	0x204,	0x40),
 		RSND_GEN_M_REG(SRC_ADINR,	0x214,	0x40),
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 261b50217c48..68b439ed22d7 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -923,6 +923,7 @@ static int rsnd_src_pcm_new_gen2(struct rsnd_mod *mod,
 			       struct snd_soc_pcm_runtime *rtd)
 {
 	struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
+	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
 	struct rsnd_src *src = rsnd_mod_to_src(mod);
 	int ret;
 
@@ -937,6 +938,12 @@ static int rsnd_src_pcm_new_gen2(struct rsnd_mod *mod,
 		return 0;
 
 	/*
+	 * SRC In doesn't work if DVC was enabled
+	 */
+	if (dvc && !rsnd_io_is_play(io))
+		return 0;
+
+	/*
 	 * enable sync convert
 	 */
 	ret = rsnd_kctrl_new_s(mod, io, rtd,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 24b096066a07..a1305f827a98 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -795,12 +795,12 @@ static void soc_resume_deferred(struct work_struct *work)
 
 	dev_dbg(card->dev, "ASoC: resume work completed\n");
 
-	/* userspace can access us now we are back as we were before */
-	snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);
-
 	/* Recheck all endpoints too, their state is affected by suspend */
 	dapm_mark_endpoints_dirty(card);
 	snd_soc_dapm_sync(&card->dapm);
+
+	/* userspace can access us now we are back as we were before */
+	snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);
 }
 
 /* powers up audio subsystem after a suspend */
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 016eba10b1ec..7d009428934a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2293,6 +2293,12 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
 	kfree(w);
 }
 
+void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm)
+{
+	dapm->path_sink_cache.widget = NULL;
+	dapm->path_source_cache.widget = NULL;
+}
+
 /* free all dapm widgets and resources */
 static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
 {
@@ -2303,6 +2309,7 @@ static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
 			continue;
 		snd_soc_dapm_free_widget(w);
 	}
+	snd_soc_dapm_reset_cache(dapm);
 }
 
 static struct snd_soc_dapm_widget *dapm_find_widget(
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index ecd38e52285a..2f67ba6d7a8f 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -404,7 +404,7 @@ EXPORT_SYMBOL_GPL(snd_soc_get_volsw_sx);
 /**
  * snd_soc_put_volsw_sx - double mixer set callback
  * @kcontrol: mixer control
- * @uinfo: control element information
+ * @ucontrol: control element information
  *
  * Callback to set the value of a double mixer control that spans 2 registers.
  *
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 8d7ec80af51b..6963ba20991c 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -531,7 +531,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
 	/* TLV bytes controls need standard kcontrol info handler,
 	 * TLV callback and extended put/get handlers.
 	 */
-	k->info = snd_soc_bytes_info;
+	k->info = snd_soc_bytes_info_ext;
 	k->tlv.c = snd_soc_bytes_tlv_callback;
 
 	ext_ops = tplg->bytes_ext_ops;
@@ -1805,6 +1805,7 @@ void snd_soc_tplg_widget_remove_all(struct snd_soc_dapm_context *dapm,
 		snd_soc_tplg_widget_remove(w);
 		snd_soc_dapm_free_widget(w);
 	}
+	snd_soc_dapm_reset_cache(dapm);
 }
 EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
 
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index 843f037a317d..5c2bc53f0a9b 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -669,6 +669,7 @@ static int uni_player_startup(struct snd_pcm_substream *substream,
 {
 	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
 	struct uniperif *player = priv->dai_data.uni;
+	player->substream = substream;
 
 	player->clk_adj = 0;
 
@@ -950,6 +951,8 @@ static void uni_player_shutdown(struct snd_pcm_substream *substream,
 	if (player->state != UNIPERIF_STATE_STOPPED)
 		/* Stop the player */
 		uni_player_stop(player);
+
+	player->substream = NULL;
 }
 
 static int uni_player_parse_dt_clk_glue(struct platform_device *pdev,
@@ -989,7 +992,7 @@ static int uni_player_parse_dt(struct platform_device *pdev,
 	if (!info)
 		return -ENOMEM;
 
-	if (of_property_read_u32(pnode, "version", &player->ver) ||
+	if (of_property_read_u32(pnode, "st,version", &player->ver) ||
 	    player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
 		dev_err(dev, "Unknown uniperipheral version ");
 		return -EINVAL;
@@ -998,13 +1001,13 @@ static int uni_player_parse_dt(struct platform_device *pdev,
 	if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
 		info->underflow_enabled = 1;
 
-	if (of_property_read_u32(pnode, "uniperiph-id", &info->id)) {
+	if (of_property_read_u32(pnode, "st,uniperiph-id", &info->id)) {
 		dev_err(dev, "uniperipheral id not defined");
 		return -EINVAL;
 	}
 
 	/* Read the device mode property */
-	if (of_property_read_string(pnode, "mode", &mode)) {
+	if (of_property_read_string(pnode, "st,mode", &mode)) {
 		dev_err(dev, "uniperipheral mode not defined");
 		return -EINVAL;
 	}
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index f791239a3087..8a0eb2050169 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -316,7 +316,7 @@ static int uni_reader_parse_dt(struct platform_device *pdev,
 	if (!info)
 		return -ENOMEM;
 
-	if (of_property_read_u32(node, "version", &reader->ver) ||
+	if (of_property_read_u32(node, "st,version", &reader->ver) ||
 	    reader->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
 		dev_err(&pdev->dev, "Unknown uniperipheral version ");
 		return -EINVAL;
@@ -346,7 +346,6 @@ int uni_reader_init(struct platform_device *pdev,
 	reader->hw = &uni_reader_pcm_hw;
 	reader->dai_ops = &uni_reader_dai_ops;
 
-	dev_err(reader->dev, "%s: enter\n", __func__);
 	ret = uni_reader_parse_dt(pdev, reader);
 	if (ret < 0) {
 		dev_err(reader->dev, "Failed to parse DeviceTree");
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index bcbf4da168b6..1bb896d78d09 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -2,6 +2,7 @@
  * Copyright 2014 Emilio López <emilio@elopez.com.ar>
  * Copyright 2014 Jon Smirl <jonsmirl@gmail.com>
  * Copyright 2015 Maxime Ripard <maxime.ripard@free-electrons.com>
+ * Copyright 2015 Adam Sampson <ats@offog.org>
  *
  * Based on the Allwinner SDK driver, released under the GPL.
  *
@@ -404,7 +405,7 @@ static const struct snd_kcontrol_new sun4i_codec_pa_mute =
 static DECLARE_TLV_DB_SCALE(sun4i_codec_pa_volume_scale, -6300, 100, 1);
 
 static const struct snd_kcontrol_new sun4i_codec_widgets[] = {
-	SOC_SINGLE_TLV("PA Volume", SUN4I_CODEC_DAC_ACTL,
+	SOC_SINGLE_TLV("Power Amplifier Volume", SUN4I_CODEC_DAC_ACTL,
 		       SUN4I_CODEC_DAC_ACTL_PA_VOL, 0x3F, 0,
 		       sun4i_codec_pa_volume_scale),
 };
@@ -452,12 +453,12 @@ static const struct snd_soc_dapm_widget sun4i_codec_dapm_widgets[] = {
 	SND_SOC_DAPM_SUPPLY("Mixer Enable", SUN4I_CODEC_DAC_ACTL,
 			    SUN4I_CODEC_DAC_ACTL_MIXEN, 0, NULL, 0),
 
-	/* Pre-Amplifier */
-	SND_SOC_DAPM_MIXER("Pre-Amplifier", SUN4I_CODEC_ADC_ACTL,
+	/* Power Amplifier */
+	SND_SOC_DAPM_MIXER("Power Amplifier", SUN4I_CODEC_ADC_ACTL,
 			   SUN4I_CODEC_ADC_ACTL_PA_EN, 0,
 			   sun4i_codec_pa_mixer_controls,
 			   ARRAY_SIZE(sun4i_codec_pa_mixer_controls)),
-	SND_SOC_DAPM_SWITCH("Pre-Amplifier Mute", SND_SOC_NOPM, 0, 0,
+	SND_SOC_DAPM_SWITCH("Power Amplifier Mute", SND_SOC_NOPM, 0, 0,
 			    &sun4i_codec_pa_mute),
 
 	SND_SOC_DAPM_OUTPUT("HP Right"),
@@ -480,16 +481,16 @@ static const struct snd_soc_dapm_route sun4i_codec_dapm_routes[] = {
480 { "Left Mixer", NULL, "Mixer Enable" }, 481 { "Left Mixer", NULL, "Mixer Enable" },
481 { "Left Mixer", "Left DAC Playback Switch", "Left DAC" }, 482 { "Left Mixer", "Left DAC Playback Switch", "Left DAC" },
482 483
483 /* Pre-Amplifier Mixer Routes */ 484 /* Power Amplifier Routes */
484 { "Pre-Amplifier", "Mixer Playback Switch", "Left Mixer" }, 485 { "Power Amplifier", "Mixer Playback Switch", "Left Mixer" },
485 { "Pre-Amplifier", "Mixer Playback Switch", "Right Mixer" }, 486 { "Power Amplifier", "Mixer Playback Switch", "Right Mixer" },
486 { "Pre-Amplifier", "DAC Playback Switch", "Left DAC" }, 487 { "Power Amplifier", "DAC Playback Switch", "Left DAC" },
487 { "Pre-Amplifier", "DAC Playback Switch", "Right DAC" }, 488 { "Power Amplifier", "DAC Playback Switch", "Right DAC" },
488 489
489 /* PA -> HP path */ 490 /* Headphone Output Routes */
490 { "Pre-Amplifier Mute", "Switch", "Pre-Amplifier" }, 491 { "Power Amplifier Mute", "Switch", "Power Amplifier" },
491 { "HP Right", NULL, "Pre-Amplifier Mute" }, 492 { "HP Right", NULL, "Power Amplifier Mute" },
492 { "HP Left", NULL, "Pre-Amplifier Mute" }, 493 { "HP Left", NULL, "Power Amplifier Mute" },
493}; 494};
494 495
495static struct snd_soc_codec_driver sun4i_codec_codec = { 496static struct snd_soc_codec_driver sun4i_codec_codec = {
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 7661616f3636..5b4c58c3e2c5 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -174,6 +174,8 @@ struct snd_usb_midi_in_endpoint {
 		u8 running_status_length;
 	} ports[0x10];
 	u8 seen_f5;
+	bool in_sysex;
+	u8 last_cin;
 	u8 error_resubmit;
 	int current_port;
 };
@@ -468,6 +470,39 @@ static void snd_usbmidi_maudio_broken_running_status_input(
 }
 
 /*
+ * QinHeng CH345 is buggy: every second packet inside a SysEx has not CIN 4
+ * but the previously seen CIN, but still with three data bytes.
+ */
+static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep,
+				     uint8_t *buffer, int buffer_length)
+{
+	unsigned int i, cin, length;
+
+	for (i = 0; i + 3 < buffer_length; i += 4) {
+		if (buffer[i] == 0 && i > 0)
+			break;
+		cin = buffer[i] & 0x0f;
+		if (ep->in_sysex &&
+		    cin == ep->last_cin &&
+		    (buffer[i + 1 + (cin == 0x6)] & 0x80) == 0)
+			cin = 0x4;
+#if 0
+		if (buffer[i + 1] == 0x90) {
+			/*
+			 * Either a corrupted running status or a real note-on
+			 * message; impossible to detect reliably.
+			 */
+		}
+#endif
+		length = snd_usbmidi_cin_length[cin];
+		snd_usbmidi_input_data(ep, 0, &buffer[i + 1], length);
+		ep->in_sysex = cin == 0x4;
+		if (!ep->in_sysex)
+			ep->last_cin = cin;
+	}
+}
+
+/*
  * CME protocol: like the standard protocol, but SysEx commands are sent as a
  * single USB packet preceded by a 0x0F byte.
  */
@@ -660,6 +695,12 @@ static struct usb_protocol_ops snd_usbmidi_cme_ops = {
 	.output_packet = snd_usbmidi_output_standard_packet,
 };
 
+static struct usb_protocol_ops snd_usbmidi_ch345_broken_sysex_ops = {
+	.input = ch345_broken_sysex_input,
+	.output = snd_usbmidi_standard_output,
+	.output_packet = snd_usbmidi_output_standard_packet,
+};
+
 /*
  * AKAI MPD16 protocol:
  *
663/* 704/*
664 * AKAI MPD16 protocol: 705 * AKAI MPD16 protocol:
665 * 706 *
@@ -1341,6 +1382,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi,
 	 * Various chips declare a packet size larger than 4 bytes, but
 	 * do not actually work with larger packets:
 	 */
+	case USB_ID(0x0a67, 0x5011): /* Medeli DD305 */
 	case USB_ID(0x0a92, 0x1020): /* ESI M4U */
 	case USB_ID(0x1430, 0x474b): /* RedOctane GH MIDI INTERFACE */
 	case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */
@@ -2378,6 +2420,10 @@ int snd_usbmidi_create(struct snd_card *card,
 
 		err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
 		break;
+	case QUIRK_MIDI_CH345:
+		umidi->usb_protocol_ops = &snd_usbmidi_ch345_broken_sysex_ops;
+		err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
+		break;
 	default:
 		dev_err(&umidi->dev->dev, "invalid quirk type %d\n",
 			quirk->type);
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index f494dced3c11..4f85757009b3 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1354,6 +1354,8 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
 		}
 	}
 
+	snd_usb_mixer_fu_apply_quirk(state->mixer, cval, unitid, kctl);
+
 	range = (cval->max - cval->min) / cval->res;
 	/*
 	 * Are there devices with volume range more than 255? I use a bit more
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 6a803eff87f7..ddca6547399b 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -348,13 +348,6 @@ static struct usbmix_name_map bose_companion5_map[] = {
 	{ 0 } /* terminator */
 };
 
-/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
-static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
-static struct usbmix_name_map dragonfly_1_2_map[] = {
-	{ 7, NULL, .dB = &dragonfly_1_2_dB },
-	{ 0 } /* terminator */
-};
-
 /*
  * Control map entries
  */
@@ -470,11 +463,6 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
 		.id = USB_ID(0x05a7, 0x1020),
 		.map = bose_companion5_map,
 	},
-	{
-		/* Dragonfly DAC 1.2 */
-		.id = USB_ID(0x21b4, 0x0081),
-		.map = dragonfly_1_2_map,
-	},
 	{ 0 } /* terminator */
 };
 
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index fe91184ce832..0ce888dceed0 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -37,6 +37,7 @@
 #include <sound/control.h>
 #include <sound/hwdep.h>
 #include <sound/info.h>
+#include <sound/tlv.h>
 
 #include "usbaudio.h"
 #include "mixer.h"
@@ -1825,3 +1826,39 @@ void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
 	}
 }
 
+static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer,
+					 struct snd_kcontrol *kctl)
+{
+	/* Approximation using 10 ranges based on output measurement on hw v1.2.
+	 * This seems close to the cubic mapping e.g. alsamixer uses. */
+	static const DECLARE_TLV_DB_RANGE(scale,
+		 0,  1, TLV_DB_MINMAX_ITEM(-5300, -4970),
+		 2,  5, TLV_DB_MINMAX_ITEM(-4710, -4160),
+		 6,  7, TLV_DB_MINMAX_ITEM(-3884, -3710),
+		 8, 14, TLV_DB_MINMAX_ITEM(-3443, -2560),
+		15, 16, TLV_DB_MINMAX_ITEM(-2475, -2324),
+		17, 19, TLV_DB_MINMAX_ITEM(-2228, -2031),
+		20, 26, TLV_DB_MINMAX_ITEM(-1910, -1393),
+		27, 31, TLV_DB_MINMAX_ITEM(-1322, -1032),
+		32, 40, TLV_DB_MINMAX_ITEM(-968, -490),
+		41, 50, TLV_DB_MINMAX_ITEM(-441, 0),
+	);
+
+	usb_audio_info(mixer->chip, "applying DragonFly dB scale quirk\n");
+	kctl->tlv.p = scale;
+	kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
+	kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
+}
+
+void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
+				  struct usb_mixer_elem_info *cval, int unitid,
+				  struct snd_kcontrol *kctl)
+{
+	switch (mixer->chip->usb_id) {
+	case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
+		if (unitid == 7 && cval->min == 0 && cval->max == 50)
+			snd_dragonfly_quirk_db_scale(mixer, kctl);
+		break;
+	}
+}
+
diff --git a/sound/usb/mixer_quirks.h b/sound/usb/mixer_quirks.h
index bdbfab093816..177c329cd4dd 100644
--- a/sound/usb/mixer_quirks.h
+++ b/sound/usb/mixer_quirks.h
@@ -9,5 +9,9 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
9void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer, 9void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
10 int unitid); 10 int unitid);
11 11
12void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
13 struct usb_mixer_elem_info *cval, int unitid,
14 struct snd_kcontrol *kctl);
15
12#endif /* SND_USB_MIXER_QUIRKS_H */ 16#endif /* SND_USB_MIXER_QUIRKS_H */
13 17
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 1a1e2e4df35e..c60a776e815d 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2829,6 +2829,17 @@ YAMAHA_DEVICE(0x7010, "UB99"),
 	.idProduct = 0x1020,
 },
 
+/* QinHeng devices */
+{
+	USB_DEVICE(0x1a86, 0x752d),
+	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+		.vendor_name = "QinHeng",
+		.product_name = "CH345",
+		.ifnum = 1,
+		.type = QUIRK_MIDI_CH345
+	}
+},
+
 /* KeithMcMillen Stringport */
 {
 	USB_DEVICE(0x1f38, 0x0001),
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 5ca80e7d30cd..b6c0c8e3b450 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -538,6 +538,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
 		[QUIRK_MIDI_CME] = create_any_midi_quirk,
 		[QUIRK_MIDI_AKAI] = create_any_midi_quirk,
 		[QUIRK_MIDI_FTDI] = create_any_midi_quirk,
+		[QUIRK_MIDI_CH345] = create_any_midi_quirk,
 		[QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
 		[QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
 		[QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
@@ -1124,6 +1125,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+	case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
 		return true;
 	}
 	return false;
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 15a12715bd05..b665d85555cb 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -95,6 +95,7 @@ enum quirk_type {
 	QUIRK_MIDI_AKAI,
 	QUIRK_MIDI_US122L,
 	QUIRK_MIDI_FTDI,
+	QUIRK_MIDI_CH345,
 	QUIRK_AUDIO_STANDARD_INTERFACE,
 	QUIRK_AUDIO_FIXED_ENDPOINT,
 	QUIRK_AUDIO_EDIROL_UAXX,
diff --git a/tools/Makefile b/tools/Makefile
index d6f307dfb1a3..7dc820a8c1f1 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -32,6 +32,10 @@ help:
 	@echo '  from the kernel command line to build and install one of'
 	@echo '  the tools above'
 	@echo ''
+	@echo '  $$ make tools/all'
+	@echo ''
+	@echo '  builds all tools.'
+	@echo ''
 	@echo '  $$ make tools/install'
 	@echo ''
 	@echo '  installs all tools.'
@@ -77,6 +81,11 @@ tmon: FORCE
 freefall: FORCE
 	$(call descend,laptop/$@)
 
+all: acpi cgroup cpupower hv firewire lguest \
+		perf selftests turbostat usb \
+		virtio vm net x86_energy_perf_policy \
+		tmon freefall
+
 acpi_install:
 	$(call descend,power/$(@:_install=),install)
 
@@ -101,7 +110,7 @@ freefall_install:
 install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \
 		perf_install selftests_install turbostat_install usb_install \
 		virtio_install vm_install net_install x86_energy_perf_policy_install \
-		tmon freefall_install
+		tmon_install freefall_install
 
 acpi_clean:
 	$(call descend,power/acpi,clean)
diff --git a/tools/net/Makefile b/tools/net/Makefile
index ee577ea03ba5..ddf888010652 100644
--- a/tools/net/Makefile
+++ b/tools/net/Makefile
@@ -4,6 +4,9 @@ CC = gcc
 LEX = flex
 YACC = bison
 
+CFLAGS += -Wall -O2
+CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
+
 %.yacc.c: %.y
 	$(YACC) -o $@ -d $<
 
@@ -12,15 +15,13 @@ YACC = bison
 
 all : bpf_jit_disasm bpf_dbg bpf_asm
 
-bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm'
+bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm'
 bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
 bpf_jit_disasm : bpf_jit_disasm.o
 
-bpf_dbg : CFLAGS = -Wall -O2
 bpf_dbg : LDLIBS = -lreadline
 bpf_dbg : bpf_dbg.o
 
-bpf_asm : CFLAGS = -Wall -O2 -I.
 bpf_asm : LDLIBS =
 bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
 bpf_exp.lex.o : bpf_exp.yacc.c
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 0a945d2e8ca5..99d127fe9c35 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -675,6 +675,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
 		.fork		= perf_event__repipe,
 		.exit		= perf_event__repipe,
 		.lost		= perf_event__repipe,
+		.lost_samples	= perf_event__repipe,
 		.aux		= perf_event__repipe,
 		.itrace_start	= perf_event__repipe,
 		.context_switch	= perf_event__repipe,
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 2853ad2bd435..f256fac1e722 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -44,7 +44,7 @@
 struct report {
 	struct perf_tool	tool;
 	struct perf_session	*session;
-	bool			force, use_tui, use_gtk, use_stdio;
+	bool			use_tui, use_gtk, use_stdio;
 	bool			hide_unresolved;
 	bool			dont_use_callchains;
 	bool			show_full_info;
@@ -678,7 +678,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
678 "file", "vmlinux pathname"), 678 "file", "vmlinux pathname"),
679 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 679 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
680 "file", "kallsyms pathname"), 680 "file", "kallsyms pathname"),
681 OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"), 681 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
682 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, 682 OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
683 "load module symbols - WARNING: use only with -k and LIVE kernel"), 683 "load module symbols - WARNING: use only with -k and LIVE kernel"),
684 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, 684 OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
@@ -832,7 +832,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
 	}
 
 	file.path = input_name;
-	file.force = report.force;
+	file.force = symbol_conf.force;
 
 repeat:
 	session = perf_session__new(&file, false, &report.tool);
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index e5afb8936040..fa9eb92c9e24 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1430,7 +1430,6 @@ close_file_and_continue:
 
 struct popup_action {
 	struct thread		*thread;
-	struct dso		*dso;
 	struct map_symbol	ms;
 	int			socket;
 
@@ -1565,7 +1564,6 @@ add_dso_opt(struct hist_browser *browser, struct popup_action *act,
 		return 0;
 
 	act->ms.map = map;
-	act->dso = map->dso;
 	act->fn = do_zoom_dso;
 	return 1;
 }
@@ -1827,7 +1825,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 
 	while (1) {
 		struct thread *thread = NULL;
-		struct dso *dso = NULL;
 		struct map *map = NULL;
 		int choice = 0;
 		int socked_id = -1;
@@ -1839,8 +1836,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 		if (browser->he_selection != NULL) {
 			thread = hist_browser__selected_thread(browser);
 			map = browser->selection->map;
-			if (map)
-				dso = map->dso;
 			socked_id = browser->he_selection->socket;
 		}
 		switch (key) {
@@ -1874,7 +1869,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 			hist_browser__dump(browser);
 			continue;
 		case 'd':
-			actions->dso = dso;
+			actions->ms.map = map;
 			do_zoom_dso(browser, actions);
 			continue;
 		case 'V':
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index d909459fb54c..217b5a60e2ab 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -76,6 +76,7 @@ struct perf_tool build_id__mark_dso_hit_ops = {
 	.exit		 = perf_event__exit_del_thread,
 	.attr		 = perf_event__process_attr,
 	.build_id	 = perf_event__process_build_id,
+	.ordered_events	 = true,
 };
 
 int build_id__sprintf(const u8 *build_id, int len, char *bf)
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 7c0c08386a1d..425df5c86c9c 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -933,6 +933,7 @@ static struct dso *__dso__findlink_by_longname(struct rb_root *root,
 		/* Add new node and rebalance tree */
 		rb_link_node(&dso->rb_node, parent, p);
 		rb_insert_color(&dso->rb_node, root);
+		dso->root = root;
 	}
 	return NULL;
 }
@@ -945,15 +946,30 @@ static inline struct dso *__dso__find_by_longname(struct rb_root *root,
 
 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
 {
+	struct rb_root *root = dso->root;
+
 	if (name == NULL)
 		return;
 
 	if (dso->long_name_allocated)
 		free((char *)dso->long_name);
 
+	if (root) {
+		rb_erase(&dso->rb_node, root);
+		/*
+		 * __dso__findlink_by_longname() isn't guaranteed to add it
+		 * back, so a clean removal is required here.
+		 */
+		RB_CLEAR_NODE(&dso->rb_node);
+		dso->root = NULL;
+	}
+
 	dso->long_name = name;
 	dso->long_name_len = strlen(name);
 	dso->long_name_allocated = name_allocated;
+
+	if (root)
+		__dso__findlink_by_longname(root, dso, NULL);
 }
 
 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
@@ -1046,6 +1062,7 @@ struct dso *dso__new(const char *name)
 	dso->kernel = DSO_TYPE_USER;
 	dso->needs_swap = DSO_SWAP__UNSET;
 	RB_CLEAR_NODE(&dso->rb_node);
+	dso->root = NULL;
 	INIT_LIST_HEAD(&dso->node);
 	INIT_LIST_HEAD(&dso->data.open_entry);
 	pthread_mutex_init(&dso->lock, NULL);
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index fc8db9c764ac..45ec4d0a50ed 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -135,6 +135,7 @@ struct dso {
 	pthread_mutex_t	 lock;
 	struct list_head node;
 	struct rb_node	 rb_node;	/* rbtree node sorted by long name */
+	struct rb_root	 *root;		/* root of rbtree that rb_node is in */
 	struct rb_root	 symbols[MAP__NR_TYPES];
 	struct rb_root	 symbol_names[MAP__NR_TYPES];
 	struct {
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 5ef90be2a249..8b303ff20289 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -91,6 +91,7 @@ static void dsos__purge(struct dsos *dsos)
 
 	list_for_each_entry_safe(pos, n, &dsos->head, node) {
 		RB_CLEAR_NODE(&pos->rb_node);
+		pos->root = NULL;
 		list_del_init(&pos->node);
 		dso__put(pos);
 	}
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index bd8f03de5e40..05012bb178d7 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1183,7 +1183,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
 			container_of(pf, struct trace_event_finder, pf);
 	struct perf_probe_point *pp = &pf->pev->point;
 	struct probe_trace_event *tev;
-	struct perf_probe_arg *args;
+	struct perf_probe_arg *args = NULL;
 	int ret, i;
 
@@ -1198,19 +1198,23 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
 	ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr,
 				     pp->retprobe, pp->function, &tev->point);
 	if (ret < 0)
-		return ret;
+		goto end;
 
 	tev->point.realname = strdup(dwarf_diename(sc_die));
-	if (!tev->point.realname)
-		return -ENOMEM;
+	if (!tev->point.realname) {
+		ret = -ENOMEM;
+		goto end;
+	}
 
 	pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
 		 tev->point.offset);
 
 	/* Expand special probe argument if exist */
 	args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS);
-	if (args == NULL)
-		return -ENOMEM;
+	if (args == NULL) {
+		ret = -ENOMEM;
+		goto end;
+	}
 
 	ret = expand_probe_args(sc_die, pf, args);
 	if (ret < 0)
@@ -1234,6 +1238,10 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
 	}
 
 end:
+	if (ret) {
+		clear_probe_trace_event(tev);
+		tf->ntevs--;
+	}
 	free(args);
 	return ret;
 }
@@ -1246,7 +1254,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
 	struct trace_event_finder tf = {
 			.pf = {.pev = pev, .callback = add_probe_trace_event},
 			.max_tevs = probe_conf.max_probes, .mod = dbg->mod};
-	int ret;
+	int ret, i;
 
 	/* Allocate result tevs array */
 	*tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs);
@@ -1258,6 +1266,8 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
 
 	ret = debuginfo__find_probes(dbg, &tf.pf);
 	if (ret < 0) {
+		for (i = 0; i < tf.ntevs; i++)
+			clear_probe_trace_event(&tf.tevs[i]);
 		zfree(tevs);
 		return ret;
 	}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index b4cc7662677e..cd08027a6d2c 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -654,19 +654,24 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
 	struct map_groups *kmaps = map__kmaps(map);
 	struct map *curr_map;
 	struct symbol *pos;
-	int count = 0, moved = 0;
+	int count = 0;
+	struct rb_root old_root = dso->symbols[map->type];
 	struct rb_root *root = &dso->symbols[map->type];
 	struct rb_node *next = rb_first(root);
 
 	if (!kmaps)
 		return -1;
 
+	*root = RB_ROOT;
+
 	while (next) {
 		char *module;
 
 		pos = rb_entry(next, struct symbol, rb_node);
 		next = rb_next(&pos->rb_node);
 
+		rb_erase_init(&pos->rb_node, &old_root);
+
 		module = strchr(pos->name, '\t');
 		if (module)
 			*module = '\0';
@@ -674,28 +679,21 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
 		curr_map = map_groups__find(kmaps, map->type, pos->start);
 
 		if (!curr_map || (filter && filter(curr_map, pos))) {
-			rb_erase_init(&pos->rb_node, root);
 			symbol__delete(pos);
-		} else {
-			pos->start -= curr_map->start - curr_map->pgoff;
-			if (pos->end)
-				pos->end -= curr_map->start - curr_map->pgoff;
-			if (curr_map->dso != map->dso) {
-				rb_erase_init(&pos->rb_node, root);
-				symbols__insert(
-					&curr_map->dso->symbols[curr_map->type],
-					pos);
-				++moved;
-			} else {
-				++count;
-			}
+			continue;
 		}
+
+		pos->start -= curr_map->start - curr_map->pgoff;
+		if (pos->end)
+			pos->end -= curr_map->start - curr_map->pgoff;
+		symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
+		++count;
 	}
 
 	/* Symbols have been adjusted */
 	dso->adjust_symbols = 1;
 
-	return count + moved;
+	return count;
 }
 
 /*
@@ -1438,9 +1436,9 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
 	if (lstat(dso->name, &st) < 0)
 		goto out;
 
-	if (st.st_uid && (st.st_uid != geteuid())) {
+	if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) {
 		pr_warning("File %s not owned by current user or root, "
-			   "ignoring it.\n", dso->name);
+			   "ignoring it (use -f to override).\n", dso->name);
 		goto out;
 	}
 
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 40073c60b83d..dcd786e364f2 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -84,6 +84,7 @@ struct symbol_conf {
84 unsigned short priv_size; 84 unsigned short priv_size;
85 unsigned short nr_events; 85 unsigned short nr_events;
86 bool try_vmlinux_path, 86 bool try_vmlinux_path,
87 force,
87 ignore_vmlinux, 88 ignore_vmlinux,
88 ignore_vmlinux_buildid, 89 ignore_vmlinux_buildid,
89 show_kernel_path, 90 show_kernel_path,
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index d8e4b20b6d54..0dac7e05a6ac 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1173,9 +1173,9 @@ dump_nhm_platform_info(void)
 	unsigned long long msr;
 	unsigned int ratio;
 
-	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+	get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
 
-	fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
+	fprintf(stderr, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
 
 	ratio = (msr >> 40) & 0xFF;
 	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
@@ -1807,7 +1807,7 @@ void check_permissions()
  *
  * MSR_SMI_COUNT                   0x00000034
  *
- * MSR_NHM_PLATFORM_INFO           0x000000ce
+ * MSR_PLATFORM_INFO               0x000000ce
  * MSR_NHM_SNB_PKG_CST_CFG_CTL     0x000000e2
  *
  * MSR_PKG_C3_RESIDENCY            0x000003f8
@@ -1876,7 +1876,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
 	get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
 	pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
 
-	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+	get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
 	base_ratio = (msr >> 8) & 0xFF;
 
 	base_hz = base_ratio * bclk * 1000000;
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 40ab4476c80a..51cf8256c6cd 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -420,8 +420,7 @@ static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
 
 static int nfit_test0_alloc(struct nfit_test *t)
 {
-	size_t nfit_size = sizeof(struct acpi_table_nfit)
-		+ sizeof(struct acpi_nfit_system_address) * NUM_SPA
+	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
 		+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
 		+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
 		+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
@@ -471,8 +470,7 @@ static int nfit_test0_alloc(struct nfit_test *t)
 
 static int nfit_test1_alloc(struct nfit_test *t)
 {
-	size_t nfit_size = sizeof(struct acpi_table_nfit)
-		+ sizeof(struct acpi_nfit_system_address)
+	size_t nfit_size = sizeof(struct acpi_nfit_system_address)
 		+ sizeof(struct acpi_nfit_memory_map)
 		+ sizeof(struct acpi_nfit_control_region);
 
@@ -488,39 +486,24 @@ static int nfit_test1_alloc(struct nfit_test *t)
 	return 0;
 }
 
-static void nfit_test_init_header(struct acpi_table_nfit *nfit, size_t size)
-{
-	memcpy(nfit->header.signature, ACPI_SIG_NFIT, 4);
-	nfit->header.length = size;
-	nfit->header.revision = 1;
-	memcpy(nfit->header.oem_id, "LIBND", 6);
-	memcpy(nfit->header.oem_table_id, "TEST", 5);
-	nfit->header.oem_revision = 1;
-	memcpy(nfit->header.asl_compiler_id, "TST", 4);
-	nfit->header.asl_compiler_revision = 1;
-}
-
 static void nfit_test0_setup(struct nfit_test *t)
 {
 	struct nvdimm_bus_descriptor *nd_desc;
 	struct acpi_nfit_desc *acpi_desc;
 	struct acpi_nfit_memory_map *memdev;
 	void *nfit_buf = t->nfit_buf;
-	size_t size = t->nfit_size;
 	struct acpi_nfit_system_address *spa;
 	struct acpi_nfit_control_region *dcr;
 	struct acpi_nfit_data_region *bdw;
 	struct acpi_nfit_flush_address *flush;
 	unsigned int offset;
 
-	nfit_test_init_header(nfit_buf, size);
-
 	/*
 	 * spa0 (interleave first half of dimm0 and dimm1, note storage
 	 * does not actually alias the related block-data-window
 	 * regions)
 	 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit);
+	spa = nfit_buf;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
@@ -533,7 +516,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	 * does not actually alias the related block-data-window
 	 * regions)
 	 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa);
+	spa = nfit_buf + sizeof(*spa);
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
@@ -542,7 +525,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = SPA1_SIZE;
 
 	/* spa2 (dcr0) dimm0 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 2;
+	spa = nfit_buf + sizeof(*spa) * 2;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -551,7 +534,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DCR_SIZE;
 
 	/* spa3 (dcr1) dimm1 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 3;
+	spa = nfit_buf + sizeof(*spa) * 3;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -560,7 +543,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DCR_SIZE;
 
 	/* spa4 (dcr2) dimm2 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 4;
+	spa = nfit_buf + sizeof(*spa) * 4;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -569,7 +552,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DCR_SIZE;
 
 	/* spa5 (dcr3) dimm3 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 5;
+	spa = nfit_buf + sizeof(*spa) * 5;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
@@ -578,7 +561,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DCR_SIZE;
 
 	/* spa6 (bdw for dcr0) dimm0 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 6;
+	spa = nfit_buf + sizeof(*spa) * 6;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -587,7 +570,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DIMM_SIZE;
 
 	/* spa7 (bdw for dcr1) dimm1 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 7;
+	spa = nfit_buf + sizeof(*spa) * 7;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -596,7 +579,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DIMM_SIZE;
 
 	/* spa8 (bdw for dcr2) dimm2 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 8;
+	spa = nfit_buf + sizeof(*spa) * 8;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -605,7 +588,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->length = DIMM_SIZE;
 
 	/* spa9 (bdw for dcr3) dimm3 */
-	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 9;
+	spa = nfit_buf + sizeof(*spa) * 9;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
 	spa->header.length = sizeof(*spa);
 	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
@@ -613,7 +596,7 @@ static void nfit_test0_setup(struct nfit_test *t)
 	spa->address = t->dimm_dma[3];
 	spa->length = DIMM_SIZE;
 
-	offset = sizeof(struct acpi_table_nfit) + sizeof(*spa) * 10;
+	offset = sizeof(*spa) * 10;
 	/* mem-region0 (spa0, dimm0) */
 	memdev = nfit_buf + offset;
 	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
@@ -1100,15 +1083,13 @@ static void nfit_test0_setup(struct nfit_test *t)
 
 static void nfit_test1_setup(struct nfit_test *t)
 {
-	size_t size = t->nfit_size, offset;
+	size_t offset;
 	void *nfit_buf = t->nfit_buf;
 	struct acpi_nfit_memory_map *memdev;
 	struct acpi_nfit_control_region *dcr;
 	struct acpi_nfit_system_address *spa;
 
-	nfit_test_init_header(nfit_buf, size);
-
-	offset = sizeof(struct acpi_table_nfit);
+	offset = 0;
 	/* spa0 (flat range with no bdw aliasing) */
 	spa = nfit_buf + offset;
 	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
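
The nfit_test hunks above all make the same adjustment: the test buffer no longer carries a struct acpi_table_nfit header in front of the sub-tables, so nfit_test_init_header() goes away, every offset drops its sizeof(struct acpi_table_nfit) term, and the first SPA entry sits at offset 0. A minimal stand-alone sketch of the resulting addressing pattern (the helper name and entry_size parameter are illustrative, standing in for sizeof(*spa)):

#include <stddef.h>

/* Nth fixed-size NFIT sub-table entry in a headerless test buffer. */
static unsigned char *nth_entry(unsigned char *nfit_buf, size_t entry_size,
				size_t n)
{
	/* previously: nfit_buf + sizeof(struct acpi_table_nfit) + n * entry_size */
	return nfit_buf + n * entry_size;
}
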
diff --git a/tools/testing/selftests/futex/README b/tools/testing/selftests/futex/README
index 3224a049b196..0558bb9ce0a6 100644
--- a/tools/testing/selftests/futex/README
+++ b/tools/testing/selftests/futex/README
@@ -27,7 +27,7 @@ o The build system shall remain as simple as possible, avoiding any archive or
 o Where possible, any helper functions or other package-wide code shall be
   implemented in header files, avoiding the need to compile intermediate object
   files.
-o External dependendencies shall remain as minimal as possible. Currently gcc
+o External dependencies shall remain as minimal as possible. Currently gcc
   and glibc are the only dependencies.
 o Tests return 0 for success and < 0 for failure.
 
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index e38cc54942db..882fe83a3554 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -492,6 +492,9 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
 	pid_t parent = getppid();
 	int fd;
 	void *map1, *map2;
+	int page_size = sysconf(_SC_PAGESIZE);
+
+	ASSERT_LT(0, page_size);
 
 	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 	ASSERT_EQ(0, ret);
@@ -504,16 +507,16 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
 
 	EXPECT_EQ(parent, syscall(__NR_getppid));
 	map1 = (void *)syscall(sysno,
-		NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, PAGE_SIZE);
+		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size);
 	EXPECT_NE(MAP_FAILED, map1);
 	/* mmap2() should never return. */
 	map2 = (void *)syscall(sysno,
-		NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
+		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
 	EXPECT_EQ(MAP_FAILED, map2);
 
 	/* The test failed, so clean up the resources. */
-	munmap(map1, PAGE_SIZE);
-	munmap(map2, PAGE_SIZE);
+	munmap(map1, page_size);
+	munmap(map2, page_size);
 	close(fd);
 }
 
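
The seccomp change replaces the compile-time PAGE_SIZE constant with a runtime sysconf() query, since the page size varies by architecture and configuration and is not reliably exposed to user space as a macro. A stand-alone sketch of the pattern being adopted:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);

	/* the selftest guards this with ASSERT_LT(0, page_size) */
	if (page_size <= 0)
		return 1;
	printf("page size: %ld bytes\n", page_size);
	return 0;
}
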
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 0a3da64638ce..4db7d5691ba7 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -110,4 +110,10 @@ static inline void free_page(unsigned long addr)
 	(void) (&_min1 == &_min2);		\
 	_min1 < _min2 ? _min1 : _min2; })
 
+/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
+#define list_add_tail(a, b) do {} while (0)
+#define list_del(a) do {} while (0)
+#define list_for_each_entry(a, b, c) while (0)
+/* end of stubs */
+
 #endif /* KERNEL_H */
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index a3e07016a440..ee125e714053 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -3,12 +3,6 @@
 #include <linux/scatterlist.h>
 #include <linux/kernel.h>
 
-/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
-#define list_add_tail(a, b) do {} while (0)
-#define list_del(a) do {} while (0)
-#define list_for_each_entry(a, b, c) while (0)
-/* end of stubs */
-
 struct virtio_device {
 	void *dev;
 	u64 features;
diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h
index 806d683ab107..57a6964a1e35 100644
--- a/tools/virtio/linux/virtio_config.h
+++ b/tools/virtio/linux/virtio_config.h
@@ -40,33 +40,39 @@ static inline void __virtio_clear_bit(struct virtio_device *vdev,
 #define virtio_has_feature(dev, feature) \
 	(__virtio_test_bit((dev), feature))
 
+static inline bool virtio_is_little_endian(struct virtio_device *vdev)
+{
+	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
+		virtio_legacy_is_little_endian();
+}
+
+/* Memory accessors */
 static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
 {
-	return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+	return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
 }
 
 static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
 {
-	return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+	return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
 }
 
 static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
 {
-	return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+	return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
 }
 
 static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
 {
-	return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+	return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
 }
 
 static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
 {
-	return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+	return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
 }
 
 static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
 {
-	return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+	return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
 }
-
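
The virtio_config.h change funnels every memory accessor through one predicate: a device is treated as little-endian if it negotiated VIRTIO_F_VERSION_1 (modern devices are always little-endian) or if the legacy host happens to be little-endian. A user-space sketch of the predicate, with simplified stand-ins for the struct and helpers in tools/virtio/linux/ (VIRTIO_F_VERSION_1 is feature bit 32):

#include <stdbool.h>
#include <stdint.h>

#define VIRTIO_F_VERSION_1 32	/* modern-device feature bit */

struct virtio_device {
	uint64_t features;
};

static bool virtio_has_feature(const struct virtio_device *vdev,
			       unsigned int bit)
{
	return vdev->features & (1ULL << bit);
}

static bool virtio_legacy_is_little_endian(void)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return true;	/* legacy devices follow guest-native endianness */
#else
	return false;
#endif
}

static bool virtio_is_little_endian(const struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	       virtio_legacy_is_little_endian();
}
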
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index bcf5ec760eb9..5a6016224bb9 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -128,6 +128,7 @@ static const char * const page_flag_names[] = {
 	[KPF_THP]		= "t:thp",
 	[KPF_BALLOON]		= "o:balloon",
 	[KPF_ZERO_PAGE]		= "z:zero_page",
+	[KPF_IDLE]		= "i:idle_page",
 
 	[KPF_RESERVED]		= "r:reserved",
 	[KPF_MLOCKED]		= "m:mlocked",
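
The page-types change simply teaches the flag-name table about the new idle-page bit. The table relies on C designated initializers indexed by bit number; a minimal stand-alone version (the KPF_* values here are illustrative, the real ones come from include/uapi/linux/kernel-page-flags.h):

#include <stdio.h>

#define KPF_ZERO_PAGE	24	/* illustrative bit numbers */
#define KPF_IDLE	25

static const char * const page_flag_names[] = {
	[KPF_ZERO_PAGE]	= "z:zero_page",
	[KPF_IDLE]	= "i:idle_page",
};

int main(void)
{
	printf("%s\n", page_flag_names[KPF_IDLE]);	/* -> i:idle_page */
	return 0;
}
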
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 21a0ab2d8919..69bca185c471 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -221,17 +221,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 	kvm_timer_update_state(vcpu);
 
 	/*
 	 * If we enter the guest with the virtual input level to the VGIC
 	 * asserted, then we have already told the VGIC what we need to, and
 	 * we don't need to exit from the guest until the guest deactivates
 	 * the already injected interrupt, so therefore we should set the
 	 * hardware active state to prevent unnecessary exits from the guest.
 	 *
-	 * Conversely, if the virtual input level is deasserted, then always
-	 * clear the hardware active state to ensure that hardware interrupts
-	 * from the timer triggers a guest exit.
-	 */
-	if (timer->irq.level)
+	 * Also, if we enter the guest with the virtual timer interrupt active,
+	 * then it must be active on the physical distributor, because we set
+	 * the HW bit and the guest must be able to deactivate the virtual and
+	 * physical interrupt at the same time.
+	 *
+	 * Conversely, if the virtual input level is deasserted and the virtual
+	 * interrupt is not active, then always clear the hardware active state
+	 * to ensure that hardware interrupts from the timer triggers a guest
+	 * exit.
+	 */
+	if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map))
 		phys_active = true;
 	else
 		phys_active = false;
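
The updated condition keeps the physical timer interrupt active in two cases rather than one: the virtual line is still asserted, or the virtual interrupt is still active in a list register. Restated as a pure predicate for clarity (a sketch; the names and flattened bool arguments are illustrative, not kernel types):

#include <stdbool.h>

/*
 * phys_active decision from kvm_timer_flush_hwstate() above: keep the
 * hardware interrupt active while the virtual line is asserted or the
 * virtual interrupt is still active in the VGIC.
 */
static bool timer_phys_active(bool virt_level, bool virt_irq_active)
{
	return virt_level || virt_irq_active;
}
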
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 533538385d5d..65461f821a75 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1096,6 +1096,27 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu)
 	vgic_set_lr(vcpu, lr_nr, vlr);
 }
 
+static bool dist_active_irq(struct kvm_vcpu *vcpu)
+{
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+
+	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
+}
+
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+{
+	int i;
+
+	for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) {
+		struct vgic_lr vlr = vgic_get_lr(vcpu, i);
+
+		if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
+			return true;
+	}
+
+	return dist_active_irq(vcpu);
+}
+
 /*
  * An interrupt may have been disabled after being made pending on the
  * CPU interface (the classic case is a timer running while we're
@@ -1248,7 +1269,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * may have been serviced from another vcpu. In all cases,
 	 * move along.
 	 */
-	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu))
+	if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu))
 		goto epilog;
 
 	/* SGIs */
@@ -1396,25 +1417,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	struct irq_phys_map *map;
-	bool phys_active;
 	bool level_pending;
-	int ret;
 
 	if (!(vlr.state & LR_HW))
 		return false;
 
-	map = vgic_irq_map_search(vcpu, vlr.irq);
-	BUG_ON(!map);
-
-	ret = irq_get_irqchip_state(map->irq,
-				    IRQCHIP_STATE_ACTIVE,
-				    &phys_active);
-
-	WARN_ON(ret);
-
-	if (phys_active)
-		return 0;
+	if (vlr.state & LR_STATE_ACTIVE)
+		return false;
 
 	spin_lock(&dist->lock);
 	level_pending = process_queued_irq(vcpu, lr, vlr);
@@ -1479,17 +1488,6 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
 }
 
-int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu)
-{
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
-	if (!irqchip_in_kernel(vcpu->kvm))
-		return 0;
-
-	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
-}
-
-
 void vgic_kick_vcpus(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
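
The kvm_vgic_map_is_active() helper added above answers "is this mapped interrupt still in flight?" by scanning the list registers for the virtual IRQ in the active state, then falling back to the distributor's per-vcpu active bitmap; the timer code in arch_timer.c uses it to decide phys_active. A simplified user-space model of the lookup (the struct, state bit, and LR count are illustrative stand-ins, not the kernel definitions):

#include <stdbool.h>

#define LR_STATE_ACTIVE	(1 << 1)	/* illustrative state bit */
#define NR_LR		4		/* illustrative list-register count */

struct vgic_lr_model {
	int irq;
	int state;
};

static bool map_is_active(const struct vgic_lr_model lr[NR_LR], int virt_irq,
			  bool dist_active)
{
	for (int i = 0; i < NR_LR; i++)
		if (lr[i].irq == virt_irq && (lr[i].state & LR_STATE_ACTIVE))
			return true;

	return dist_active;	/* dist_active_irq(vcpu) in the kernel code */
}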